Fri Jul 24 11:30:29 2009 UTC ()
- rework the page pinning API, so that a function is now provided for
each level of indirection encountered during virtual memory translations.
Update pmap accordingly. Pinning looks cleaner that way, and it opens the
possibility of pinning lower-level pages if necessary (NetBSD does not do
it currently).
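
In short, the generic xpq_queue_pin_table() now takes the MMUEXT pin
level explicitly, and a thin inline wrapper is provided per level (see
the xenpmap.h and x86_xpmap.c diffs below), e.g. for L2:

	void xpq_queue_pin_table(paddr_t, unsigned int);

	static inline void
	xpq_queue_pin_l2_table(paddr_t pa)
	{
		xpq_queue_pin_table(pa, MMUEXT_PIN_L2_TABLE);
	}

Callers then pin at the level they mean, e.g. in pmap_pdp_ctor():

	xpq_queue_pin_l2_table(xpmap_ptom_masked(pdirpa));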

- some fixes and comments to explain how page validation/invalidation
takes place during save/restore/migrate under Xen. The L2 shadow entries
used by PAE are now handled, so suspend/resume basically works with PAE.
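
Concretely, the suspend path now clears everything Xen mishandles and
the resume path restores it (see the xen_machdep.c diff below):

	/* in xen_prepare_suspend() */
	pmap_unmap_all_apdp_pdes();
	pmap_unmap_shadow_entries();	/* PAE only */

	/* in xen_prepare_resume(), once shared_info is mapped again */
	pmap_map_shadow_entries();	/* PAE only */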

- fix an issue reported by Christoph (cegger@) with xencons
suspend/resume in dom0.
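
The fix amounts to never tearing down the dom0 console on suspend, and
binding VIRQ_CONSOLE only at first start-up. Simplified from the
xencons.c diff below:

	if (xendomain_is_dom0()) {
		/* dom0 console resume is only needed at first start-up */
		if (cold) {
			evtch = bind_virq_to_evtch(VIRQ_CONSOLE);
			event_set_handler(evtch, xencons_intr,
			    xencons_console_device, IPL_TTY, "xencons");
		}
	}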

TODO:

- PAE save/restore is currently limited to single-user operation;
multi-user support requires modifications to the PAE pmap that should be
discussed first. See the comments in this commit about the L2 shadow
pages cached in the pmap_pdp_cache pool, and the sketch below.
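
For reference, the direction stubbed out (commented) in this commit is a
list tracking each L2 page pinned with a valid PDIR_SLOT_PTE entry, so
that restore code could walk and fix them. A sketch using the names from
the pmap.h stubs, not working code yet:

	struct l2_pdirpte {
		SLIST_ENTRY(l2_pdirpte) l2_pdirpte_list;
		paddr_t slot_pte;		/* PA of the PDIR_SLOT_PTE entry */
		paddr_t slot_pte_content;	/* its expected content */
	};
	SLIST_HEAD(l2_pdirpte_head, l2_pdirpte);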

- the grant table bug is still there; do not use the kernels of this
branch to test suspend/resume, unless you want to experience bad crashes
in dom0 and push the big red button.

Now there is light at the end of the tunnel :)

Note: XEN2 kernels will neither build nor work with this branch.


(jym)
diff -r1.21.2.3 -r1.21.2.4 src/sys/arch/x86/include/pmap.h
diff -r1.77.2.4 -r1.77.2.5 src/sys/arch/x86/x86/pmap.c
diff -r1.21.8.2 -r1.21.8.3 src/sys/arch/xen/include/xenpmap.h
diff -r1.12.4.4 -r1.12.4.5 src/sys/arch/xen/x86/x86_xpmap.c
diff -r1.4.12.3 -r1.4.12.4 src/sys/arch/xen/xen/xen_machdep.c
diff -r1.31.2.3 -r1.31.2.4 src/sys/arch/xen/xen/xencons.c

cvs diff -r1.21.2.3 -r1.21.2.4 src/sys/arch/x86/include/pmap.h

--- src/sys/arch/x86/include/pmap.h 2009/07/23 23:31:36 1.21.2.3
+++ src/sys/arch/x86/include/pmap.h 2009/07/24 11:30:28 1.21.2.4
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: pmap.h,v 1.21.2.3 2009/07/23 23:31:36 jym Exp $ */ 1/* $NetBSD: pmap.h,v 1.21.2.4 2009/07/24 11:30:28 jym Exp $ */
2 2
3/* 3/*
4 * 4 *
5 * Copyright (c) 1997 Charles D. Cranor and Washington University. 5 * Copyright (c) 1997 Charles D. Cranor and Washington University.
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions 9 * modification, are permitted provided that the following conditions
10 * are met: 10 * are met:
11 * 1. Redistributions of source code must retain the above copyright 11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer. 12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright 13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the 14 * notice, this list of conditions and the following disclaimer in the
@@ -344,26 +344,54 @@ kvtopte(vaddr_t va) @@ -344,26 +344,54 @@ kvtopte(vaddr_t va)
344 344
345 return (PTE_BASE + pl1_i(va)); 345 return (PTE_BASE + pl1_i(va));
346} 346}
347 347
348paddr_t vtophys(vaddr_t); 348paddr_t vtophys(vaddr_t);
349vaddr_t pmap_map(vaddr_t, paddr_t, paddr_t, vm_prot_t); 349vaddr_t pmap_map(vaddr_t, paddr_t, paddr_t, vm_prot_t);
350void pmap_cpu_init_early(struct cpu_info *); 350void pmap_cpu_init_early(struct cpu_info *);
351void pmap_cpu_init_late(struct cpu_info *); 351void pmap_cpu_init_late(struct cpu_info *);
352bool sse2_idlezero_page(void *); 352bool sse2_idlezero_page(void *);
353 353
354 354
355#ifdef XEN 355#ifdef XEN
356 356
 357#ifdef PAE
 358/*
 359 * Under PAE, Xen handles our recursive mappings to the L2 shadow pages
 360 * erroneously during a restore (the last entry of the PDIR_SLOT_PTE).
 361 * These pages are found in two places:
 362 * - the used ones are found in the pmaps list
 363 * - the unused ones (but still valid from Xen's point of view) are
 364 * cached inside the pmap_pdp_cache pool.
 365 * This list is not protected by locks, as it is expected to be accessed
 366 * only by pmap_create()/pmap_destroy() and the save/restore code, which
 367 * cannot run concurrently.
 368struct l2_pdirpte {
 369 SLIST_ENTRY(l2_pdirpte) l2_pdirpte_list;
 370 paddr_t slot_pte;
 371 paddr_t slot_pte_content;
 372};
 373 */
 374
 375/*
 376 * Head of the list of all pages pinned as L2 and containing a valid
 377 * entry in PDIR_SLOT_PTE
 378SLIST_HEAD(l2_pdirpte_head, l2_pdirpte);
 379 */
 380
 381void pmap_map_shadow_entries(void);
 382void pmap_unmap_shadow_entries(void);
 383#endif /* PAE */
 384
357#define XPTE_MASK L1_FRAME 385#define XPTE_MASK L1_FRAME
358/* XPTE_SHIFT = L1_SHIFT - log2(sizeof(pt_entry_t)) */ 386/* XPTE_SHIFT = L1_SHIFT - log2(sizeof(pt_entry_t)) */
359#if defined(__x86_64__) || defined(PAE) 387#if defined(__x86_64__) || defined(PAE)
360#define XPTE_SHIFT 9 388#define XPTE_SHIFT 9
361#else 389#else
362#define XPTE_SHIFT 10 390#define XPTE_SHIFT 10
363#endif 391#endif
364 392
365/* PTE access inline fuctions */ 393/* PTE access inline fuctions */
366 394
367/* 395/*
368 * Get the machine address of the pointed pte 396 * Get the machine address of the pointed pte
369 * We use hardware MMU to get value so works only for levels 1-3 397 * We use hardware MMU to get value so works only for levels 1-3

cvs diff -r1.77.2.4 -r1.77.2.5 src/sys/arch/x86/x86/pmap.c (expand / switch to unified diff)

--- src/sys/arch/x86/x86/pmap.c 2009/07/23 23:31:37 1.77.2.4
+++ src/sys/arch/x86/x86/pmap.c 2009/07/24 11:30:28 1.77.2.5
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: pmap.c,v 1.77.2.4 2009/07/23 23:31:37 jym Exp $ */ 1/* $NetBSD: pmap.c,v 1.77.2.5 2009/07/24 11:30:28 jym Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2007 Manuel Bouyer. 4 * Copyright (c) 2007 Manuel Bouyer.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions 7 * modification, are permitted provided that the following conditions
8 * are met: 8 * are met:
9 * 1. Redistributions of source code must retain the above copyright 9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer. 10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright 11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the 12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution. 13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software 14 * 3. All advertising materials mentioning features or use of this software
@@ -144,27 +144,27 @@ @@ -144,27 +144,27 @@
144 * Hibler/Jolitz pmap, as modified for FreeBSD by John S. Dyson 144 * Hibler/Jolitz pmap, as modified for FreeBSD by John S. Dyson
145 * and David Greenman. 145 * and David Greenman.
146 * 146 *
147 * [3] the Mach pmap. this pmap, from CMU, seems to have migrated 147 * [3] the Mach pmap. this pmap, from CMU, seems to have migrated
148 * between several processors. the VAX version was done by 148 * between several processors. the VAX version was done by
149 * Avadis Tevanian, Jr., and Michael Wayne Young. the i386 149 * Avadis Tevanian, Jr., and Michael Wayne Young. the i386
150 * version was done by Lance Berc, Mike Kupfer, Bob Baron, 150 * version was done by Lance Berc, Mike Kupfer, Bob Baron,
151 * David Golub, and Richard Draves. the alpha version was 151 * David Golub, and Richard Draves. the alpha version was
152 * done by Alessandro Forin (CMU/Mach) and Chris Demetriou 152 * done by Alessandro Forin (CMU/Mach) and Chris Demetriou
153 * (NetBSD/alpha). 153 * (NetBSD/alpha).
154 */ 154 */
155 155
156#include <sys/cdefs.h> 156#include <sys/cdefs.h>
157__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.77.2.4 2009/07/23 23:31:37 jym Exp $"); 157__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.77.2.5 2009/07/24 11:30:28 jym Exp $");
158 158
159#include "opt_user_ldt.h" 159#include "opt_user_ldt.h"
160#include "opt_lockdebug.h" 160#include "opt_lockdebug.h"
161#include "opt_multiprocessor.h" 161#include "opt_multiprocessor.h"
162#include "opt_xen.h" 162#include "opt_xen.h"
163#if !defined(__x86_64__) 163#if !defined(__x86_64__)
164#include "opt_kstack_dr0.h" 164#include "opt_kstack_dr0.h"
165#endif /* !defined(__x86_64__) */ 165#endif /* !defined(__x86_64__) */
166 166
167#include <sys/param.h> 167#include <sys/param.h>
168#include <sys/systm.h> 168#include <sys/systm.h>
169#include <sys/proc.h> 169#include <sys/proc.h>
170#include <sys/pool.h> 170#include <sys/pool.h>
@@ -547,26 +547,32 @@ static char *csrcp, *cdstp, *zerop, *ptp @@ -547,26 +547,32 @@ static char *csrcp, *cdstp, *zerop, *ptp
547 547
548static struct pool_cache pmap_pdp_cache; 548static struct pool_cache pmap_pdp_cache;
549int pmap_pdp_ctor(void *, void *, int); 549int pmap_pdp_ctor(void *, void *, int);
550void pmap_pdp_dtor(void *, void *); 550void pmap_pdp_dtor(void *, void *);
551#ifdef PAE 551#ifdef PAE
552/* need to allocate items of 4 pages */ 552/* need to allocate items of 4 pages */
553void *pmap_pdp_alloc(struct pool *, int); 553void *pmap_pdp_alloc(struct pool *, int);
554void pmap_pdp_free(struct pool *, void *); 554void pmap_pdp_free(struct pool *, void *);
555static struct pool_allocator pmap_pdp_allocator = { 555static struct pool_allocator pmap_pdp_allocator = {
556 .pa_alloc = pmap_pdp_alloc, 556 .pa_alloc = pmap_pdp_alloc,
557 .pa_free = pmap_pdp_free, 557 .pa_free = pmap_pdp_free,
558 .pa_pagesz = PAGE_SIZE * PDP_SIZE, 558 .pa_pagesz = PAGE_SIZE * PDP_SIZE,
559}; 559};
 560
 561#ifdef XEN
 562/* pool for allocation of the structures tracking L2 pinned shadow pages */
 563//static struct pool_cache l2_pdirpte_cache;
 564#endif /* XEN */
 565
560#endif /* PAE */ 566#endif /* PAE */
561 567
562void *vmmap; /* XXX: used by mem.c... it should really uvm_map_reserve it */ 568void *vmmap; /* XXX: used by mem.c... it should really uvm_map_reserve it */
563 569
564extern vaddr_t idt_vaddr; /* we allocate IDT early */ 570extern vaddr_t idt_vaddr; /* we allocate IDT early */
565extern paddr_t idt_paddr; 571extern paddr_t idt_paddr;
566 572
567#ifdef _LP64 573#ifdef _LP64
568extern vaddr_t lo32_vaddr; 574extern vaddr_t lo32_vaddr;
569extern vaddr_t lo32_paddr; 575extern vaddr_t lo32_paddr;
570#endif 576#endif
571 577
572extern int end; 578extern int end;
@@ -742,61 +748,138 @@ pmap_is_active(struct pmap *pmap, struct @@ -742,61 +748,138 @@ pmap_is_active(struct pmap *pmap, struct
742} 748}
743 749
744/* 750/*
745 * Flush the content of APDP_PDE 751 * Flush the content of APDP_PDE
746 */ 752 */
747static inline 753static inline
748void pmap_unmap_apdp_pde(void) { 754void pmap_unmap_apdp_pde(void) {
749 755
750 int i; 756 int i;
751 757
752 for (i = 0; i < PDP_SIZE; i++) { 758 for (i = 0; i < PDP_SIZE; i++) {
753 pmap_pte_set(&APDP_PDE[i], 0); 759 pmap_pte_set(&APDP_PDE[i], 0);
754#ifdef PAE 760#ifdef PAE
755 /* clear shadow entry too */ 761 /* clear current pmap shadow entries too */
756 pmap_pte_set(&APDP_PDE_SHADOW[i], 0); 762 pmap_pte_set(&APDP_PDE_SHADOW[i], 0);
757#endif 763#endif
758 } 764 }
759 765
760} 766}
761 767
 768#ifdef XEN3
762/* 769/*
763 * Flush all APDP entries found in pmaps 770 * Flush all APDP entries found in pmaps
764 * Required during Xen save/restore operations, as it does not 771 * Required during Xen save/restore operations, as it does not
765 * handle alternative recursive mappings properly 772 * handle alternative recursive mappings properly
766 */ 773 */
767void 774void
768pmap_unmap_all_apdp_pdes(void) { 775pmap_unmap_all_apdp_pdes(void) {
769 776
770 // XXX JYM PAE 777 int i;
771 
772 int s; 778 int s;
773 struct pmap *pm; 779 struct pmap *pm;
774 780
775 s = splvm(); 781 s = splvm();
776 782
 783#ifdef PAE
 784 /*
 785 * For PAE, there are two places where alternative recursive mappings
 786 * could be found: in the L2 shadow pages, and the "real" L2 kernel
 787 * page (pmap_kl2pd), which is unique and static.
 788 * We first clear the APDP for the current pmap. As the L2 kernel page
 789 * is unique, we only need to do this once for all pmaps.
 790 */
 791 pmap_unmap_apdp_pde();
 792#endif
 793
 794 mutex_enter(&pmaps_lock);
 795 /*
 796 * Set APDP entries to 0 in all pmaps.
 797 * Note that for PAE kernels, this only clears the APDP entries
 798 * found in the L2 shadow pages, as pmap_pdirpa() is used to obtain
 799 * the PA of the pmap->pm_pdir[] pages (forming the 4 contiguous
 800 * pages of PAE PD: 3 for user space, 1 for the L2 kernel shadow page)
 801 */
 802 LIST_FOREACH(pm, &pmaps, pm_list) {
 803 for (i = 0; i < PDP_SIZE; i++) {
 804 xpq_queue_pte_update(
 805 xpmap_ptom(pmap_pdirpa(pm, PDIR_SLOT_APTE + i)), 0);
 806 }
 807 }
 808 mutex_exit(&pmaps_lock);
 809
 810 xpq_flush_queue();
 811
 812 splx(s);
 813
 814}
 815
 816#ifdef PAE
 817/*
 818 * NetBSD uses L2 shadow pages to support PAE with Xen. However, Xen does not
 819 * handle them correctly during save/restore, leading to incorrect page
 820 * tracking and pinning during restore.
 821 * For save/restore to succeed, two functions are introduced:
 822 * - pmap_map_shadow_entries(), used by resume code to set the last
 823 * PDIR_SLOT_PTE entry of each pmap back to its L2 shadow page, so
 824 * that pmap works again.
 825 * - pmap_unmap_shadow_entries(), used by suspend code to clear all
 826 * PDIR_SLOT_PTE entries pointing to L2 shadow entries
 827 */
 828void
 829pmap_map_shadow_entries(void) {
 830
 831 struct pmap *pm;
 832
777 mutex_enter(&pmaps_lock); 833 mutex_enter(&pmaps_lock);
 834
778 LIST_FOREACH(pm, &pmaps, pm_list) { 835 LIST_FOREACH(pm, &pmaps, pm_list) {
779 xpq_queue_pte_update( 836 xpq_queue_pte_update(
780 xpmap_ptom(pmap_pdirpa(pm, PDIR_SLOT_APTE)), 837 xpmap_ptom(pmap_pdirpa(pm, PDIR_SLOT_PTE + 3)),
781 0); 838 xpmap_ptom(pmap_pdirpa(pm, PDIR_SLOT_KERN)) | PG_V);
782 } 839 }
 840
 841 mutex_exit(&pmaps_lock);
 842
 843 xpq_queue_pte_update(
 844 xpmap_ptom(pmap_pdirpa(pmap_kernel(), PDIR_SLOT_PTE + 3)),
 845 xpmap_ptom(pmap_pdirpa(pmap_kernel(), PDIR_SLOT_KERN)) | PG_V);
 846
783 xpq_flush_queue(); 847 xpq_flush_queue();
 848}
 849
 850void
 851pmap_unmap_shadow_entries(void) {
 852
 853 struct pmap *pm;
 854
 855 mutex_enter(&pmaps_lock);
 856
 857 LIST_FOREACH(pm, &pmaps, pm_list) {
 858 xpq_queue_pte_update(
 859 xpmap_ptom(pmap_pdirpa(pm, PDIR_SLOT_PTE + 3)), 0);
 860 }
784 861
785 mutex_exit(&pmaps_lock); 862 mutex_exit(&pmaps_lock);
786 863
787 splx(s); 864 /* do it for pmap_kernel() too! */
 865 xpq_queue_pte_update(
 866 xpmap_ptom(pmap_pdirpa(pmap_kernel(), PDIR_SLOT_PTE + 3)), 0);
 867
 868 xpq_flush_queue();
788 869
789} 870}
 871#endif /* PAE */
 872#endif /* XEN3 */
790 873
791 874
792static void 875static void
793pmap_apte_flush(struct pmap *pmap) 876pmap_apte_flush(struct pmap *pmap)
794{ 877{
795 878
796 KASSERT(kpreempt_disabled()); 879 KASSERT(kpreempt_disabled());
797 880
798 /* 881 /*
799 * Flush the APTE mapping from all other CPUs that 882 * Flush the APTE mapping from all other CPUs that
800 * are using the pmap we are using (who's APTE space 883 * are using the pmap we are using (who's APTE space
801 * is the one we've just modified). 884 * is the one we've just modified).
802 * 885 *
@@ -1559,27 +1642,27 @@ pmap_bootstrap(vaddr_t kva_start) @@ -1559,27 +1642,27 @@ pmap_bootstrap(vaddr_t kva_start)
1559 * when deactivate a pmap, Xen will still consider it active. 1642 * when deactivate a pmap, Xen will still consider it active.
1560 * So we set user PGD to this one to lift all protection on 1643 * So we set user PGD to this one to lift all protection on
1561 * the now inactive page tables set. 1644 * the now inactive page tables set.
1562 */ 1645 */
1563 xen_dummy_user_pgd = avail_start; 1646 xen_dummy_user_pgd = avail_start;
1564 avail_start += PAGE_SIZE; 1647 avail_start += PAGE_SIZE;
1565  1648
1566 /* Zero fill it, the less checks in Xen it requires the better */ 1649 /* Zero fill it, the less checks in Xen it requires the better */
1567 memset((void *) (xen_dummy_user_pgd + KERNBASE), 0, PAGE_SIZE); 1650 memset((void *) (xen_dummy_user_pgd + KERNBASE), 0, PAGE_SIZE);
1568 /* Mark read-only */ 1651 /* Mark read-only */
1569 HYPERVISOR_update_va_mapping(xen_dummy_user_pgd + KERNBASE, 1652 HYPERVISOR_update_va_mapping(xen_dummy_user_pgd + KERNBASE,
1570 pmap_pa2pte(xen_dummy_user_pgd) | PG_u | PG_V, UVMF_INVLPG); 1653 pmap_pa2pte(xen_dummy_user_pgd) | PG_u | PG_V, UVMF_INVLPG);
1571 /* Pin as L4 */ 1654 /* Pin as L4 */
1572 xpq_queue_pin_table(xpmap_ptom_masked(xen_dummy_user_pgd)); 1655 xpq_queue_pin_l4_table(xpmap_ptom_masked(xen_dummy_user_pgd));
1573#endif /* __x86_64__ */ 1656#endif /* __x86_64__ */
1574 idt_vaddr = virtual_avail; /* don't need pte */ 1657 idt_vaddr = virtual_avail; /* don't need pte */
1575 idt_paddr = avail_start; /* steal a page */ 1658 idt_paddr = avail_start; /* steal a page */
1576 /* 1659 /*
1577 * Xen require one more page as we can't store 1660 * Xen require one more page as we can't store
1578 * GDT and LDT on the same page 1661 * GDT and LDT on the same page
1579 */ 1662 */
1580 virtual_avail += 3 * PAGE_SIZE; 1663 virtual_avail += 3 * PAGE_SIZE;
1581 avail_start += 3 * PAGE_SIZE; 1664 avail_start += 3 * PAGE_SIZE;
1582#else /* XEN */ 1665#else /* XEN */
1583 idt_vaddr = virtual_avail; /* don't need pte */ 1666 idt_vaddr = virtual_avail; /* don't need pte */
1584 idt_paddr = avail_start; /* steal a page */ 1667 idt_paddr = avail_start; /* steal a page */
1585#if defined(__x86_64__) 1668#if defined(__x86_64__)
@@ -1620,26 +1703,30 @@ pmap_bootstrap(vaddr_t kva_start) @@ -1620,26 +1703,30 @@ pmap_bootstrap(vaddr_t kva_start)
1620 */ 1703 */
1621 1704
1622 mutex_init(&pmaps_lock, MUTEX_DEFAULT, IPL_NONE); 1705 mutex_init(&pmaps_lock, MUTEX_DEFAULT, IPL_NONE);
1623 LIST_INIT(&pmaps); 1706 LIST_INIT(&pmaps);
1624 pmap_cpu_init_early(curcpu()); 1707 pmap_cpu_init_early(curcpu());
1625 1708
1626 /* 1709 /*
1627 * initialize caches. 1710 * initialize caches.
1628 */ 1711 */
1629 1712
1630 pool_cache_bootstrap(&pmap_cache, sizeof(struct pmap), 0, 0, 0, 1713 pool_cache_bootstrap(&pmap_cache, sizeof(struct pmap), 0, 0, 0,
1631 "pmappl", NULL, IPL_NONE, NULL, NULL, NULL); 1714 "pmappl", NULL, IPL_NONE, NULL, NULL, NULL);
1632#ifdef PAE 1715#ifdef PAE
 1716#ifdef XEN
 1717 //pool_cache_bootstrap(&l2_pdirpte_cache, sizeof(struct l2_pdirpte), 0, 0,
 1718 // 0, "l2_pdirptepl", NULL, IPL_NONE, NULL, NULL, NULL);
 1719#endif /* XEN */
1633 pool_cache_bootstrap(&pmap_pdp_cache, PAGE_SIZE * PDP_SIZE, 0, 0, 0, 1720 pool_cache_bootstrap(&pmap_pdp_cache, PAGE_SIZE * PDP_SIZE, 0, 0, 0,
1634 "pdppl", &pmap_pdp_allocator, IPL_NONE, 1721 "pdppl", &pmap_pdp_allocator, IPL_NONE,
1635 pmap_pdp_ctor, pmap_pdp_dtor, NULL); 1722 pmap_pdp_ctor, pmap_pdp_dtor, NULL);
1636#else /* PAE */ 1723#else /* PAE */
1637 pool_cache_bootstrap(&pmap_pdp_cache, PAGE_SIZE, 0, 0, 0, 1724 pool_cache_bootstrap(&pmap_pdp_cache, PAGE_SIZE, 0, 0, 0,
1638 "pdppl", NULL, IPL_NONE, pmap_pdp_ctor, pmap_pdp_dtor, NULL); 1725 "pdppl", NULL, IPL_NONE, pmap_pdp_ctor, pmap_pdp_dtor, NULL);
1639#endif /* PAE */ 1726#endif /* PAE */
1640 pool_cache_bootstrap(&pmap_pv_cache, sizeof(struct pv_entry), 0, 0, 1727 pool_cache_bootstrap(&pmap_pv_cache, sizeof(struct pv_entry), 0, 0,
1641 PR_LARGECACHE, "pvpl", &pool_allocator_meta, IPL_NONE, NULL, 1728 PR_LARGECACHE, "pvpl", &pool_allocator_meta, IPL_NONE, NULL,
1642 NULL, NULL); 1729 NULL, NULL);
1643 1730
1644 /* 1731 /*
1645 * ensure the TLB is sync'd with reality by flushing it... 1732 * ensure the TLB is sync'd with reality by flushing it...
@@ -2212,32 +2299,32 @@ pmap_pdp_ctor(void *arg, void *v, int fl @@ -2212,32 +2299,32 @@ pmap_pdp_ctor(void *arg, void *v, int fl
2212 for (i = 0; i < PDP_SIZE; i++, object += PAGE_SIZE) { 2299 for (i = 0; i < PDP_SIZE; i++, object += PAGE_SIZE) {
2213 (void) pmap_extract(pmap_kernel(), object, &pdirpa); 2300 (void) pmap_extract(pmap_kernel(), object, &pdirpa);
2214 /* remap this page RO */ 2301 /* remap this page RO */
2215 pmap_kenter_pa(object, pdirpa, VM_PROT_READ); 2302 pmap_kenter_pa(object, pdirpa, VM_PROT_READ);
2216 pmap_update(pmap_kernel()); 2303 pmap_update(pmap_kernel());
2217 /* 2304 /*
2218 * pin as L2/L4 page, we have to do the page with the 2305 * pin as L2/L4 page, we have to do the page with the
2219 * PDIR_SLOT_PTE entries last 2306 * PDIR_SLOT_PTE entries last
2220 */ 2307 */
2221#ifdef PAE 2308#ifdef PAE
2222 if (i == l2tol3(PDIR_SLOT_PTE)) 2309 if (i == l2tol3(PDIR_SLOT_PTE))
2223 continue; 2310 continue;
2224#endif 2311#endif
2225 xpq_queue_pin_table(xpmap_ptom_masked(pdirpa)); 2312 xpq_queue_pin_l2_table(xpmap_ptom_masked(pdirpa));
2226 } 2313 }
2227#ifdef PAE 2314#ifdef PAE
2228 object = ((vaddr_t)pdir) + PAGE_SIZE * l2tol3(PDIR_SLOT_PTE); 2315 object = ((vaddr_t)pdir) + PAGE_SIZE * l2tol3(PDIR_SLOT_PTE);
2229 (void)pmap_extract(pmap_kernel(), object, &pdirpa); 2316 (void)pmap_extract(pmap_kernel(), object, &pdirpa);
2230 xpq_queue_pin_table(xpmap_ptom_masked(pdirpa)); 2317 xpq_queue_pin_l2_table(xpmap_ptom_masked(pdirpa));
2231#endif 2318#endif
2232 xpq_flush_queue(); 2319 xpq_flush_queue();
2233 splx(s); 2320 splx(s);
2234 2321
2235 xen_release_ptom_lock(); 2322 xen_release_ptom_lock();
2236#endif /* XEN */ 2323#endif /* XEN */
2237 2324
2238 return (0); 2325 return (0);
2239} 2326}
2240 2327
2241/* 2328/*
2242 * pmap_pdp_dtor: destructor for the PDP cache. 2329 * pmap_pdp_dtor: destructor for the PDP cache.
2243 */ 2330 */
@@ -2302,28 +2389,32 @@ pmap_pdp_free(struct pool *pp, void *v) @@ -2302,28 +2389,32 @@ pmap_pdp_free(struct pool *pp, void *v)
2302 2389
2303/* 2390/*
2304 * pmap_create: create a pmap 2391 * pmap_create: create a pmap
2305 * 2392 *
2306 * => note: old pmap interface took a "size" args which allowed for 2393 * => note: old pmap interface took a "size" args which allowed for
2307 * the creation of "software only" pmaps (not in bsd). 2394 * the creation of "software only" pmaps (not in bsd).
2308 */ 2395 */
2309 2396
2310struct pmap * 2397struct pmap *
2311pmap_create(void) 2398pmap_create(void)
2312{ 2399{
2313 struct pmap *pmap; 2400 struct pmap *pmap;
2314 int i; 2401 int i;
 2402#ifdef PAE
 2403 //struct l2_pdirpte *l2s;
 2404#endif /* PAE */
2315 2405
2316 pmap = pool_cache_get(&pmap_cache, PR_WAITOK); 2406 pmap = pool_cache_get(&pmap_cache, PR_WAITOK);
 2407 //l2p = pool_cache_get(&l2_pdirpte_cache, PR_WAITOK);
2317 2408
2318 /* init uvm_object */ 2409 /* init uvm_object */
2319 for (i = 0; i < PTP_LEVELS - 1; i++) { 2410 for (i = 0; i < PTP_LEVELS - 1; i++) {
2320 UVM_OBJ_INIT(&pmap->pm_obj[i], NULL, 1); 2411 UVM_OBJ_INIT(&pmap->pm_obj[i], NULL, 1);
2321 pmap->pm_ptphint[i] = NULL; 2412 pmap->pm_ptphint[i] = NULL;
2322 } 2413 }
2323 pmap->pm_stats.wired_count = 0; 2414 pmap->pm_stats.wired_count = 0;
2324 pmap->pm_stats.resident_count = 1; /* count the PDP allocd below */ 2415 pmap->pm_stats.resident_count = 1; /* count the PDP allocd below */
2325#if !defined(__x86_64__) 2416#if !defined(__x86_64__)
2326 pmap->pm_hiexec = 0; 2417 pmap->pm_hiexec = 0;
2327#endif /* !defined(__x86_64__) */ 2418#endif /* !defined(__x86_64__) */
2328 pmap->pm_flags = 0; 2419 pmap->pm_flags = 0;
2329 pmap->pm_cpus = 0; 2420 pmap->pm_cpus = 0;
@@ -2344,26 +2435,34 @@ pmap_create(void) @@ -2344,26 +2435,34 @@ pmap_create(void)
2344 mutex_exit(&pmaps_lock); 2435 mutex_exit(&pmaps_lock);
2345 pool_cache_destruct_object(&pmap_pdp_cache, pmap->pm_pdir); 2436 pool_cache_destruct_object(&pmap_pdp_cache, pmap->pm_pdir);
2346 goto try_again; 2437 goto try_again;
2347 } 2438 }
2348 2439
2349#ifdef XEN3 2440#ifdef XEN3
2350 xen_acquire_reader_ptom_lock(); 2441 xen_acquire_reader_ptom_lock();
2351#endif 2442#endif
2352 2443
2353#ifdef PAE 2444#ifdef PAE
2354 for (i = 0; i < PDP_SIZE; i++) 2445 for (i = 0; i < PDP_SIZE; i++)
2355 pmap->pm_pdirpa[i] = 2446 pmap->pm_pdirpa[i] =
2356 pmap_pte2pa(pmap->pm_pdir[PDIR_SLOT_PTE + i]); 2447 pmap_pte2pa(pmap->pm_pdir[PDIR_SLOT_PTE + i]);
 2448
 2449 /*
 2450 * We now have an L2 page with a valid PDIR_SLOT_PTE pointing
 2451 * to a shadow L2, so track it
 2452 l2p->slot_pte = pdirpa;
 2453 l2p->slot_pte_content = pmap_pte2pa(pmap->pm_pdir[PDIR_SLOT_PTE + i]);
 2454 */
 2455
2357#else 2456#else
2358 pmap->pm_pdirpa = pmap_pte2pa(pmap->pm_pdir[PDIR_SLOT_PTE]); 2457 pmap->pm_pdirpa = pmap_pte2pa(pmap->pm_pdir[PDIR_SLOT_PTE]);
2359#endif 2458#endif
2360 2459
2361#ifdef XEN3 2460#ifdef XEN3
2362 xen_release_ptom_lock(); 2461 xen_release_ptom_lock();
2363#endif 2462#endif
2364 2463
2365 LIST_INSERT_HEAD(&pmaps, pmap, pm_list); 2464 LIST_INSERT_HEAD(&pmaps, pmap, pm_list);
2366 2465
2367 mutex_exit(&pmaps_lock); 2466 mutex_exit(&pmaps_lock);
2368 2467
2369 return (pmap); 2468 return (pmap);

cvs diff -r1.21.8.2 -r1.21.8.3 src/sys/arch/xen/include/xenpmap.h

--- src/sys/arch/xen/include/xenpmap.h 2009/05/13 17:18:50 1.21.8.2
+++ src/sys/arch/xen/include/xenpmap.h 2009/07/24 11:30:28 1.21.8.3
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: xenpmap.h,v 1.21.8.2 2009/05/13 17:18:50 jym Exp $ */ 1/* $NetBSD: xenpmap.h,v 1.21.8.3 2009/07/24 11:30:28 jym Exp $ */
2 2
3/* 3/*
4 * 4 *
5 * Copyright (c) 2004 Christian Limpach. 5 * Copyright (c) 2004 Christian Limpach.
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions 9 * modification, are permitted provided that the following conditions
10 * are met: 10 * are met:
11 * 1. Redistributions of source code must retain the above copyright 11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer. 12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright 13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the 14 * notice, this list of conditions and the following disclaimer in the
@@ -35,30 +35,54 @@ @@ -35,30 +35,54 @@
35#ifndef _XEN_XENPMAP_H_ 35#ifndef _XEN_XENPMAP_H_
36#define _XEN_XENPMAP_H_ 36#define _XEN_XENPMAP_H_
37#include "opt_xen.h" 37#include "opt_xen.h"
38 38
39#define INVALID_P2M_ENTRY (~0UL) 39#define INVALID_P2M_ENTRY (~0UL)
40 40
41void xpq_queue_machphys_update(paddr_t, paddr_t); 41void xpq_queue_machphys_update(paddr_t, paddr_t);
42void xpq_queue_invlpg(vaddr_t); 42void xpq_queue_invlpg(vaddr_t);
43void xpq_queue_pte_update(paddr_t, pt_entry_t); 43void xpq_queue_pte_update(paddr_t, pt_entry_t);
44void xpq_queue_pt_switch(paddr_t); 44void xpq_queue_pt_switch(paddr_t);
45void xpq_flush_queue(void); 45void xpq_flush_queue(void);
46void xpq_queue_set_ldt(vaddr_t, uint32_t); 46void xpq_queue_set_ldt(vaddr_t, uint32_t);
47void xpq_queue_tlb_flush(void); 47void xpq_queue_tlb_flush(void);
48void xpq_queue_pin_table(paddr_t); 48void xpq_queue_pin_table(paddr_t, unsigned int);
49void xpq_queue_unpin_table(paddr_t); 49void xpq_queue_unpin_table(paddr_t);
50int xpq_update_foreign(paddr_t, pt_entry_t, int); 50int xpq_update_foreign(paddr_t, pt_entry_t, int);
51 51
 52static void inline
 53xpq_queue_pin_l1_table(paddr_t pa) {
 54 xpq_queue_pin_table(pa, MMUEXT_PIN_L1_TABLE);
 55}
 56
 57static void inline
 58xpq_queue_pin_l2_table(paddr_t pa) {
 59 xpq_queue_pin_table(pa, MMUEXT_PIN_L2_TABLE);
 60}
 61
 62#if defined(PAE) || defined(__x86_64__)
 63static void inline
 64xpq_queue_pin_l3_table(paddr_t pa) {
 65 xpq_queue_pin_table(pa, MMUEXT_PIN_L3_TABLE);
 66}
 67#endif
 68
 69#if defined(__x86_64__)
 70static void inline
 71xpq_queue_pin_l4_table(paddr_t pa) {
 72 xpq_queue_pin_table(pa, MMUEXT_PIN_L4_TABLE);
 73}
 74#endif
 75
52extern unsigned long *xpmap_phys_to_machine_mapping; 76extern unsigned long *xpmap_phys_to_machine_mapping;
53 77
54/*  78/*
55 * On Xen-2, the start of the day virtual memory starts at KERNTEXTOFF 79 * On Xen-2, the start of the day virtual memory starts at KERNTEXTOFF
56 * (0xc0100000). On Xen-3 for domain0 it starts at KERNBASE (0xc0000000). 80 * (0xc0100000). On Xen-3 for domain0 it starts at KERNBASE (0xc0000000).
57 * So the offset between physical and virtual address is different on 81 * So the offset between physical and virtual address is different on
58 * Xen-2 and Xen-3 for domain0. 82 * Xen-2 and Xen-3 for domain0.
59 * starting with xen-3.0.2, we can add notes so that virtual memory starts 83 * starting with xen-3.0.2, we can add notes so that virtual memory starts
60 * at KERNBASE for domU as well. 84 * at KERNBASE for domU as well.
61 */  85 */
62#if defined(XEN3) && (defined(DOM0OPS) || !defined(XEN_COMPAT_030001)) 86#if defined(XEN3) && (defined(DOM0OPS) || !defined(XEN_COMPAT_030001))
63#define XPMAP_OFFSET 0 87#define XPMAP_OFFSET 0
64#else 88#else

cvs diff -r1.12.4.4 -r1.12.4.5 src/sys/arch/xen/x86/x86_xpmap.c

--- src/sys/arch/xen/x86/x86_xpmap.c 2009/07/23 23:31:37 1.12.4.4
+++ src/sys/arch/xen/x86/x86_xpmap.c 2009/07/24 11:30:28 1.12.4.5
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: x86_xpmap.c,v 1.12.4.4 2009/07/23 23:31:37 jym Exp $ */ 1/* $NetBSD: x86_xpmap.c,v 1.12.4.5 2009/07/24 11:30:28 jym Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2006 Mathieu Ropert <mro@adviseo.fr> 4 * Copyright (c) 2006 Mathieu Ropert <mro@adviseo.fr>
5 * 5 *
6 * Permission to use, copy, modify, and distribute this software for any 6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above 7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies. 8 * copyright notice and this permission notice appear in all copies.
9 * 9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
@@ -69,27 +69,27 @@ @@ -69,27 +69,27 @@
69 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 69 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
70 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 70 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
71 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 71 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
72 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 72 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
73 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 73 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
74 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 74 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
75 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 75 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
76 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 76 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
77 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 77 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
78 */ 78 */
79 79
80 80
81#include <sys/cdefs.h> 81#include <sys/cdefs.h>
82__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.12.4.4 2009/07/23 23:31:37 jym Exp $"); 82__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.12.4.5 2009/07/24 11:30:28 jym Exp $");
83 83
84#include "opt_xen.h" 84#include "opt_xen.h"
85#include "opt_ddb.h" 85#include "opt_ddb.h"
86#include "ksyms.h" 86#include "ksyms.h"
87 87
88#include <sys/param.h> 88#include <sys/param.h>
89#include <sys/systm.h> 89#include <sys/systm.h>
90#include <sys/rwlock.h> 90#include <sys/rwlock.h>
91 91
92#include <uvm/uvm.h> 92#include <uvm/uvm.h>
93 93
94#include <machine/pmap.h> 94#include <machine/pmap.h>
95#include <machine/gdt.h> 95#include <machine/gdt.h>
@@ -288,60 +288,41 @@ xpq_queue_pt_switch(paddr_t pa) @@ -288,60 +288,41 @@ xpq_queue_pt_switch(paddr_t pa)
288{ 288{
289 struct mmuext_op op; 289 struct mmuext_op op;
290 xpq_flush_queue(); 290 xpq_flush_queue();
291 291
292 XENPRINTK2(("xpq_queue_pt_switch: 0x%" PRIx64 " 0x%" PRIx64 "\n", 292 XENPRINTK2(("xpq_queue_pt_switch: 0x%" PRIx64 " 0x%" PRIx64 "\n",
293 (int64_t)pa, (int64_t)pa)); 293 (int64_t)pa, (int64_t)pa));
294 op.cmd = MMUEXT_NEW_BASEPTR; 294 op.cmd = MMUEXT_NEW_BASEPTR;
295 op.arg1.mfn = pa >> PAGE_SHIFT; 295 op.arg1.mfn = pa >> PAGE_SHIFT;
296 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) 296 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
297 panic("xpq_queue_pt_switch"); 297 panic("xpq_queue_pt_switch");
298} 298}
299 299
300void 300void
301xpq_queue_pin_table(paddr_t pa) 301xpq_queue_pin_table(paddr_t pa, unsigned int level) {
302{ 
303 struct mmuext_op op; 
304 xpq_flush_queue(); 
305 
306 XENPRINTK2(("xpq_queue_pin_table: 0x%" PRIx64 " 0x%" PRIx64 "\n", 
307 (int64_t)pa, (int64_t)pa)); 
308 op.arg1.mfn = pa >> PAGE_SHIFT; 
309 
310#if defined(__x86_64__) 
311 op.cmd = MMUEXT_PIN_L4_TABLE; 
312#else 
313 op.cmd = MMUEXT_PIN_L2_TABLE; 
314#endif 
315 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) 
316 panic("xpq_queue_pin_table"); 
317} 
318 302
319#ifdef PAE 
320static void 
321xpq_queue_pin_l3_table(paddr_t pa) 
322{ 
323 struct mmuext_op op; 303 struct mmuext_op op;
324 xpq_flush_queue(); 304 xpq_flush_queue();
325 305
326 XENPRINTK2(("xpq_queue_pin_l2_table: 0x%" PRIx64 " 0x%" PRIx64 "\n", 306 XENPRINTK2(("xpq_queue_pin_table: level %u %#"PRIx64"\n",
327 (int64_t)pa, (int64_t)pa)); 307 level, (int64_t)pa));
 308
328 op.arg1.mfn = pa >> PAGE_SHIFT; 309 op.arg1.mfn = pa >> PAGE_SHIFT;
 310 op.cmd = level;
329 311
330 op.cmd = MMUEXT_PIN_L3_TABLE; 
331 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) 312 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
332 panic("xpq_queue_pin_table"); 313 panic("xpq_queue_pin_table: level %u %#"PRIx64"\n",
 314 level, (int64_t)pa);
333} 315}
334#endif 
335 316
336void 317void
337xpq_queue_unpin_table(paddr_t pa) 318xpq_queue_unpin_table(paddr_t pa)
338{ 319{
339 struct mmuext_op op; 320 struct mmuext_op op;
340 xpq_flush_queue(); 321 xpq_flush_queue();
341 322
342 XENPRINTK2(("xpq_queue_unpin_table: 0x%" PRIx64 " 0x%" PRIx64 "\n", 323 XENPRINTK2(("xpq_queue_unpin_table: 0x%" PRIx64 " 0x%" PRIx64 "\n",
343 (int64_t)pa, (int64_t)pa)); 324 (int64_t)pa, (int64_t)pa));
344 op.arg1.mfn = pa >> PAGE_SHIFT; 325 op.arg1.mfn = pa >> PAGE_SHIFT;
345 op.cmd = MMUEXT_UNPIN_TABLE; 326 op.cmd = MMUEXT_UNPIN_TABLE;
346 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) 327 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
347 panic("xpq_queue_unpin_table"); 328 panic("xpq_queue_unpin_table");
@@ -924,61 +905,63 @@ xen_bootstrap_tables (vaddr_t old_pgd, v @@ -924,61 +905,63 @@ xen_bootstrap_tables (vaddr_t old_pgd, v
924 pde[PDIR_SLOT_PTE + 3] = xpmap_ptom_masked(addr) | PG_k | PG_V; 905 pde[PDIR_SLOT_PTE + 3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
925 __PRINTK(("pde[%d] va 0x%lx pa 0x%lx entry 0x%" PRIx64 "\n", 906 __PRINTK(("pde[%d] va 0x%lx pa 0x%lx entry 0x%" PRIx64 "\n",
926 (int)(PDIR_SLOT_PTE + 3), pde + PAGE_SIZE * 4, (long)addr, 907 (int)(PDIR_SLOT_PTE + 3), pde + PAGE_SIZE * 4, (long)addr,
927 (int64_t)pde[PDIR_SLOT_PTE + 3])); 908 (int64_t)pde[PDIR_SLOT_PTE + 3]));
928#endif 909#endif
929 /* Mark tables RO, and pin the kernel's shadow as L2 */ 910 /* Mark tables RO, and pin the kernel's shadow as L2 */
930 addr = (u_long)pde - KERNBASE; 911 addr = (u_long)pde - KERNBASE;
931 for (i = 0; i < 5; i++, addr += PAGE_SIZE) { 912 for (i = 0; i < 5; i++, addr += PAGE_SIZE) {
932 xen_bt_set_readonly(((vaddr_t)pde) + PAGE_SIZE * i); 913 xen_bt_set_readonly(((vaddr_t)pde) + PAGE_SIZE * i);
933 if (i == 2 || i == 3) 914 if (i == 2 || i == 3)
934 continue; 915 continue;
935#if 0 916#if 0
936 __PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", i, (int64_t)addr)); 917 __PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", i, (int64_t)addr));
937 xpq_queue_pin_table(xpmap_ptom_masked(addr)); 918 xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
938#endif 919#endif
939 } 920 }
940 if (final) { 921 if (final) {
941 addr = (u_long)pde - KERNBASE + 3 * PAGE_SIZE; 922 addr = (u_long)pde - KERNBASE + 3 * PAGE_SIZE;
942 __PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", 2, (int64_t)addr)); 923 __PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", 2, (int64_t)addr));
943 xpq_queue_pin_table(xpmap_ptom_masked(addr)); 924 xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
944 } 925 }
945#if 0 926#if 0
946 addr = (u_long)pde - KERNBASE + 2 * PAGE_SIZE; 927 addr = (u_long)pde - KERNBASE + 2 * PAGE_SIZE;
947 __PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", 2, (int64_t)addr)); 928 __PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", 2, (int64_t)addr));
948 xpq_queue_pin_table(xpmap_ptom_masked(addr)); 929 xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
949#endif 930#endif
950#else /* PAE */ 931#else /* PAE */
951 /* recursive entry in higher-level PD */ 932 /* recursive entry in higher-level PD */
952 bt_pgd[PDIR_SLOT_PTE] = 933 bt_pgd[PDIR_SLOT_PTE] =
953 xpmap_ptom_masked(new_pgd - KERNBASE) | PG_k | PG_V; 934 xpmap_ptom_masked(new_pgd - KERNBASE) | PG_k | PG_V;
954 __PRINTK(("bt_pgd[PDIR_SLOT_PTE] va 0x%lx pa 0x%" PRIx64 935 __PRINTK(("bt_pgd[PDIR_SLOT_PTE] va 0x%lx pa 0x%" PRIx64
955 " entry 0x%" PRIx64 "\n", new_pgd, (int64_t)new_pgd - KERNBASE, 936 " entry 0x%" PRIx64 "\n", new_pgd, (int64_t)new_pgd - KERNBASE,
956 (int64_t)bt_pgd[PDIR_SLOT_PTE])); 937 (int64_t)bt_pgd[PDIR_SLOT_PTE]));
957 /* Mark tables RO */ 938 /* Mark tables RO */
958 xen_bt_set_readonly((vaddr_t) pde); 939 xen_bt_set_readonly((vaddr_t) pde);
959#endif 940#endif
960#if PTP_LEVELS > 2 || defined(PAE) 941#if PTP_LEVELS > 2 || defined(PAE)
961 xen_bt_set_readonly((vaddr_t) pdtpe); 942 xen_bt_set_readonly((vaddr_t) pdtpe);
962#endif 943#endif
963#if PTP_LEVELS > 3 944#if PTP_LEVELS > 3
964 xen_bt_set_readonly(new_pgd); 945 xen_bt_set_readonly(new_pgd);
965#endif 946#endif
966 /* Pin the PGD */ 947 /* Pin the PGD */
967 __PRINTK(("pin PGD\n")); 948 __PRINTK(("pin PGD\n"));
968#ifdef PAE 949#ifdef PAE
969 xpq_queue_pin_l3_table(xpmap_ptom_masked(new_pgd - KERNBASE)); 950 xpq_queue_pin_l3_table(xpmap_ptom_masked(new_pgd - KERNBASE));
 951#elif __x86_64__
 952 xpq_queue_pin_l4_table(xpmap_ptom_masked(new_pgd - KERNBASE));
970#else 953#else
971 xpq_queue_pin_table(xpmap_ptom_masked(new_pgd - KERNBASE)); 954 xpq_queue_pin_l2_table(xpmap_ptom_masked(new_pgd - KERNBASE));
972#endif 955#endif
973#ifdef __i386__ 956#ifdef __i386__
974 /* Save phys. addr of PDP, for libkvm. */ 957 /* Save phys. addr of PDP, for libkvm. */
975 PDPpaddr = (long)pde; 958 PDPpaddr = (long)pde;
976#ifdef PAE 959#ifdef PAE
977 /* also save the address of the L3 page */ 960 /* also save the address of the L3 page */
978 pmap_l3pd = pdtpe; 961 pmap_l3pd = pdtpe;
979 pmap_l3paddr = (new_pgd - KERNBASE); 962 pmap_l3paddr = (new_pgd - KERNBASE);
980#endif /* PAE */ 963#endif /* PAE */
981#endif /* i386 */ 964#endif /* i386 */
982 /* Switch to new tables */ 965 /* Switch to new tables */
983 __PRINTK(("switch to PGD\n")); 966 __PRINTK(("switch to PGD\n"));
984 xpq_queue_pt_switch(xpmap_ptom_masked(new_pgd - KERNBASE)); 967 xpq_queue_pt_switch(xpmap_ptom_masked(new_pgd - KERNBASE));

cvs diff -r1.4.12.3 -r1.4.12.4 src/sys/arch/xen/xen/xen_machdep.c

--- src/sys/arch/xen/xen/xen_machdep.c 2009/05/31 20:15:37 1.4.12.3
+++ src/sys/arch/xen/xen/xen_machdep.c 2009/07/24 11:30:28 1.4.12.4
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: xen_machdep.c,v 1.4.12.3 2009/05/31 20:15:37 jym Exp $ */ 1/* $NetBSD: xen_machdep.c,v 1.4.12.4 2009/07/24 11:30:28 jym Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2006 Manuel Bouyer. 4 * Copyright (c) 2006 Manuel Bouyer.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions 7 * modification, are permitted provided that the following conditions
8 * are met: 8 * are met:
9 * 1. Redistributions of source code must retain the above copyright 9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer. 10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright 11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the 12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution. 13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software 14 * 3. All advertising materials mentioning features or use of this software
@@ -53,27 +53,27 @@ @@ -53,27 +53,27 @@
53 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 53 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
54 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 54 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
55 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 55 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
56 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 56 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
57 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 57 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
58 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 58 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
59 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 59 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
60 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 60 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
61 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 61 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
62 */ 62 */
63 63
64 64
65#include <sys/cdefs.h> 65#include <sys/cdefs.h>
66__KERNEL_RCSID(0, "$NetBSD: xen_machdep.c,v 1.4.12.3 2009/05/31 20:15:37 jym Exp $"); 66__KERNEL_RCSID(0, "$NetBSD: xen_machdep.c,v 1.4.12.4 2009/07/24 11:30:28 jym Exp $");
67 67
68#include "opt_xen.h" 68#include "opt_xen.h"
69 69
70#include <sys/param.h> 70#include <sys/param.h>
71#include <sys/systm.h> 71#include <sys/systm.h>
72#include <sys/boot_flag.h> 72#include <sys/boot_flag.h>
73#include <sys/mount.h> 73#include <sys/mount.h>
74#include <sys/reboot.h> 74#include <sys/reboot.h>
75#include <sys/timetc.h> 75#include <sys/timetc.h>
76#include <sys/sysctl.h> 76#include <sys/sysctl.h>
77#include <sys/pmf.h> 77#include <sys/pmf.h>
78 78
79#include <xen/hypervisor.h> 79#include <xen/hypervisor.h>
@@ -283,26 +283,30 @@ xen_prepare_suspend(void) { @@ -283,26 +283,30 @@ xen_prepare_suspend(void) {
283 283
284 kpreempt_disable(); 284 kpreempt_disable();
285 285
286 xen_suspendclocks(); 286 xen_suspendclocks();
287 287
288 xen_acquire_writer_ptom_lock(); 288 xen_acquire_writer_ptom_lock();
289 289
290 /* 290 /*
291 * Xen lazy evaluation of recursive mappings requires 291 * Xen lazy evaluation of recursive mappings requires
292 * to flush the APDP entries 292 * to flush the APDP entries
293 */ 293 */
294 pmap_unmap_all_apdp_pdes(); 294 pmap_unmap_all_apdp_pdes();
295 295
 296#ifdef PAE
 297 pmap_unmap_shadow_entries();
 298#endif
 299
296 /* 300 /*
297 * save/restore code does not translate these MFNs to their 301 * save/restore code does not translate these MFNs to their
298 * associated PFNs, so we must do it 302 * associated PFNs, so we must do it
299 */ 303 */
300 xen_start_info.store_mfn = mfn_to_pfn(xen_start_info.store_mfn); 304 xen_start_info.store_mfn = mfn_to_pfn(xen_start_info.store_mfn);
301 xen_start_info.console_mfn = mfn_to_pfn(xen_start_info.console_mfn); 305 xen_start_info.console_mfn = mfn_to_pfn(xen_start_info.console_mfn);
302 306
303 DPRINTK(("suspending domain\n")); 307 DPRINTK(("suspending domain\n"));
304 aprint_verbose("suspending domain\n"); 308 aprint_verbose("suspending domain\n");
305 309
306 /* invalidate the shared_info page */ 310 /* invalidate the shared_info page */
307 if (HYPERVISOR_update_va_mapping((vaddr_t)HYPERVISOR_shared_info, 311 if (HYPERVISOR_update_va_mapping((vaddr_t)HYPERVISOR_shared_info,
308 0, UVMF_INVLPG)) { 312 0, UVMF_INVLPG)) {
@@ -316,26 +320,30 @@ xen_prepare_suspend(void) { @@ -316,26 +320,30 @@ xen_prepare_suspend(void) {
316 * First operations before restoring domain context 320 * First operations before restoring domain context
317 */ 321 */
318static void 322static void
319xen_prepare_resume(void) { 323xen_prepare_resume(void) {
320 324
321 /* map the new shared_info page */ 325 /* map the new shared_info page */
322 if (HYPERVISOR_update_va_mapping((vaddr_t)HYPERVISOR_shared_info, 326 if (HYPERVISOR_update_va_mapping((vaddr_t)HYPERVISOR_shared_info,
323 xen_start_info.shared_info | PG_RW | PG_V, 327 xen_start_info.shared_info | PG_RW | PG_V,
324 UVMF_INVLPG)) { 328 UVMF_INVLPG)) {
325 DPRINTK(("could not map new shared info page")); 329 DPRINTK(("could not map new shared info page"));
326 HYPERVISOR_crash(); 330 HYPERVISOR_crash();
327 } 331 }
328 332
 333#ifdef PAE
 334 pmap_map_shadow_entries();
 335#endif
 336
329 if (xen_start_info.nr_pages != physmem) { 337 if (xen_start_info.nr_pages != physmem) {
330 /* 338 /*
331 * XXX JYM for now, we crash - fix it with balloon when 339 * XXX JYM for now, we crash - fix it with balloon when
332 * supported 340 * supported
333 */ 341 */
334 DPRINTK(("xen_start_info.nr_pages != physmem")); 342 DPRINTK(("xen_start_info.nr_pages != physmem"));
335 HYPERVISOR_crash(); 343 HYPERVISOR_crash();
336 } 344 }
337 345
338 xen_release_ptom_lock(); 346 xen_release_ptom_lock();
339 347
340 DPRINTK(("preparing domain resume\n")); 348 DPRINTK(("preparing domain resume\n"));
341 aprint_verbose("preparing domain resume\n"); 349 aprint_verbose("preparing domain resume\n");

cvs diff -r1.31.2.3 -r1.31.2.4 src/sys/arch/xen/xen/xencons.c

--- src/sys/arch/xen/xen/xencons.c 2009/07/23 23:31:38 1.31.2.3
+++ src/sys/arch/xen/xen/xencons.c 2009/07/24 11:30:28 1.31.2.4
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: xencons.c,v 1.31.2.3 2009/07/23 23:31:38 jym Exp $ */ 1/* $NetBSD: xencons.c,v 1.31.2.4 2009/07/24 11:30:28 jym Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2006 Manuel Bouyer. 4 * Copyright (c) 2006 Manuel Bouyer.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions 7 * modification, are permitted provided that the following conditions
8 * are met: 8 * are met:
9 * 1. Redistributions of source code must retain the above copyright 9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer. 10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright 11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the 12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution. 13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software 14 * 3. All advertising materials mentioning features or use of this software
@@ -53,38 +53,39 @@ @@ -53,38 +53,39 @@
53 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 53 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
54 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 54 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
55 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 55 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
56 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 56 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
57 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 57 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
58 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 58 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
59 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 59 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
60 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 60 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
61 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 61 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
62 */ 62 */
63 63
64 64
65#include <sys/cdefs.h> 65#include <sys/cdefs.h>
66__KERNEL_RCSID(0, "$NetBSD: xencons.c,v 1.31.2.3 2009/07/23 23:31:38 jym Exp $"); 66__KERNEL_RCSID(0, "$NetBSD: xencons.c,v 1.31.2.4 2009/07/24 11:30:28 jym Exp $");
67 67
68#include "opt_xen.h" 68#include "opt_xen.h"
69 69
70#include <sys/param.h> 70#include <sys/param.h>
71#include <sys/ioctl.h> 71#include <sys/ioctl.h>
72#include <sys/proc.h> 72#include <sys/proc.h>
73#include <sys/tty.h> 73#include <sys/tty.h>
74#include <sys/systm.h> 74#include <sys/systm.h>
75#include <sys/device.h> 75#include <sys/device.h>
76#include <sys/conf.h> 76#include <sys/conf.h>
77#include <sys/kauth.h> 77#include <sys/kauth.h>
 78#include <sys/kernel.h>
78 79
79#include <machine/stdarg.h> 80#include <machine/stdarg.h>
80#include <xen/xen.h> 81#include <xen/xen.h>
81#include <xen/hypervisor.h> 82#include <xen/hypervisor.h>
82#include <xen/evtchn.h> 83#include <xen/evtchn.h>
83#ifdef XEN3 84#ifdef XEN3
84#include <uvm/uvm.h> 85#include <uvm/uvm.h>
85#include <machine/pmap.h> 86#include <machine/pmap.h>
86#include <xen/xen3-public/io/console.h> 87#include <xen/xen3-public/io/console.h>
87#else 88#else
88#include <xen/ctrl_if.h> 89#include <xen/ctrl_if.h>
89#endif 90#endif
90 91
@@ -218,65 +219,63 @@ xencons_attach(device_t parent, device_t @@ -218,65 +219,63 @@ xencons_attach(device_t parent, device_t
218 xencons_resume(self, PMF_F_NONE); 219 xencons_resume(self, PMF_F_NONE);
219 } 220 }
220 sc->polling = 0; 221 sc->polling = 0;
221 222
222 if (!pmf_device_register(self, xencons_suspend, xencons_resume)) 223 if (!pmf_device_register(self, xencons_suspend, xencons_resume))
223 aprint_error_dev(self, "couldn't establish power handler\n"); 224 aprint_error_dev(self, "couldn't establish power handler\n");
224} 225}
225 226
226static bool 227static bool
227xencons_suspend(device_t dev PMF_FN_ARGS) { 228xencons_suspend(device_t dev PMF_FN_ARGS) {
228 229
229 int evtch; 230 int evtch;
230 231
231 if (xendomain_is_dom0()) { 232 /* dom0 console should not be suspended */
232 evtch = unbind_virq_from_evtch(VIRQ_CONSOLE); 233 if (!xendomain_is_dom0()) {
233 hypervisor_mask_event(evtch); 
234 if (event_remove_handler(evtch, xencons_intr, 
235 xencons_console_device) != 0) 
236 aprint_error_dev(dev, 
237 "can't remove handler: xencons_intr\n"); 
238 } else { 
239#ifdef XEN3 234#ifdef XEN3
240 evtch = xen_start_info.console_evtchn; 235 evtch = xen_start_info.console_evtchn;
241 hypervisor_mask_event(evtch); 236 hypervisor_mask_event(evtch);
242 if (event_remove_handler(evtch, xencons_handler, 237 if (event_remove_handler(evtch, xencons_handler,
243 xencons_console_device) != 0) 238 xencons_console_device) != 0) {
244 aprint_error_dev(dev, 239 aprint_error_dev(dev,
245 "can't remove handler: xencons_handler\n"); 240 "can't remove handler: xencons_handler\n");
 241 }
246#endif 242#endif
 243
 244 aprint_verbose_dev(dev, "removed event channel %d\n", evtch);
247 } 245 }
248 246
249 aprint_verbose_dev(dev, "removed event channel %d\n", evtch); 
250 247
251 return true; 248 return true;
252} 249}
253 250
254static bool 251static bool
255xencons_resume(device_t dev PMF_FN_ARGS) { 252xencons_resume(device_t dev PMF_FN_ARGS) {
256 253
257 int evtch = -1; 254 int evtch = -1;
258 255
259 if (xendomain_is_dom0()) { 256 if (xendomain_is_dom0()) {
260 evtch = bind_virq_to_evtch(VIRQ_CONSOLE); 257 /* dom0 console resume is required only during first start-up */
261 if (event_set_handler(evtch, xencons_intr, 258 if (cold) {
262 xencons_console_device, IPL_TTY, "xencons") != 0) 259 evtch = bind_virq_to_evtch(VIRQ_CONSOLE);
263 aprint_error_dev(dev, "can't register xencons_intr\n"); 260 event_set_handler(evtch, xencons_intr,
 261 xencons_console_device, IPL_TTY, "xencons");
 262 }
264 } else { 263 } else {
265#ifdef XEN3 264#ifdef XEN3
266 evtch = xen_start_info.console_evtchn; 265 evtch = xen_start_info.console_evtchn;
267 if (event_set_handler(evtch, xencons_handler, 266 event_set_handler(evtch, xencons_handler,
268 xencons_console_device, IPL_TTY, "xencons") != 0) 267 xencons_console_device, IPL_TTY, "xencons");
269 aprint_error_dev(dev, "can't register xencons_handler\n"); 268
270#else 269#else
271 (void)ctrl_if_register_receiver(CMSG_CONSOLE, 270 (void)ctrl_if_register_receiver(CMSG_CONSOLE,
272 xencons_rx, 0); 271 xencons_rx, 0);
273#endif 272#endif
274 } 273 }
275 274
276 if (evtch != -1) { 275 if (evtch != -1) {
277 aprint_verbose_dev(dev, "using event channel %d\n", evtch); 276 aprint_verbose_dev(dev, "using event channel %d\n", evtch);
278 hypervisor_enable_event(evtch); 277 hypervisor_enable_event(evtch);
279 } 278 }
280 279
281 return true; 280 return true;
282} 281}