Fri Feb 12 01:55:46 2010 UTC ()
Starting with the Xen 3 API, MMU_EXTENDED_COMMAND operations (TLB flush,
cache flush, page pinning/unpinning, set_ldt, invlpg) can no longer be
queued in xpq_queue[], as they use their own dedicated hypercall,
mmuext_op().

Their associated xpq_queue_*() functions already call xpq_flush_queue()
before issuing the mmuext_op() hypercall, so the explicit
xpq_flush_queue() calls at their call sites are unnecessary; remove them.
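
For illustration, here is a simplified sketch of such a wrapper, modelled
on xpq_queue_invlpg() in x86_xpmap.c (debug output trimmed; details may
differ slightly from the actual source). It drains the pending mmu_update
queue first, then issues the extended command through its own hypercall,
which is why callers no longer need to flush themselves:

/*
 * Sketch only: based on xpq_queue_invlpg() in x86_xpmap.c,
 * not a verbatim copy of the committed code.
 */
void
xpq_queue_invlpg(vaddr_t va)
{
	struct mmuext_op op;

	/* Drain pending mmu_update entries so ordering is preserved. */
	xpq_flush_queue();

	/* Extended commands bypass xpq_queue[] and use mmuext_op(). */
	op.cmd = MMUEXT_INVLPG_LOCAL;
	op.arg1.linear_addr = va & ~PAGE_MASK;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_invlpg");
}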

Briefly discussed with bouyer@ in private mail. XEN3_DOM0/XEN3PAE_DOM0
were tested through a build.sh release; amd64 was only compile-tested.
No regression expected.


(jym)
diff -r1.102 -r1.103 src/sys/arch/x86/x86/pmap.c
diff -r1.17 -r1.18 src/sys/arch/xen/x86/x86_xpmap.c
diff -r1.16 -r1.17 src/sys/arch/xen/x86/xen_bus_dma.c
diff -r1.9 -r1.10 src/sys/arch/xen/x86/xenfunc.c

cvs diff -r1.102 -r1.103 src/sys/arch/x86/x86/pmap.c

--- src/sys/arch/x86/x86/pmap.c 2010/02/10 00:39:30 1.102
+++ src/sys/arch/x86/x86/pmap.c 2010/02/12 01:55:45 1.103
@@ -1,14 +1,14 @@
-/* $NetBSD: pmap.c,v 1.102 2010/02/10 00:39:30 jym Exp $ */
+/* $NetBSD: pmap.c,v 1.103 2010/02/12 01:55:45 jym Exp $ */
 
 /*
  * Copyright (c) 2007 Manuel Bouyer.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
@@ -139,27 +139,27 @@
  * Hibler/Jolitz pmap, as modified for FreeBSD by John S. Dyson
  * and David Greenman.
  *
  * [3] the Mach pmap. this pmap, from CMU, seems to have migrated
  * between several processors. the VAX version was done by
  * Avadis Tevanian, Jr., and Michael Wayne Young. the i386
  * version was done by Lance Berc, Mike Kupfer, Bob Baron,
  * David Golub, and Richard Draves. the alpha version was
  * done by Alessandro Forin (CMU/Mach) and Chris Demetriou
  * (NetBSD/alpha).
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.102 2010/02/10 00:39:30 jym Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.103 2010/02/12 01:55:45 jym Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
 #include "opt_multiprocessor.h"
 #include "opt_xen.h"
 #if !defined(__x86_64__)
 #include "opt_kstack_dr0.h"
 #endif /* !defined(__x86_64__) */
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/proc.h>
 #include <sys/pool.h>
@@ -845,27 +845,26 @@ pmap_map_ptes(struct pmap *pmap, struct
 xpq_queue_pte_update(
     xpmap_ptom(pmap_pdirpa(pmap, PDIR_SLOT_PTE + i)),
     npde);
 xpq_queue_pte_update(xpmap_ptetomach(&APDP_PDE[i]),
     npde);
 #ifdef PAE
 /* update shadow entry too */
 xpq_queue_pte_update(
     xpmap_ptetomach(&APDP_PDE_SHADOW[i]), npde);
 #endif /* PAE */
 xpq_queue_invlpg(
     (vaddr_t)&pmap->pm_pdir[PDIR_SLOT_PTE + i]);
 }
-xpq_flush_queue();
 if (pmap_valid_entry(opde))
 pmap_apte_flush(ourpmap);
 splx(s);
 }
 #else /* XEN */
 npde = pmap_pa2pte(pmap_pdirpa(pmap, 0)) | PG_RW | PG_V;
 if (!pmap_valid_entry(opde) ||
     pmap_pte2pa(opde) != pmap_pdirpa(pmap, 0)) {
 pmap_pte_set(APDP_PDE, npde);
 pmap_pte_flush();
 if (pmap_valid_entry(opde))
 pmap_apte_flush(ourpmap);
 }
@@ -2122,27 +2121,26 @@ pmap_pdp_ctor(void *arg, void *v, int fl
  * PDIR_SLOT_PTE entries last
  */
 #ifdef PAE
 if (i == l2tol3(PDIR_SLOT_PTE))
 continue;
 #endif
 xpq_queue_pin_table(xpmap_ptom_masked(pdirpa));
 }
 #ifdef PAE
 object = ((vaddr_t)pdir) + PAGE_SIZE * l2tol3(PDIR_SLOT_PTE);
 (void)pmap_extract(pmap_kernel(), object, &pdirpa);
 xpq_queue_pin_table(xpmap_ptom_masked(pdirpa));
 #endif
-xpq_flush_queue();
 splx(s);
 #endif /* XEN */
 
 return (0);
 }
 
 /*
  * pmap_pdp_dtor: destructor for the PDP cache.
  */
 
 void
 pmap_pdp_dtor(void *arg, void *v)
 {
@@ -2156,27 +2154,26 @@ pmap_pdp_dtor(void *arg, void *v)
 for (i = 0; i < PDP_SIZE; i++, object += PAGE_SIZE) {
 /* fetch the physical address of the page directory. */
 (void) pmap_extract(pmap_kernel(), object, &pdirpa);
 /* unpin page table */
 xpq_queue_unpin_table(xpmap_ptom_masked(pdirpa));
 }
 object = (vaddr_t)v;
 for (i = 0; i < PDP_SIZE; i++, object += PAGE_SIZE) {
 /* Set page RW again */
 pte = kvtopte(object);
 xpq_queue_pte_update(xpmap_ptetomach(pte), *pte | PG_RW);
 xpq_queue_invlpg((vaddr_t)object);
 }
-xpq_flush_queue();
 splx(s);
 #endif /* XEN */
 }
 
 #ifdef PAE
 
 /* pmap_pdp_alloc: Allocate a page for the pdp memory pool. */
 
 void *
 pmap_pdp_alloc(struct pool *pp, int flags)
 {
 return (void *)uvm_km_alloc(kernel_map,
     PAGE_SIZE * PDP_SIZE, PAGE_SIZE * PDP_SIZE,
@@ -2733,27 +2730,26 @@ pmap_load(void)
  */
 int i, s;
 pd_entry_t *old_pgd, *new_pgd;
 paddr_t addr;
 s = splvm();
 new_pgd = pmap->pm_pdir;
 old_pgd = pmap_kernel()->pm_pdir;
 addr = xpmap_ptom(pmap_pdirpa(pmap_kernel(), 0));
 for (i = 0; i < PDIR_SLOT_PTE;
     i++, addr += sizeof(pd_entry_t)) {
 if ((new_pgd[i] & PG_V) || (old_pgd[i] & PG_V))
 xpq_queue_pte_update(addr, new_pgd[i]);
 }
-xpq_flush_queue(); /* XXXtlb */
 tlbflush();
 xen_set_user_pgd(pmap_pdirpa(pmap, 0));
 xen_current_user_pgd = pmap_pdirpa(pmap, 0);
 splx(s);
 }
 #else /* XEN && x86_64 */
 #if defined(XEN)
 /*
  * clear APDP slot, in case it points to a page table that has
  * been freed
  */
 if (*APDP_PDE) {
 int i;
@@ -2774,27 +2770,26 @@ pmap_load(void)
 #endif /* XEN */
 lldt(pmap->pm_ldt_sel);
 #ifdef PAE
 {
 paddr_t l3_pd = xpmap_ptom_masked(pmap_l3paddr);
 int i;
 int s = splvm();
 /* don't update the kernel L3 slot */
 for (i = 0 ; i < PDP_SIZE - 1 ; i++, l3_pd += sizeof(pd_entry_t)) {
 xpq_queue_pte_update(l3_pd,
     xpmap_ptom(pmap->pm_pdirpa[i]) | PG_V);
 }
 tlbflush();
-xpq_flush_queue();
 splx(s);
 }
 #else /* PAE */
 {
 u_int gen = uvm_emap_gen_return();
 lcr3(pcb->pcb_cr3);
 uvm_emap_update(gen);
 }
 #endif /* PAE */
 #endif /* XEN && x86_64 */
 
 ci->ci_want_pmapload = 0;
 

cvs diff -r1.17 -r1.18 src/sys/arch/xen/x86/x86_xpmap.c

--- src/sys/arch/xen/x86/x86_xpmap.c 2009/10/23 02:32:34 1.17
+++ src/sys/arch/xen/x86/x86_xpmap.c 2010/02/12 01:55:45 1.18
@@ -1,14 +1,14 @@
-/* $NetBSD: x86_xpmap.c,v 1.17 2009/10/23 02:32:34 snj Exp $ */
+/* $NetBSD: x86_xpmap.c,v 1.18 2010/02/12 01:55:45 jym Exp $ */
 
 /*
  * Copyright (c) 2006 Mathieu Ropert <mro@adviseo.fr>
  *
  * Permission to use, copy, modify, and distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
  * copyright notice and this permission notice appear in all copies.
  *
  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
@@ -59,27 +59,27 @@
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.17 2009/10/23 02:32:34 snj Exp $");
+__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.18 2010/02/12 01:55:45 jym Exp $");
 
 #include "opt_xen.h"
 #include "opt_ddb.h"
 #include "ksyms.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 
 #include <uvm/uvm.h>
 
 #include <machine/pmap.h>
 #include <machine/gdt.h>
 #include <xen/xenfunc.h>
@@ -143,27 +143,26 @@ xen_set_ldt(vaddr_t base, uint32_t entri
 #else
 end = base + entries * sizeof(union descriptor);
 #endif
 
 for (va = base; va < end; va += PAGE_SIZE) {
 KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
 ptp = kvtopte(va);
 XENPRINTF(("xen_set_ldt %p %d %p\n", (void *)base,
     entries, ptp));
 pmap_pte_clearbits(ptp, PG_RW);
 }
 s = splvm();
 xpq_queue_set_ldt(base, entries);
-xpq_flush_queue();
 splx(s);
 }
 
 #ifdef XENDEBUG
 void xpq_debug_dump(void);
 #endif
 
 #define XPQUEUE_SIZE 2048
 static mmu_update_t xpq_queue[XPQUEUE_SIZE];
 static int xpq_idx = 0;
 
 void
 xpq_flush_queue(void)

cvs diff -r1.16 -r1.17 src/sys/arch/xen/x86/xen_bus_dma.c

--- src/sys/arch/xen/x86/xen_bus_dma.c 2010/01/23 22:32:42 1.16
+++ src/sys/arch/xen/x86/xen_bus_dma.c 2010/02/12 01:55:46 1.17
@@ -1,14 +1,14 @@
-/* $NetBSD: xen_bus_dma.c,v 1.16 2010/01/23 22:32:42 cegger Exp $ */
+/* $NetBSD: xen_bus_dma.c,v 1.17 2010/02/12 01:55:46 jym Exp $ */
 /* NetBSD bus_dma.c,v 1.21 2005/04/16 07:53:35 yamt Exp */
 
 /*-
  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
  * Simulation Facility, NASA Ames Research Center.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
@@ -22,27 +22,27 @@
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xen_bus_dma.c,v 1.16 2010/01/23 22:32:42 cegger Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xen_bus_dma.c,v 1.17 2010/02/12 01:55:46 jym Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/mbuf.h>
 #include <sys/proc.h>
 
 #include <machine/bus.h>
 #include <machine/bus_private.h>
 
 #include <uvm/uvm_extern.h>
 
 extern paddr_t avail_end;
@@ -132,27 +132,26 @@ _xen_alloc_contig(bus_size_t size, bus_s
 pgnext = pg->pageq.queue.tqe_next;
 pa = VM_PAGE_TO_PHYS(pg);
 xpmap_phys_to_machine_mapping[
     (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = mfn+i;
 xpq_queue_machphys_update(((paddr_t)(mfn+i)) << PAGE_SHIFT, pa);
 /* while here, give extra pages back to UVM */
 if (i >= npagesreq) {
 TAILQ_REMOVE(mlistp, pg, pageq.queue);
 uvm_pagefree(pg);
 }
 }
 /* Flush updates through and flush the TLB */
 xpq_queue_tlb_flush();
-xpq_flush_queue();
 splx(s);
 return 0;
 
 failed:
 /*
  * Attempt to recover from a failed decrease or increase reservation:
  * if decrease_reservation failed, we don't have given all pages
  * back to Xen; give them back to UVM, and get the missing pages
  * from Xen.
  * if increase_reservation failed, we expect pg to be NULL and we just
  * get back the missing pages from Xen one by one.
  */
 /* give back remaining pages to UVM */
@@ -175,27 +174,26 @@ failed:
 printf("xen_alloc_contig: recovery "
     "XENMEM_increase_reservation failed!\n");
 break;
 }
 pa = VM_PAGE_TO_PHYS(pg);
 xpmap_phys_to_machine_mapping[
     (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = mfn;
 xpq_queue_machphys_update(((paddr_t)mfn) << PAGE_SHIFT, pa);
 TAILQ_REMOVE(mlistp, pg, pageq.queue);
 uvm_pagefree(pg);
 }
 /* Flush updates through and flush the TLB */
 xpq_queue_tlb_flush();
-xpq_flush_queue();
 splx(s);
 return error;
 }
 
 
 /*
  * Allocate physical memory from the given physical address range.
  * Called by DMA-safe memory allocation methods.
  * We need our own version to deal with physical vs machine addresses.
  */
 int
 _xen_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size,
     bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,

cvs diff -r1.9 -r1.10 src/sys/arch/xen/x86/xenfunc.c

--- src/sys/arch/xen/x86/xenfunc.c 2009/10/23 02:32:34 1.9
+++ src/sys/arch/xen/x86/xenfunc.c 2010/02/12 01:55:46 1.10
@@ -1,14 +1,14 @@
-/* $NetBSD: xenfunc.c,v 1.9 2009/10/23 02:32:34 snj Exp $ */
+/* $NetBSD: xenfunc.c,v 1.10 2010/02/12 01:55:46 jym Exp $ */
 
 /*
  *
  * Copyright (c) 2004 Christian Limpach.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
@@ -17,27 +17,27 @@
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xenfunc.c,v 1.9 2009/10/23 02:32:34 snj Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xenfunc.c,v 1.10 2010/02/12 01:55:46 jym Exp $");
 
 #include <sys/param.h>
 
 #include <uvm/uvm_extern.h>
 
 #include <machine/intr.h>
 #include <machine/vmparam.h>
 #include <machine/pmap.h>
 #include <xen/xen.h>
 #include <xen/hypervisor.h>
 //#include <xen/evtchn.h>
 #include <xen/xenpmap.h>
 #include <machine/pte.h>
@@ -45,27 +45,26 @@ __KERNEL_RCSID(0, "$NetBSD: xenfunc.c,v
 #ifdef XENDEBUG_LOW
 #define __PRINTK(x) printk x
 #else
 #define __PRINTK(x)
 #endif
 
 void xen_set_ldt(vaddr_t, uint32_t);
 
 void
 invlpg(vaddr_t addr)
 {
 int s = splvm();
 xpq_queue_invlpg(addr);
-xpq_flush_queue();
 splx(s);
 }
 
 #ifndef __x86_64__
 void
 lldt(u_short sel)
 {
 struct cpu_info *ci;
 
 ci = curcpu();
 
 if (ci->ci_curldt == sel)
 return;
@@ -94,37 +93,35 @@ lcr0(u_long val)
 u_long
 rcr0(void)
 {
 __PRINTK(("XXX rcr0 not supported\n"));
 return 0;
 }
 
 #ifndef __x86_64__
 void
 lcr3(vaddr_t val)
 {
 int s = splvm();
 xpq_queue_pt_switch(xpmap_ptom_masked(val));
-xpq_flush_queue();
 splx(s);
 }
 #endif
 
 void
 tlbflush(void)
 {
 int s = splvm();
 xpq_queue_tlb_flush();
-xpq_flush_queue();
 splx(s);
 }
 
 void
 tlbflushg(void)
 {
 tlbflush();
 }
 
 vaddr_t
 rdr6(void)
 {
 u_int val;