Diffstat (limited to 'trunk/2.6.22/20030_136-pae-vmalloc-sync-all.patch1')
-rw-r--r--  trunk/2.6.22/20030_136-pae-vmalloc-sync-all.patch1  63
1 file changed, 63 insertions, 0 deletions
diff --git a/trunk/2.6.22/20030_136-pae-vmalloc-sync-all.patch1 b/trunk/2.6.22/20030_136-pae-vmalloc-sync-all.patch1
new file mode 100644
index 0000000..b75af4b
--- /dev/null
+++ b/trunk/2.6.22/20030_136-pae-vmalloc-sync-all.patch1
@@ -0,0 +1,63 @@
+# HG changeset patch
+# User Keir Fraser <keir@xensource.com>
+# Date 1184403059 -3600
+# Node ID 34ebf92ad28d53f70ca02966c9f926f7d83bafbb
+# Parent 9debaf36090515b4ce54712c4641781bc263b1a6
+Subject: xen/i386: Fix vmalloc_sync_all() for PAE.
+Signed-off-by: Keir Fraser <keir@xensource.com>
+
+Acked-by: jbeulich@novell.com
+
+Index: head-2007-08-07/arch/i386/mm/fault-xen.c
+===================================================================
+--- head-2007-08-07.orig/arch/i386/mm/fault-xen.c 2007-08-07 09:47:09.000000000 +0200
++++ head-2007-08-07/arch/i386/mm/fault-xen.c 2007-08-07 09:57:59.000000000 +0200
+@@ -739,18 +739,31 @@ void vmalloc_sync_all(void)
+ * problematic: insync can only get set bits added, and updates to
+ * start are only improving performance (without affecting correctness
+ * if undone).
++ * XEN: To work on PAE, we need to iterate over PMDs rather than PGDs.
++ * This change works just fine with 2-level paging too.
+ */
+- static DECLARE_BITMAP(insync, PTRS_PER_PGD);
++#define sync_index(a) ((a) >> PMD_SHIFT)
++ static DECLARE_BITMAP(insync, PTRS_PER_PGD*PTRS_PER_PMD);
+ static unsigned long start = TASK_SIZE;
+ unsigned long address;
+
+ BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
+- for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
+- if (!test_bit(pgd_index(address), insync)) {
++ for (address = start;
++ address >= TASK_SIZE && address < hypervisor_virt_start;
++ address += 1UL << PMD_SHIFT) {
++ if (!test_bit(sync_index(address), insync)) {
+ unsigned long flags;
+ struct page *page;
+
+ spin_lock_irqsave(&pgd_lock, flags);
++ /*
++ * XEN: vmalloc_sync_one() failure path logic assumes
++ * pgd_list is non-empty.
++ */
++ if (unlikely(!pgd_list)) {
++ spin_unlock_irqrestore(&pgd_lock, flags);
++ return;
++ }
+ for (page = pgd_list; page; page =
+ (struct page *)page->index)
+ if (!vmalloc_sync_one(page_address(page),
+@@ -760,10 +773,10 @@ void vmalloc_sync_all(void)
+ }
+ spin_unlock_irqrestore(&pgd_lock, flags);
+ if (!page)
+- set_bit(pgd_index(address), insync);
++ set_bit(sync_index(address), insync);
+ }
+- if (address == start && test_bit(pgd_index(address), insync))
+- start = address + PGDIR_SIZE;
++ if (address == start && test_bit(sync_index(address), insync))
++ start = address + (1UL << PMD_SHIFT);
+ }
+ }
+ #endif
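
As a side note on why the change works: under PAE the i386 kernel has only 4 top-level page-directory entries (PTRS_PER_PGD == 4), each covering 1GB, so a bitmap indexed by pgd_index() cannot distinguish individual 2MB kernel mappings; indexing by PMD slot (sync_index(), i.e. address >> PMD_SHIFT) restores the needed granularity, and on 2-level paging, where PMD_SHIFT == PGDIR_SHIFT, it degenerates to the old per-PGD behaviour. Below is a minimal standalone sketch of the index arithmetic, not part of the repository: the constants mirror the i386 PAE page-table headers, and the sample addresses are made up.

/*
 * Standalone sketch (editorial, hypothetical values): shows why the
 * 'insync' bitmap must be indexed per PMD slot rather than per PGD
 * slot on i386 with PAE (3-level paging).
 */
#include <stdio.h>

/* Constants as in the i386 PAE page-table headers. */
#define PGDIR_SHIFT   30
#define PMD_SHIFT     21
#define PTRS_PER_PGD   4
#define PTRS_PER_PMD 512

#define pgd_index(a)  ((unsigned long)(a) >> PGDIR_SHIFT)
#define sync_index(a) ((unsigned long)(a) >> PMD_SHIFT)   /* as in the patch */

int main(void)
{
    /* Two hypothetical vmalloc-area addresses, 2MB apart. */
    unsigned long a = 0xf5800000UL;
    unsigned long b = 0xf5a00000UL;

    /* With only 4 PGD slots, both addresses collapse into the same
     * slot, so a PGD-indexed bitmap cannot track them separately... */
    printf("pgd_index:  %lu vs %lu (bitmap of %d bits)\n",
           pgd_index(a), pgd_index(b), PTRS_PER_PGD);

    /* ...whereas sync_index() gives distinct 2MB-granular slots in a
     * PTRS_PER_PGD * PTRS_PER_PMD = 2048-bit bitmap, matching the
     * enlarged DECLARE_BITMAP() in the patch. */
    printf("sync_index: %lu vs %lu (bitmap of %d bits)\n",
           sync_index(a), sync_index(b), PTRS_PER_PGD * PTRS_PER_PMD);
    return 0;
}

With these values the first line prints "3 vs 3", the second "1964 vs 1965": only the PMD-granular index can mark the two 2MB regions as synced independently, which is also why the loop now steps by 1UL << PMD_SHIFT instead of PGDIR_SIZE.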