Hi.
This patch makes a backup of the zone info we need before starting the
restoration of the original kernel data, and then uses that backup rather
than the struct zones themselves while the kernel data is being restored,
since the zone structures may themselves be overwritten during the copy back.
Regards,
Nigel
atomic_copy.c | 74 ++++++++++++++++++++++++++++++++++++++++------------------
1 files changed, 52 insertions(+), 22 deletions(-)
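
For reference, the backup built by init_nosave_zone_table() is just three
unsigned longs per zone, stored on a page obtained via
suspend2_get_nonconflicting_pages(): the zone's start pfn, its end pfn
(start plus spanned pages, with zero marking an empty slot since a start
pfn of zero is valid) and a highmem flag. A minimal sketch of that layout
follows; the field names and accessor below are illustrative only and are
not part of the patch:

	/* Sketch only: field indices into the per-zone backup table. */
	enum nosave_zone_field {
		NOSAVE_ZONE_START	= 0,	/* first pfn of the zone */
		NOSAVE_ZONE_END		= 1,	/* start + spanned pages; 0 = empty slot */
		NOSAVE_ZONE_HIGHMEM	= 2,	/* non-zero if the zone is highmem */
	};

	static inline unsigned long nosave_zone_entry(unsigned long *table,
			int zone_num, enum nosave_zone_field field)
	{
		return table[zone_num * 3 + field];
	}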
diff -ruNp 9040-get-next-bit-on.patch-old/kernel/power/atomic_copy.c 9040-get-next-bit-on.patch-new/kernel/power/atomic_copy.c
--- 9040-get-next-bit-on.patch-old/kernel/power/atomic_copy.c 2005-12-27 20:52:30.000000000 +1000
+++ 9040-get-next-bit-on.patch-new/kernel/power/atomic_copy.c 2005-12-27 20:52:18.000000000 +1000
@@ -32,11 +32,12 @@ static dyn_pageflags_t __nosavedata copy
static int __nosavedata origoffset;
static int __nosavedata copyoffset;
static int __nosavedata loop;
-static __nosavedata struct zone *o_zone, *c_zone;
static __nosavedata int o_zone_num, c_zone_num;
static __nosavedata int is_resuming;
__nosavedata char resume_commandline[COMMAND_LINE_SIZE];
+static __nosavedata unsigned long *zone_nosave;
+static __nosavedata int num_zones;
static atomic_t atomic_copy_hold;
static atomic_t restore_thread_ready;
@@ -44,28 +45,59 @@ static atomic_t restore_thread_ready;
suspend2_saved_context_t suspend2_saved_context; /* temporary storage */
cpumask_t saved_affinity[NR_IRQS];
+/*
+ * Zone information might be overwritten during the copy back,
+ * so we copy the fields we need to a non-conflicting page and
+ * use it.
+ */
+static void init_nosave_zone_table(void)
+{
+ struct zone *zone;
+
+ zone_nosave = (unsigned long *) suspend2_get_nonconflicting_pages(0);
+
+ BUG_ON(!zone_nosave);
+
+ for_each_zone(zone) {
+ if (zone->spanned_pages) {
+ zone_nosave[num_zones * 3 ] = zone->zone_start_pfn;
+ zone_nosave[num_zones * 3 + 1] = zone->zone_start_pfn +
+ zone->spanned_pages;
+ zone_nosave[num_zones * 3 + 2] = is_highmem(zone);
+ }
+ num_zones++;
+ }
+}
+
/* For Suspend2, where this all has to be inlined */
-static int __get_next_bit_on(dyn_pageflags_t bitmap, struct zone **zone,
- int *zone_num, int counter)
+static int __get_next_bit_on(dyn_pageflags_t bitmap, int *zone_num, int counter)
{
unsigned long *ul_ptr = NULL;
int reset_ul_ptr = 1;
BUG_ON(counter == max_pfn);
- if (counter == -1)
- counter = pgdat_list->node_zones->zone_start_pfn - 1;
+ if (counter == -1) {
+ *zone_num = 0;
+
+ /*
+ * Test the end because the start can validly
+ * be zero.
+ */
+ while (!zone_nosave[(*zone_num) * 3 + 1])
+			(*zone_num)++;
+ counter = zone_nosave[*zone_num * 3] - 1;
+ }
do {
counter++;
- if (counter >= ((*zone)->zone_start_pfn + (*zone)->spanned_pages)) {
- do {
- *zone = next_zone(*zone);
+ if (counter >= zone_nosave[(*zone_num) * 3 + 1]) {
+ (*zone_num)++;
+			while (*zone_num < num_zones && !zone_nosave[(*zone_num) * 3 + 1])
(*zone_num)++;
- } while (*zone && !(*zone)->spanned_pages);
- if (!*zone)
+ if (*zone_num == num_zones)
return max_pfn;
- counter = (*zone)->zone_start_pfn;
+ counter = zone_nosave[(*zone_num) * 3];
reset_ul_ptr = 1;
} else
if (!(counter & BIT_NUM_MASK))
@@ -73,9 +105,9 @@ static int __get_next_bit_on(dyn_pagefla
if (reset_ul_ptr) {
reset_ul_ptr = 0;
ul_ptr = PAGE_UL_PTR(bitmap, *zone_num,
- (counter - (*zone)->zone_start_pfn));
+ (counter - zone_nosave[(*zone_num) * 3]));
if (!*ul_ptr) {
- counter += BIT_NUM_MASK;
+ counter += BIT_NUM_MASK - 1;
continue;
}
}
@@ -100,6 +132,8 @@ static void copyback_prepare(void)
io_speed_save[loop/2][loop%2] =
suspend_io_time[loop/2][loop%2];
+ init_nosave_zone_table();
+
memcpy(resume_commandline, saved_command_line, COMMAND_LINE_SIZE);
suspend2_map_atomic_copy_pages();
@@ -235,19 +269,17 @@ static inline void copyback_low(void)
unsigned long *origpage;
unsigned long *copypage;
- o_zone = pgdat_list->node_zones;
o_zone_num = 0;
- c_zone = pgdat_list->node_zones;
c_zone_num = 0;
origmap = pageset1_map;
copymap = pageset1_copy_map;
- origoffset = __get_next_bit_on(origmap, &o_zone, &o_zone_num, -1);
- copyoffset = __get_next_bit_on(copymap, &c_zone, &c_zone_num, -1);
+ origoffset = __get_next_bit_on(origmap, &o_zone_num, -1);
+ copyoffset = __get_next_bit_on(copymap, &c_zone_num, -1);
while (origoffset < max_pfn) {
- if (!is_highmem(o_zone)) {
+ if (!zone_nosave[o_zone_num * 3 + 2]) {
origpage = (unsigned long *) __va(origoffset << PAGE_SHIFT);
copypage = (unsigned long *) __va(copyoffset << PAGE_SHIFT);
@@ -259,9 +291,9 @@ static inline void copyback_low(void)
}
}
- origoffset = __get_next_bit_on(origmap, &o_zone, &o_zone_num,
+ origoffset = __get_next_bit_on(origmap, &o_zone_num,
origoffset);
- copyoffset = __get_next_bit_on(copymap, &c_zone, &c_zone_num,
+ copyoffset = __get_next_bit_on(copymap, &c_zone_num,
copyoffset);
}
}
@@ -274,9 +306,7 @@ void copyback_high(void)
unsigned long *origpage;
unsigned long *copypage;
- o_zone = pgdat_list->node_zones;
o_zone_num = 0;
- c_zone = pgdat_list->node_zones;
c_zone_num = 0;
origoffset = get_next_bit_on(origmap, -1);
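
With the table in place, the copy-back paths above only ever consult
zone_nosave by zone index, both in __get_next_bit_on() and in place of
is_highmem(). As a rough, self-contained illustration of that kind of
lookup (the helper below is assumed for the example and is not in the
patch):

	/* Sketch only: find the backed-up zone index covering a pfn, or -1. */
	static int nosave_zone_for_pfn(unsigned long *table, int zones,
			unsigned long pfn)
	{
		int i;

		for (i = 0; i < zones; i++) {
			/* An end pfn of zero marks a zone with no spanned pages. */
			if (!table[i * 3 + 1])
				continue;
			if (pfn >= table[i * 3] && pfn < table[i * 3 + 1])
				return i;
		}
		return -1;
	}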
_______________________________________________
Suspend2-devel mailing list
Suspend2-devel@lists.suspend2.net
http://lists.suspend2.net/mailman/listinfo/suspend2-devel