/*
 * self test for change_page_attr.
 *
 * Clears the global bit on random pages in the direct mapping,
 * then reverts and compares the page tables before and afterwards.
 */
#include <linux/bootmem.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/kdebug.h>
/*
 * Only print the results of the first pass:
 */
static __read_mostly int print = 1;

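/*
 * NTEST bounds the number of random ranges exercised per pass; LPS and GPS
 * are the large (2M/4M) and giant (1G) page sizes used to convert page
 * counts back into 4k-page units when cross-checking the mapping.
 */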
enum {
	NTEST = 400,
#ifdef CONFIG_X86_64
	LPS = (1 << PMD_SHIFT),
#elif defined(CONFIG_X86_PAE)
	LPS = (1 << PMD_SHIFT),
#else
	LPS = (1 << 22),
#endif
	GPS = (1 << 30)
};

struct split_state {
	long lpg, gpg, spg, exec;
	long min_exec, max_exec;
};

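/*
 * Walk the direct mapping and count how it is currently mapped: 4k, 2M/4M
 * and 1G pages, plus the executable range.  Returns nonzero if the page
 * table state looks inconsistent, so callers can accumulate the result
 * into a failure count.
 */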
static int print_split(struct split_state *s)
{
	long i, expected, missed = 0;
	int err = 0;

	s->lpg = s->gpg = s->spg = s->exec = 0;
	s->min_exec = ~0UL;
	s->max_exec = 0;
	for (i = 0; i < max_pfn_mapped; ) {
		unsigned long addr = (unsigned long)__va(i << PAGE_SHIFT);
		int level;
		pte_t *pte;

		pte = lookup_address(addr, &level);
		if (!pte) {
			missed++;
			i++;
			continue;
		}
		if (level == PG_LEVEL_1G && sizeof(long) == 8) {
			s->gpg++;
			i += GPS/PAGE_SIZE;
		} else if (level == PG_LEVEL_2M) {
			if (!(pte_val(*pte) & _PAGE_PSE)) {
				printk(KERN_ERR
					"%lx level %d but not PSE %Lx\n",
					addr, level, (u64)pte_val(*pte));
				err = 1;
			}
			s->lpg++;
			i += LPS/PAGE_SIZE;
		} else {
			s->spg++;
			i++;
		}
		if (!(pte_val(*pte) & _PAGE_NX)) {
			s->exec++;
			if (addr < s->min_exec)
				s->min_exec = addr;
			if (addr > s->max_exec)
				s->max_exec = addr;
		}
	}
	if (print) {
		printk(KERN_INFO
			" 4k %lu large %lu gb %lu x %lu[%lx-%lx] miss %lu\n",
			s->spg, s->lpg, s->gpg, s->exec,
			s->min_exec != ~0UL ? s->min_exec : 0,
			s->max_exec, missed);
	}

	expected = (s->gpg*GPS + s->lpg*LPS)/PAGE_SIZE + s->spg + missed;
	if (expected != i) {
		printk(KERN_ERR "CPA max_pfn_mapped %lu but expected %lu\n",
			max_pfn_mapped, expected);
		return 1;
	}
	return err;
}

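/*
 * Remember the address and length of every range the test touches so the
 * revert pass below can restore exactly the same pages.
 */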
static unsigned long addr[NTEST];
static unsigned int len[NTEST];

/* Change the global bit on random pages in the direct mapping */
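/*
 * Rough procedure (as far as the test logic goes): snapshot the direct
 * mapping, clear _PAGE_GLOBAL on NTEST random ranges (which forces large
 * pages to be split), snapshot again, set the bit back, and take a final
 * snapshot.  Any lookup failure, unexpected page level or unexpected pte
 * bits bumps the failure count.
 */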
static int pageattr_test(void)
{
	struct split_state sa, sb, sc;
	unsigned long *bm;
	pte_t *pte, pte0;
	int failed = 0;
	int level, i, k, err;

	if (print)
		printk(KERN_INFO "CPA self-test:\n");

	bm = vmalloc((max_pfn_mapped + 7) / 8);
	if (!bm) {
		printk(KERN_ERR "CPA Cannot vmalloc bitmap\n");
		return -ENOMEM;
	}
	memset(bm, 0, (max_pfn_mapped + 7) / 8);

	failed += print_split(&sa);
	srandom32(100);

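	/*
	 * Pick NTEST random, non-overlapping ranges in the direct mapping.
	 * The bitmap marks pfns that already belong to an earlier range;
	 * ranges that would overlap or that hit a non-present or oddly
	 * mapped page are trimmed or skipped.
	 */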
	for (i = 0; i < NTEST; i++) {
		unsigned long pfn = random32() % max_pfn_mapped;

		addr[i] = (unsigned long)__va(pfn << PAGE_SHIFT);
		len[i] = random32() % 100;
		len[i] = min_t(unsigned long, len[i], max_pfn_mapped - pfn - 1);
		if (len[i] == 0)
			len[i] = 1;

		pte = NULL;
		pte0 = pfn_pte(0, __pgprot(0)); /* shut gcc up */

		for (k = 0; k < len[i]; k++) {
			pte = lookup_address(addr[i] + k*PAGE_SIZE, &level);
			if (!pte || pgprot_val(pte_pgprot(*pte)) == 0 ||
			    !(pte_val(*pte) & _PAGE_PRESENT)) {
				addr[i] = 0;
				break;
			}
			if (k == 0) {
				pte0 = *pte;
			} else if (pgprot_val(pte_pgprot(*pte)) !=
				   pgprot_val(pte_pgprot(pte0))) {
				len[i] = k;
				break;
			}
			if (test_bit(pfn + k, bm)) {
				len[i] = k;
				break;
			}
			__set_bit(pfn + k, bm);
		}
		if (!addr[i] || !pte || !k) {
			addr[i] = 0;
			continue;
		}

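		/*
		 * Clear _PAGE_GLOBAL on the range.  If it starts inside a
		 * large page, change_page_attr_clear() has to split that
		 * mapping down to 4k pages, which is what the level check
		 * below verifies.
		 */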
		err = change_page_attr_clear(addr[i], len[i],
					     __pgprot(_PAGE_GLOBAL));
		if (err < 0) {
			printk(KERN_ERR "CPA %d failed %d\n", i, err);
			failed++;
		}

		pte = lookup_address(addr[i], &level);
		if (!pte || pte_global(*pte) || pte_huge(*pte)) {
			printk(KERN_ERR "CPA %lx: bad pte %Lx\n", addr[i],
				pte ? (u64)pte_val(*pte) : 0ULL);
			failed++;
		}
		if (level != PG_LEVEL_4K) {
			printk(KERN_ERR "CPA %lx: unexpected level %d\n",
				addr[i], level);
			failed++;
		}
	}
	vfree(bm);

	failed += print_split(&sb);

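	/*
	 * Second pass: set _PAGE_GLOBAL again on every range that was
	 * successfully modified above, then re-check the ptes.
	 */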
	for (i = 0; i < NTEST; i++) {
		if (!addr[i])
			continue;
		pte = lookup_address(addr[i], &level);
		if (!pte) {
			printk(KERN_ERR "CPA lookup of %lx failed\n", addr[i]);
			failed++;
			continue;
		}
		err = change_page_attr_set(addr[i], len[i],
					   __pgprot(_PAGE_GLOBAL));
		if (err < 0) {
			printk(KERN_ERR "CPA reverting failed: %d\n", err);
			failed++;
		}
		pte = lookup_address(addr[i], &level);
		if (!pte || !pte_global(*pte)) {
			printk(KERN_ERR "CPA %lx: bad pte after revert %Lx\n",
				addr[i], pte ? (u64)pte_val(*pte) : 0ULL);
			failed++;
		}
	}

	failed += print_split(&sc);

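	/*
	 * Any inconsistency seen in the three print_split() passes or in
	 * the per-range checks above has been accumulated into 'failed'.
	 */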
	if (failed) {
		printk(KERN_ERR "NOT PASSED. Please report.\n");
		WARN_ON(1);
		return -EINVAL;
	}
	if (print)
		printk(KERN_INFO "ok.\n");
	return 0;
}

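/*
 * Presumably meant as a soak test: re-run the self test every 30 seconds
 * from a kernel thread until it fails or the thread is told to stop.
 */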
static int do_pageattr_test(void *__unused)
{
	while (!kthread_should_stop()) {
		schedule_timeout_interruptible(HZ*30);
		if (pageattr_test() < 0)
			break;
	}
	return 0;
}

static int start_pageattr_test(void)
{
	struct task_struct *p;

	p = kthread_create(do_pageattr_test, NULL, "pageattr-test");
	if (!IS_ERR(p))
		wake_up_process(p);
	else
		WARN_ON(1);
	return 0;
}
module_init(start_pageattr_test);