Helpers for the PGSHIFT of the largest jumbo page

Other parts of the kernel, including EPT/VMX, will want to know this.
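For instance, an EPT mapping path can clamp its guest mappings to what the
host page tables can actually back. A minimal sketch of such a caller
(ept_shift and the clamping site are illustrative, not part of this commit):

	/* Illustrative only: don't build a guest mapping larger than the
	 * largest jumbo page the host supports. */
	int shift = MIN(ept_shift, arch_max_jumbo_page_shift());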
diff --git a/kern/arch/riscv/pmap.c b/kern/arch/riscv/pmap.c
index 33da797..72e5406 100644
--- a/kern/arch/riscv/pmap.c
+++ b/kern/arch/riscv/pmap.c
@@ -120,3 +120,10 @@
 {
 	*pd = 0;
 }
+
+/* Returns the page shift of the largest jumbo supported */
+int arch_max_jumbo_page_shift(void)
+{
+	#warning "What jumbo page sizes does RISC support?"
+	return PGSHIFT;
+}
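For reference, RISC-V's Sv39 translation mode defines 2 MiB megapages and
1 GiB gigapages on top of the 4 KiB base page, so a later pass could resolve
the #warning along these lines. A sketch only, with a hypothetical constant
name (the port would need to define whatever it calls the gigapage shift):

	/* Sketch: under Sv39, the largest jumbo is the 1 GiB gigapage. */
	int arch_max_jumbo_page_shift(void)
	{
		return GIGAPAGE_SHIFT;	/* hypothetical name, == 30 under Sv39 */
	}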
diff --git a/kern/arch/x86/pmap64.c b/kern/arch/x86/pmap64.c
index 5b76771..a82c6f0 100644
--- a/kern/arch/x86/pmap64.c
+++ b/kern/arch/x86/pmap64.c
@@ -36,7 +36,6 @@
 physaddr_t boot_cr3;
 segdesc_t *gdt;
 pseudodesc_t gdt_pd;
-unsigned int max_jumbo_shift;
 
 #define PG_WALK_SHIFT_MASK	0x00ff		/* first byte = target shift */
 #define PG_WALK_CREATE		0x0100
@@ -428,7 +427,7 @@
  * have a slimmed down page table. */
 void vm_init(void)
 {
-	uint32_t edx;
+	int max_jumbo_shift;
 	kpte_t *boot_kpt = KADDR(get_boot_pml4());
 
 	boot_cr3 = get_boot_pml4();
@@ -437,8 +436,7 @@
 	gdt = KADDR(get_gdt64());
 
 	/* We need to limit our mappings on machines that don't support 1GB pages */
-	cpuid(0x80000001, 0x0, 0, 0, 0, &edx);
-	max_jumbo_shift = edx & (1 << 26) ? PML3_SHIFT : PML2_SHIFT;
+	max_jumbo_shift = arch_max_jumbo_page_shift();
 	check_syms_va();
 	/* KERNBASE mapping: we already have 512 GB complete (one full PML3_REACH).
 	 * It's okay if we have extra, just need to make sure we reach max_paddr. */
@@ -577,6 +575,14 @@
 	pd->epte = 0;
 }
+
+/* Returns the page shift of the largest jumbo supported */
+int arch_max_jumbo_page_shift(void)
+{
+	uint32_t edx;
+	cpuid(0x80000001, 0x0, 0, 0, 0, &edx);
+	return edx & (1 << 26) ? PML3_SHIFT : PML2_SHIFT;
+}
 
 /* Debugging */
 static int print_pte(kpte_t *kpte, uintptr_t kva, int shift, bool visited_subs,
                      void *data)
@@ -605,7 +611,7 @@
 	printk("Printing the entire page table set for %p, DFS\n", pgdir);
 	/* Need to be careful we avoid VPT/UVPT, o/w we'll recurse */
 	pml_for_each(pgdir, 0, UVPT, print_pte, 0);
-	if (max_jumbo_shift < PML3_SHIFT)
+	if (arch_max_jumbo_page_shift() < PML3_SHIFT)
 		printk("(skipping kernbase mapping - too many entries)\n");
 	else
 		pml_for_each(pgdir, KERNBASE, VPT - KERNBASE, print_pte, 0);
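The CPUID probe here is the standard feature test: leaf 0x80000001 reports
1 GiB page support in EDX bit 26 (the pdpe1gb flag), so the helper returns
PML3_SHIFT (30, 1 GiB) when the flag is set and PML2_SHIFT (21, 2 MiB)
otherwise. Callers that want a size rather than a shift can derive it
directly; a minimal sketch (max_jumbo_pgsize() is a name invented here):

	/* Largest jumbo page size in bytes: 1 GiB or 2 MiB on x86_64. */
	static inline size_t max_jumbo_pgsize(void)
	{
		return 1UL << arch_max_jumbo_page_shift();
	}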
diff --git a/kern/include/pmap.h b/kern/include/pmap.h
index bb7e9ad..e7dbd8d 100644
--- a/kern/include/pmap.h
+++ b/kern/include/pmap.h
@@ -101,6 +101,7 @@
 int arch_pgdir_setup(pgdir_t boot_copy, pgdir_t *new_pd);
 physaddr_t arch_pgdir_get_cr3(pgdir_t pd);
 void arch_pgdir_clear(pgdir_t *pd);
+int arch_max_jumbo_page_shift(void);
 
 static inline page_t *ppn2page(size_t ppn)
 {
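A typical use of the new hook in arch-independent code would be to walk down
from the largest supported shift when mapping a big region. A minimal sketch,
not part of this commit (pick_map_shift() is invented, and the shift -= 9
step assumes x86_64-style levels at 12/21/30 bits):

	/* Pick the largest page shift that aligns va and pa and fits in len. */
	static int pick_map_shift(uintptr_t va, physaddr_t pa, size_t len)
	{
		int shift;

		for (shift = arch_max_jumbo_page_shift(); shift > PGSHIFT;
		     shift -= 9) {
			size_t pgsize = 1UL << shift;

			if (!(va & (pgsize - 1)) && !(pa & (pgsize - 1)) &&
			    len >= pgsize)
				break;
		}
		return shift;
	}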