diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 4eb2f52b87a..1281c609ee9 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -382,13 +382,22 @@ ia64_mmu_init (void *my_cpu_data)
 
 	if (impl_va_bits < 51 || impl_va_bits > 61)
 		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
 
+	/*
+	 * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need,
+	 * which must fit into "vmlpt_bits - pte_bits" slots. Second half of
+	 * the test makes sure that our mapped space doesn't overlap the
+	 * unimplemented hole in the middle of the region.
+	 */
+	if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
+	    (mapped_space_bits > impl_va_bits - 1))
+		panic("Cannot build a big enough virtual-linear page table"
+		      " to cover mapped address space.\n"
+		      " Try using a smaller page size.\n");
+
 	/* place the VMLPT at the end of each page-table mapped region: */
 	pta = POW2(61) - POW2(vmlpt_bits);
 
-	if (POW2(mapped_space_bits) >= pta)
-		panic("mm/init: overlap between virtually mapped linear page table and "
-		      "mapped kernel space!");
 	/*
 	 * Set the (virtually mapped linear) page table address. Bit
 	 * 8 selects between the short and long format, bits 2-7 the
@@ -597,7 +606,8 @@ mem_init (void)
 	kclist_add(&kcore_kernel, _stext, _end - _stext);
 
 	for_each_pgdat(pgdat)
-		totalram_pages += free_all_bootmem_node(pgdat);
+		if (pgdat->bdata->node_bootmem_map)
+			totalram_pages += free_all_bootmem_node(pgdat);
 
 	reserved_pages = 0;
 	efi_memmap_walk(count_reserved_pages, &reserved_pages);
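
The arithmetic behind the new check in the first hunk can be verified outside
the kernel. The sketch below is a standalone model, not kernel code: it
assumes the definitions that surround ia64_mmu_init() in this file
(pte_bits = 3 for 8-byte PTEs, mapped_space_bits = 3*(PAGE_SHIFT - pte_bits)
+ PAGE_SHIFT for a three-level table, vmlpt_bits = impl_va_bits - PAGE_SHIFT
+ pte_bits), and impl_va_bits = 51 models the minimum IMPL_VA_MSB of 50 that
the earlier sanity check allows.

#include <stdio.h>

static int vmlpt_fits(unsigned long page_shift, unsigned long impl_va_bits)
{
	const unsigned long pte_bits = 3;	/* log2(sizeof(pte_t)), 8-byte PTEs */
	unsigned long mapped_space_bits = 3 * (page_shift - pte_bits) + page_shift;
	unsigned long vmlpt_bits = impl_va_bits - page_shift + pte_bits;

	/* first half: the PTEs we need must fit into the VMLPT's slots */
	if (mapped_space_bits - page_shift > vmlpt_bits - pte_bits)
		return 0;
	/* second half: mapped space must stay below the unimplemented hole */
	if (mapped_space_bits > impl_va_bits - 1)
		return 0;
	return 1;
}

int main(void)
{
	/* 16KB pages: 33 pte bits <= 37 slot bits, and 47 <= 50 -> fits */
	printf("16KB pages: %s\n", vmlpt_fits(14, 51) ? "fits" : "panics");
	/* 64KB pages: 39 pte bits > 35 slot bits, and 55 > 50 -> panics */
	printf("64KB pages: %s\n", vmlpt_fits(16, 51) ? "fits" : "panics");
	return 0;
}

Under these assumptions 16KB pages pass both halves of the test, while 64KB
pages fail both, which is exactly the configuration the panic message's
"smaller page size" hint is aimed at.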
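
The second hunk is a plain NULL guard: on DISCONTIGMEM systems a node (for
instance a CPU-only node) can reach mem_init() without ever having been given
a bootmem map, and free_all_bootmem_node() assumes bdata->node_bootmem_map is
valid. A minimal sketch of the same pattern, using hypothetical stand-in
types and a stub in place of pg_data_t and free_all_bootmem_node():

#include <stddef.h>

struct bootmem_data { void *node_bootmem_map; };	/* NULL: node has no bootmem */
struct node_data    { struct bootmem_data *bdata; };

/* stands in for free_all_bootmem_node(), which walks node_bootmem_map */
static unsigned long release_node_bootmem(struct node_data *pgdat)
{
	(void)pgdat;
	return 0;
}

unsigned long free_all_nodes(struct node_data *nodes, int nr_nodes)
{
	unsigned long totalram_pages = 0;
	int i;

	for (i = 0; i < nr_nodes; i++)
		if (nodes[i].bdata->node_bootmem_map)	/* the patched check */
			totalram_pages += release_node_bootmem(&nodes[i]);
	return totalram_pages;
}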