[PARISC] Add CONFIG_DEBUG_RODATA to protect read-only data
arch/parisc/mm/init.c
/*
 *  linux/arch/parisc/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright 1999 SuSE GmbH
 *    changed by Philipp Rumpf
 *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 *  Copyright 2004 Randolph Chung (tausq@debian.org)
 *
 */

#include <linux/config.h>

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>          /* for hppa_dma_ops and pcxl_dma_ops */
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h>     /* for node_online_map */
#include <linux/pagemap.h>      /* for release_pages and page_cache_release */

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern char _text;      /* start of kernel code, defined by linker */
extern int  data_start;
extern char _end;       /* end of BSS, defined by linker */
extern char __init_begin, __init_end;

#ifdef CONFIG_DISCONTIGMEM
struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
bootmem_data_t bmem_data[MAX_NUMNODES] __read_mostly;
unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
#endif

static struct resource data_resource = {
        .name   = "Kernel data",
        .flags  = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource code_resource = {
        .name   = "Kernel code",
        .flags  = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource pdcdata_resource = {
        .name   = "PDC data (Page Zero)",
        .start  = 0,
        .end    = 0x9ff,
        .flags  = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;

/* The following array is initialized from the firmware-specific
 * information retrieved in kernel/inventory.c.
 */

physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
int npmem_ranges __read_mostly;

#ifdef __LP64__
#define MAX_MEM         (~0UL)
#else /* !__LP64__ */
#define MAX_MEM         (3584U*1024U*1024U)
#endif /* !__LP64__ */

static unsigned long mem_limit __read_mostly = MAX_MEM;

static void __init mem_limit_func(void)
{
        char *cp, *end;
        unsigned long limit;
        extern char saved_command_line[];

        /* We need this before __setup() functions are called */

        limit = MAX_MEM;
        for (cp = saved_command_line; *cp; ) {
                if (memcmp(cp, "mem=", 4) == 0) {
                        cp += 4;
                        limit = memparse(cp, &end);
                        if (end != cp)
                                break;
                        cp = end;
                } else {
                        while (*cp != ' ' && *cp)
                                ++cp;
                        while (*cp == ' ')
                                ++cp;
                }
        }

        if (limit < mem_limit)
                mem_limit = limit;
}
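
/*
 * Illustrative example (not part of the original source): booting
 * with "mem=512M" makes memparse() return 0x20000000, so mem_limit
 * drops to 512 MB and setup_bootmem() below truncates the physical
 * memory ranges to fit.  memparse() understands the usual K/M/G
 * suffixes.
 */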

#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)
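
/*
 * Worked arithmetic (illustrative): with the usual 4 kB pages
 * (PAGE_SHIFT == 12), MAX_GAP is 0x40000000 >> 12 == 262144 pages,
 * i.e. flat-memory kernels tolerate holes of up to 1 GB between
 * adjacent physical ranges before suggesting CONFIG_DISCONTIGMEM.
 */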

static void __init setup_bootmem(void)
{
        unsigned long bootmap_size;
        unsigned long mem_max;
        unsigned long bootmap_pages;
        unsigned long bootmap_start_pfn;
        unsigned long bootmap_pfn;
#ifndef CONFIG_DISCONTIGMEM
        physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
        int npmem_holes;
#endif
        int i, sysram_resource_count;

        disable_sr_hashing(); /* Turn off space register hashing */

        /*
         * Sort the ranges. Since the number of ranges is typically
         * small, and performance is not an issue here, just do
         * a simple insertion sort.
         */

        for (i = 1; i < npmem_ranges; i++) {
                int j;

                for (j = i; j > 0; j--) {
                        unsigned long tmp;

                        if (pmem_ranges[j-1].start_pfn <
                            pmem_ranges[j].start_pfn) {
                                break;
                        }
                        tmp = pmem_ranges[j-1].start_pfn;
                        pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
                        pmem_ranges[j].start_pfn = tmp;
                        tmp = pmem_ranges[j-1].pages;
                        pmem_ranges[j-1].pages = pmem_ranges[j].pages;
                        pmem_ranges[j].pages = tmp;
                }
        }

#ifndef CONFIG_DISCONTIGMEM
        /*
         * Throw out ranges that are too far apart (controlled by
         * MAX_GAP).
         */

        for (i = 1; i < npmem_ranges; i++) {
                if (pmem_ranges[i].start_pfn -
                        (pmem_ranges[i-1].start_pfn +
                         pmem_ranges[i-1].pages) > MAX_GAP) {
                        npmem_ranges = i;
                        printk("Large gap in memory detected (%ld pages). "
                               "Consider turning on CONFIG_DISCONTIGMEM\n",
                               pmem_ranges[i].start_pfn -
                               (pmem_ranges[i-1].start_pfn +
                                pmem_ranges[i-1].pages));
                        break;
                }
        }
#endif

        if (npmem_ranges > 1) {

                /* Print the memory ranges */

                printk(KERN_INFO "Memory Ranges:\n");

                for (i = 0; i < npmem_ranges; i++) {
                        unsigned long start;
                        unsigned long size;

                        size = (pmem_ranges[i].pages << PAGE_SHIFT);
                        start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
                        printk(KERN_INFO "%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
                                i, start, start + (size - 1), size >> 20);
                }
        }

        sysram_resource_count = npmem_ranges;
        for (i = 0; i < sysram_resource_count; i++) {
                struct resource *res = &sysram_resources[i];
                res->name = "System RAM";
                res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
                res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT) - 1;
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
                request_resource(&iomem_resource, res);
        }

        /*
         * For 32 bit kernels we limit the amount of memory we can
         * support, in order to preserve enough kernel address space
         * for other purposes. For 64 bit kernels we don't normally
         * limit the memory, but this mechanism can be used to
         * artificially limit the amount of memory (and it is written
         * to work with multiple memory ranges).
         */

        mem_limit_func();       /* check for "mem=" argument */

        mem_max = 0;
        num_physpages = 0;
        for (i = 0; i < npmem_ranges; i++) {
                unsigned long rsize;

                rsize = pmem_ranges[i].pages << PAGE_SHIFT;
                if ((mem_max + rsize) > mem_limit) {
                        printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
                        if (mem_max == mem_limit)
                                npmem_ranges = i;
                        else {
                                pmem_ranges[i].pages =   (mem_limit >> PAGE_SHIFT)
                                                       - (mem_max >> PAGE_SHIFT);
                                npmem_ranges = i + 1;
                                mem_max = mem_limit;
                        }
                        num_physpages += pmem_ranges[i].pages;
                        break;
                }
                num_physpages += pmem_ranges[i].pages;
                mem_max += rsize;
        }

        printk(KERN_INFO "Total Memory: %ld MB\n", mem_max >> 20);

#ifndef CONFIG_DISCONTIGMEM
        /* Merge the ranges, keeping track of the holes */

        {
                unsigned long end_pfn;
                unsigned long hole_pages;

                npmem_holes = 0;
                end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
                for (i = 1; i < npmem_ranges; i++) {

                        hole_pages = pmem_ranges[i].start_pfn - end_pfn;
                        if (hole_pages) {
                                pmem_holes[npmem_holes].start_pfn = end_pfn;
                                pmem_holes[npmem_holes++].pages = hole_pages;
                                end_pfn += hole_pages;
                        }
                        end_pfn += pmem_ranges[i].pages;
                }

                pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
                npmem_ranges = 1;
        }
#endif

        bootmap_pages = 0;
        for (i = 0; i < npmem_ranges; i++)
                bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages);

        bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT;

#ifdef CONFIG_DISCONTIGMEM
        for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
                memset(NODE_DATA(i), 0, sizeof(pg_data_t));
                NODE_DATA(i)->bdata = &bmem_data[i];
        }
        memset(pfnnid_map, 0xff, sizeof(pfnnid_map));

        for (i = 0; i < npmem_ranges; i++)
                node_set_online(i);
#endif

        /*
         * Initialize and free the full span of pages in each memory range.
         * Note that the only writing these routines do is to the bootmap,
         * and we've made sure to locate the bootmap properly so that they
         * won't be writing over anything important.
         */

        bootmap_pfn = bootmap_start_pfn;
        max_pfn = 0;
        for (i = 0; i < npmem_ranges; i++) {
                unsigned long start_pfn;
                unsigned long npages;

                start_pfn = pmem_ranges[i].start_pfn;
                npages = pmem_ranges[i].pages;

                bootmap_size = init_bootmem_node(NODE_DATA(i),
                                                bootmap_pfn,
                                                start_pfn,
                                                (start_pfn + npages));
                free_bootmem_node(NODE_DATA(i),
                                  (start_pfn << PAGE_SHIFT),
                                  (npages << PAGE_SHIFT));
                bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
                if ((start_pfn + npages) > max_pfn)
                        max_pfn = start_pfn + npages;
        }

        /* The IOMMU is always used to access "high mem" on boxes that
         * can hold more memory than a PCI device can directly DMA to.
         * ISA DMA support will need to revisit this.
         */
        max_low_pfn = max_pfn;

        if ((bootmap_pfn - bootmap_start_pfn) != bootmap_pages) {
                printk(KERN_WARNING "WARNING! bootmap sizing is messed up!\n");
                BUG();
        }

        /* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

        reserve_bootmem_node(NODE_DATA(0), 0UL,
                        (unsigned long)(PAGE0->mem_free + PDC_CONSOLE_IO_IODC_SIZE));
        reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)&_text),
                        (unsigned long)(&_end - &_text));
        reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
                        ((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT));

#ifndef CONFIG_DISCONTIGMEM

        /* reserve the holes */

        for (i = 0; i < npmem_holes; i++) {
                reserve_bootmem_node(NODE_DATA(0),
                                (pmem_holes[i].start_pfn << PAGE_SHIFT),
                                (pmem_holes[i].pages << PAGE_SHIFT));
        }
#endif

#ifdef CONFIG_BLK_DEV_INITRD
        if (initrd_start) {
                printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
                if (__pa(initrd_start) < mem_max) {
                        unsigned long initrd_reserve;

                        if (__pa(initrd_end) > mem_max) {
                                initrd_reserve = mem_max - __pa(initrd_start);
                        } else {
                                initrd_reserve = initrd_end - initrd_start;
                        }
                        initrd_below_start_ok = 1;
                        printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n",
                                __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);

                        reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start), initrd_reserve);
                }
        }
#endif

        data_resource.start = virt_to_phys(&data_start);
        data_resource.end = virt_to_phys(&_end) - 1;
        code_resource.start = virt_to_phys(&_text);
        code_resource.end = virt_to_phys(&data_start) - 1;

        /* We don't know which region the kernel will be in, so try
         * all of them.
         */
        for (i = 0; i < sysram_resource_count; i++) {
                struct resource *res = &sysram_resources[i];
                request_resource(res, &code_resource);
                request_resource(res, &data_resource);
        }
        request_resource(&sysram_resources[0], &pdcdata_resource);
}

void free_initmem(void)
{
        /* FIXME: */
#if 0
        printk(KERN_INFO "NOT FREEING INITMEM (%dk)\n",
                        (&__init_end - &__init_begin) >> 10);
        return;
#else
        unsigned long addr;

        printk(KERN_INFO "Freeing unused kernel memory: ");

#if 1
        /* Attempt to catch anyone trying to execute code here
         * by filling the page with BRK insns.
         *
         * If we disable interrupts for all CPUs, then IPI stops working.
         * Kinda breaks the global cache flushing.
         */
        local_irq_disable();

        memset(&__init_begin, 0x00,
                (unsigned long)&__init_end - (unsigned long)&__init_begin);

        flush_data_cache();
        asm volatile("sync" : : );
        flush_icache_range((unsigned long)&__init_begin, (unsigned long)&__init_end);
        asm volatile("sync" : : );

        local_irq_enable();
#endif

        addr = (unsigned long)(&__init_begin);
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                set_page_count(virt_to_page(addr), 1);
                free_page(addr);
                num_physpages++;
                totalram_pages++;
        }

        /* set up a new LED state on systems shipped with an LED State panel */
        pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);

        printk("%luk freed\n", (unsigned long)(&__init_end - &__init_begin) >> 10);
#endif
}


#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
        extern char __start_rodata, __end_rodata;
        /* rodata memory was already mapped with KERNEL_RO access rights by
           pagetable_init() and map_pages(). No need to do additional stuff here */
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
                (unsigned long)(&__end_rodata - &__start_rodata) >> 10);
}
#endif
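
/*
 * Illustrative effect of CONFIG_DEBUG_RODATA (hypothetical snippet,
 * not part of this file): a stray kernel write such as
 *
 *      *(int *)some_rodata_address = 0;
 *
 * takes a protection fault instead of silently corrupting read-only
 * data, because map_pages() below gives the pages between _text and
 * data_start (which include the rodata section) KERNEL_RO rights.
 */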

/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between the top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas, so that any out-of-bounds memory accesses
 * will hopefully be caught. The vmalloc() routines leave a hole of
 * 4kB between each vmalloced area for the same reason.
 */

 /* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
                                     & ~(VM_MAP_OFFSET-1)))
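
/*
 * Worked example (illustrative): SET_MAP_OFFSET(0x11000) computes
 * (0x11000 + 0x8000) & ~0x7fff == 0x18000, so the result is always
 * 32 kB-aligned and strictly above the argument.  An already aligned
 * 0x10000 also yields 0x18000, i.e. a full 32 kB hole is inserted in
 * every case.
 */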

void *vmalloc_start __read_mostly;
EXPORT_SYMBOL(vmalloc_start);

#ifdef CONFIG_PA11
unsigned long pcxl_dma_start __read_mostly;
#endif

void __init mem_init(void)
{
        high_memory = __va((max_pfn << PAGE_SHIFT));

#ifndef CONFIG_DISCONTIGMEM
        max_mapnr = page_to_pfn(virt_to_page(high_memory - 1)) + 1;
        totalram_pages += free_all_bootmem();
#else
        {
                int i;

                for (i = 0; i < npmem_ranges; i++)
                        totalram_pages += free_all_bootmem_node(NODE_DATA(i));
        }
#endif

        printk(KERN_INFO "Memory: %luk available\n", num_physpages << (PAGE_SHIFT-10));

#ifdef CONFIG_PA11
        if (hppa_dma_ops == &pcxl_dma_ops) {
                pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
                vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start + PCXL_DMA_MAP_SIZE);
        } else {
                pcxl_dma_start = 0;
                vmalloc_start = SET_MAP_OFFSET(MAP_START);
        }
#else
        vmalloc_start = SET_MAP_OFFSET(MAP_START);
#endif
}
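
/*
 * Resulting virtual layout (illustrative sketch):
 *
 *      MAP_START
 *        ... 32 kB hole ...  pcxl dma map area (PCXL_DMA_MAP_SIZE,
 *                            only on PA1.1 machines using pcxl_dma_ops)
 *        ... 32 kB hole ...  vmalloc area
 *
 * On machines without the pcxl dma area, vmalloc_start sits a single
 * SET_MAP_OFFSET() step above MAP_START.
 */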

unsigned long *empty_zero_page __read_mostly;

void show_mem(void)
{
        int i, free = 0, total = 0, reserved = 0;
        int shared = 0, cached = 0;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
        printk(KERN_INFO "Free swap:     %6ldkB\n",
                                nr_swap_pages<<(PAGE_SHIFT-10));
#ifndef CONFIG_DISCONTIGMEM
        i = max_mapnr;
        while (i-- > 0) {
                total++;
                if (PageReserved(mem_map+i))
                        reserved++;
                else if (PageSwapCache(mem_map+i))
                        cached++;
                else if (!page_count(&mem_map[i]))
                        free++;
                else
                        shared += page_count(&mem_map[i]) - 1;
        }
#else
        for (i = 0; i < npmem_ranges; i++) {
                int j;

                for (j = node_start_pfn(i); j < node_end_pfn(i); j++) {
                        struct page *p;
                        unsigned long flags;

                        pgdat_resize_lock(NODE_DATA(i), &flags);
                        p = nid_page_nr(i, j - node_start_pfn(i));

                        total++;
                        if (PageReserved(p))
                                reserved++;
                        else if (PageSwapCache(p))
                                cached++;
                        else if (!page_count(p))
                                free++;
                        else
                                shared += page_count(p) - 1;
                        pgdat_resize_unlock(NODE_DATA(i), &flags);
                }
        }
#endif
        printk(KERN_INFO "%d pages of RAM\n", total);
        printk(KERN_INFO "%d reserved pages\n", reserved);
        printk(KERN_INFO "%d pages shared\n", shared);
        printk(KERN_INFO "%d pages swap cached\n", cached);

#ifdef CONFIG_DISCONTIGMEM
        {
                struct zonelist *zl;
                int i, j, k;

                for (i = 0; i < npmem_ranges; i++) {
                        for (j = 0; j < MAX_NR_ZONES; j++) {
                                zl = NODE_DATA(i)->node_zonelists + j;

                                printk("Zone list for zone %d on node %d: ", j, i);
                                for (k = 0; zl->zones[k] != NULL; k++)
                                        printk("[%d/%s] ", zl->zones[k]->zone_pgdat->node_id, zl->zones[k]->name);
                                printk("\n");
                        }
                }
        }
#endif
}


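/*
 * map_pages() fills in kernel page table entries so that the physical
 * range [start_paddr, start_paddr + size) appears at virtual address
 * start_vaddr with protection pgprot.  Pages between _text and
 * data_start (kernel text and rodata) are forced to PAGE_KERNEL_RO,
 * except for the fault vector, which must stay writable so the HPMC
 * checksum can be updated, and the gateway page.
 */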
static void __init map_pages(unsigned long start_vaddr, unsigned long start_paddr,
                             unsigned long size, pgprot_t pgprot)
{
        pgd_t *pg_dir;
        pmd_t *pmd;
        pte_t *pg_table;
        unsigned long end_paddr;
        unsigned long start_pmd;
        unsigned long start_pte;
        unsigned long tmp1;
        unsigned long tmp2;
        unsigned long address;
        unsigned long ro_start;
        unsigned long ro_end;
        unsigned long fv_addr;
        unsigned long gw_addr;
        extern const unsigned long fault_vector_20;
        extern void * const linux_gateway_page;

        ro_start = __pa((unsigned long)&_text);
        ro_end   = __pa((unsigned long)&data_start);
        fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
        gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;

        end_paddr = start_paddr + size;

        pg_dir = pgd_offset_k(start_vaddr);

#if PTRS_PER_PMD == 1
        start_pmd = 0;
#else
        start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
        start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

        address = start_paddr;
        while (address < end_paddr) {
#if PTRS_PER_PMD == 1
                pmd = (pmd_t *)__pa(pg_dir);
#else
                pmd = (pmd_t *)pgd_address(*pg_dir);

                /*
                 * pmd is physical at this point
                 */

                if (!pmd) {
                        pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE << PMD_ORDER);
                        pmd = (pmd_t *) __pa(pmd);
                }

                pgd_populate(NULL, pg_dir, __va(pmd));
#endif
                pg_dir++;

                /* now change pmd to kernel virtual addresses */

                pmd = (pmd_t *)__va(pmd) + start_pmd;
                for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {

                        /*
                         * pg_table is physical at this point
                         */

                        pg_table = (pte_t *)pmd_address(*pmd);
                        if (!pg_table) {
                                pg_table = (pte_t *)
                                        alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
                                pg_table = (pte_t *) __pa(pg_table);
                        }

                        pmd_populate_kernel(NULL, pmd, __va(pg_table));

                        /* now change pg_table to kernel virtual addresses */

                        pg_table = (pte_t *) __va(pg_table) + start_pte;
                        for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
                                pte_t pte;

                                /*
                                 * Map the fault vector writable so we can
                                 * write the HPMC checksum.
                                 */
                                if (address >= ro_start && address < ro_end
                                                        && address != fv_addr
                                                        && address != gw_addr)
                                        pte = __mk_pte(address, PAGE_KERNEL_RO);
                                else
                                        pte = __mk_pte(address, pgprot);

                                if (address >= end_paddr)
                                        pte_val(pte) = 0;

                                set_pte(pg_table, pte);

                                address += PAGE_SIZE;
                        }
                        start_pte = 0;

                        if (address >= end_paddr)
                                break;
                }
                start_pmd = 0;
        }
}

/*
 * pagetable_init() sets up the page tables.
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
        int range;

        /* Map each physical memory range to its kernel vaddr */

        for (range = 0; range < npmem_ranges; range++) {
                unsigned long start_paddr;
                unsigned long end_paddr;
                unsigned long size;

                start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
                end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
                size = pmem_ranges[range].pages << PAGE_SHIFT;

                map_pages((unsigned long)__va(start_paddr), start_paddr,
                        size, PAGE_KERNEL);
        }

#ifdef CONFIG_BLK_DEV_INITRD
        if (initrd_end && initrd_end > mem_limit) {
                printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
                map_pages(initrd_start, __pa(initrd_start),
                        initrd_end - initrd_start, PAGE_KERNEL);
        }
#endif

        empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
        memset(empty_zero_page, 0, PAGE_SIZE);
}

static void __init gateway_init(void)
{
        unsigned long linux_gateway_page_addr;
        /* FIXME: This is 'const' in order to trick the compiler
           into not treating it as DP-relative data. */
        extern void * const linux_gateway_page;

        linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

        /*
         * Setup Linux Gateway page.
         *
         * The Linux gateway page will reside in kernel space (on virtual
         * page 0), so it doesn't need to be aliased into user space.
         */

        map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
                PAGE_SIZE, PAGE_GATEWAY);
}

#ifdef CONFIG_HPUX
void
map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm)
{
        pgd_t *pg_dir;
        pmd_t *pmd;
        pte_t *pg_table;
        unsigned long start_pmd;
        unsigned long start_pte;
        unsigned long address;
        unsigned long hpux_gw_page_addr;
        /* FIXME: This is 'const' in order to trick the compiler
           into not treating it as DP-relative data. */
        extern void * const hpux_gateway_page;

        hpux_gw_page_addr = HPUX_GATEWAY_ADDR & PAGE_MASK;

        /*
         * Setup HP-UX Gateway page.
         *
         * The HP-UX gateway page resides in the user address space,
         * so it needs to be aliased into each process.
         */

        pg_dir = pgd_offset(mm, hpux_gw_page_addr);

#if PTRS_PER_PMD == 1
        start_pmd = 0;
#else
        start_pmd = ((hpux_gw_page_addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
        start_pte = ((hpux_gw_page_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

        address = __pa(&hpux_gateway_page);
#if PTRS_PER_PMD == 1
        pmd = (pmd_t *)__pa(pg_dir);
#else
        pmd = (pmd_t *) pgd_address(*pg_dir);

        /*
         * pmd is physical at this point
         */

        if (!pmd) {
                pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL);
                pmd = (pmd_t *) __pa(pmd);
        }

        __pgd_val_set(*pg_dir, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pmd);
#endif
        /* now change pmd to kernel virtual addresses */

        pmd = (pmd_t *)__va(pmd) + start_pmd;

        /*
         * pg_table is physical at this point
         */

        pg_table = (pte_t *) pmd_address(*pmd);
        if (!pg_table)
                pg_table = (pte_t *) __pa(get_zeroed_page(GFP_KERNEL));

        __pmd_val_set(*pmd, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pg_table);

        /* now change pg_table to kernel virtual addresses */

        pg_table = (pte_t *) __va(pg_table) + start_pte;
        set_pte(pg_table, __mk_pte(address, PAGE_GATEWAY));
}
EXPORT_SYMBOL(map_hpux_gateway_page);
#endif

void __init paging_init(void)
{
        int i;

        setup_bootmem();
        pagetable_init();
        gateway_init();
        flush_cache_all_local(); /* start with known state */
        flush_tlb_all_local(NULL);

        for (i = 0; i < npmem_ranges; i++) {
                unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0 };

                /* We have an IOMMU, so all memory can go into a single
                   ZONE_DMA zone. */
                zones_size[ZONE_DMA] = pmem_ranges[i].pages;

#ifdef CONFIG_DISCONTIGMEM
                /* Need to initialize the pfnnid_map before we can initialize
                   the zone */
                {
                    int j;
                    for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
                         j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
                         j++) {
                        pfnnid_map[j] = i;
                    }
                }
#endif

                free_area_init_node(i, NODE_DATA(i), zones_size,
                                pmem_ranges[i].start_pfn, NULL);
        }
}

#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection id's, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space id's and
 * protection id's. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection id's, so that is the limiting factor.
 * PCXT' has 18 bit protection id's, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE  (NR_SPACE_IDS / (8 * sizeof(long)))
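
/*
 * Sizing (illustrative): with PA20's 262144 space ids the bitmap
 * below occupies NR_SPACE_IDS / 8 == 32 kB regardless of the word
 * size; the "{ 1 }" initializer sets bit 0 of space_id[] so that
 * space id 0 is never handed out.
 */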

static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static DEFINE_SPINLOCK(sid_lock);

unsigned long alloc_sid(void)
{
        unsigned long index;

        spin_lock(&sid_lock);

        if (free_space_ids == 0) {
                if (dirty_space_ids != 0) {
                        spin_unlock(&sid_lock);
                        flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
                        spin_lock(&sid_lock);
                }
                if (free_space_ids == 0)
                        BUG();
        }

        free_space_ids--;

        index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
        space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
        space_id_index = index;

        spin_unlock(&sid_lock);

        return index << SPACEID_SHIFT;
}

void free_sid(unsigned long spaceid)
{
        unsigned long index = spaceid >> SPACEID_SHIFT;
        unsigned long *dirty_space_offset;

        dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
        index &= (BITS_PER_LONG - 1);

        spin_lock(&sid_lock);

        if (*dirty_space_offset & (1L << index))
                BUG(); /* attempt to free space id twice */

        *dirty_space_offset |= (1L << index);
        dirty_space_ids++;

        spin_unlock(&sid_lock);
}
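
/*
 * Typical lifetime (illustrative sketch, assuming the usual
 * mmu_context helpers): a space id backs an address space from
 * creation to teardown, e.g.
 *
 *      mm->context = alloc_sid();
 *      ...
 *      free_sid(mm->context);
 *
 * Note that alloc_sid() returns the id pre-shifted by SPACEID_SHIFT,
 * and free_sid() only marks it dirty: the id becomes allocatable
 * again once flush_tlb_all() has purged the TLB and recycle_sids()
 * has moved it back into space_id[].
 */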


#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
{
        int i;

        /* NOTE: sid_lock must be held upon entry */

        *ndirtyptr = dirty_space_ids;
        if (dirty_space_ids != 0) {
                for (i = 0; i < SID_ARRAY_SIZE; i++) {
                        dirty_array[i] = dirty_space_id[i];
                        dirty_space_id[i] = 0;
                }
                dirty_space_ids = 0;
        }

        return;
}

static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
{
        int i;

        /* NOTE: sid_lock must be held upon entry */

        if (ndirty != 0) {
                for (i = 0; i < SID_ARRAY_SIZE; i++) {
                        space_id[i] ^= dirty_array[i];
                }

                free_space_ids += ndirty;
                space_id_index = 0;
        }
}
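
/*
 * The XOR works because every dirty id still has its bit set in
 * space_id[]: e.g. (illustrative) space_id[i] == 0b1011 with
 * dirty_array[i] == 0b0010 leaves 0b1001, clearing exactly the
 * recycled id and nothing else.
 */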

#else /* CONFIG_SMP */

static void recycle_sids(void)
{
        int i;

        /* NOTE: sid_lock must be held upon entry */

        if (dirty_space_ids != 0) {
                for (i = 0; i < SID_ARRAY_SIZE; i++) {
                        space_id[i] ^= dirty_space_id[i];
                        dirty_space_id[i] = 0;
                }

                free_space_ids += dirty_space_ids;
                dirty_space_ids = 0;
                space_id_index = 0;
        }
}
#endif

/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse = 0;

void flush_tlb_all(void)
{
        int do_recycle;

        do_recycle = 0;
        spin_lock(&sid_lock);
        if (dirty_space_ids > RECYCLE_THRESHOLD) {
                if (recycle_inuse) {
                        BUG();  /* FIXME: Use a semaphore/wait queue here */
                }
                get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
                recycle_inuse++;
                do_recycle++;
        }
        spin_unlock(&sid_lock);
        on_each_cpu(flush_tlb_all_local, NULL, 1, 1);
        if (do_recycle) {
                spin_lock(&sid_lock);
                recycle_sids(recycle_ndirty, recycle_dirty_array);
                recycle_inuse = 0;
                spin_unlock(&sid_lock);
        }
}
#else
void flush_tlb_all(void)
{
        spin_lock(&sid_lock);
        flush_tlb_all_local(NULL);
        recycle_sids();
        spin_unlock(&sid_lock);
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
#if 0
        if (start < end)
                printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                set_page_count(virt_to_page(start), 1);
                free_page(start);
                num_physpages++;
                totalram_pages++;
        }
#endif
}
#endif