/*
 *  linux/arch/parisc/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright 1999 SuSE GmbH
 *    changed by Philipp Rumpf
 *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 *  Copyright 2004 Randolph Chung (tausq@debian.org)
 *  Copyright 2006-2007 Helge Deller (deller@gmx.de)
 *
 */


#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>          /* for hppa_dma_ops and pcxl_dma_ops */
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h>     /* for node_online_map */
#include <linux/pagemap.h>      /* for release_pages and page_cache_release */

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
#include <asm/sections.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern int  data_start;

#ifdef CONFIG_DISCONTIGMEM
struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
#endif

static struct resource data_resource = {
        .name   = "Kernel data",
        .flags  = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource code_resource = {
        .name   = "Kernel code",
        .flags  = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource pdcdata_resource = {
        .name   = "PDC data (Page Zero)",
        .start  = 0,
        .end    = 0x9ff,
        .flags  = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;

/* The following array is initialized from the firmware-specific
 * information retrieved in kernel/inventory.c.
 */

physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
int npmem_ranges __read_mostly;

#ifdef CONFIG_64BIT
#define MAX_MEM         (~0UL)
#else /* !CONFIG_64BIT */
#define MAX_MEM         (3584U*1024U*1024U)
#endif /* !CONFIG_64BIT */
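
/* Note: 3584 MB is 3.5 GB. As the comment in setup_bootmem() below
 * explains, 32-bit kernels cap usable RAM in order to preserve enough
 * kernel address space for other purposes (e.g. vmalloc and I/O maps).
 */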

static unsigned long mem_limit __read_mostly = MAX_MEM;

static void __init mem_limit_func(void)
{
        char *cp, *end;
        unsigned long limit;

        /* We need this before __setup() functions are called */

        limit = MAX_MEM;
        for (cp = boot_command_line; *cp; ) {
                if (memcmp(cp, "mem=", 4) == 0) {
                        cp += 4;
                        limit = memparse(cp, &end);
                        if (end != cp)
                                break;
                        /* a bare "mem=" parsed nothing; fall through
                         * and skip the token so we don't loop forever */
                }
                while (*cp != ' ' && *cp)
                        ++cp;
                while (*cp == ' ')
                        ++cp;
        }

        if (limit < mem_limit)
                mem_limit = limit;
}
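
/* For example, booting with "mem=512M" caps usable RAM at 512 MB;
 * memparse() accepts the usual K/M/G suffixes.
 */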

#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)
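
/* 0x40000000 bytes is 1 GB, so MAX_GAP is 1 GB worth of page frames
 * (262144 pfns with 4 kB pages). Ranges separated by more than this
 * are discarded on !CONFIG_DISCONTIGMEM kernels (see below).
 */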

static void __init setup_bootmem(void)
{
        unsigned long bootmap_size;
        unsigned long mem_max;
        unsigned long bootmap_pages;
        unsigned long bootmap_start_pfn;
        unsigned long bootmap_pfn;
#ifndef CONFIG_DISCONTIGMEM
        physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
        int npmem_holes;
#endif
        int i, sysram_resource_count;

        disable_sr_hashing(); /* Turn off space register hashing */

        /*
         * Sort the ranges. Since the number of ranges is typically
         * small, and performance is not an issue here, just do
         * a simple insertion sort.
         */

        for (i = 1; i < npmem_ranges; i++) {
                int j;

                for (j = i; j > 0; j--) {
                        unsigned long tmp;

                        if (pmem_ranges[j-1].start_pfn <
                            pmem_ranges[j].start_pfn)
                                break;
                        tmp = pmem_ranges[j-1].start_pfn;
                        pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
                        pmem_ranges[j].start_pfn = tmp;
                        tmp = pmem_ranges[j-1].pages;
                        pmem_ranges[j-1].pages = pmem_ranges[j].pages;
                        pmem_ranges[j].pages = tmp;
                }
        }

#ifndef CONFIG_DISCONTIGMEM
        /*
         * Throw out ranges that are too far apart (controlled by
         * MAX_GAP).
         */

        for (i = 1; i < npmem_ranges; i++) {
                if (pmem_ranges[i].start_pfn -
                        (pmem_ranges[i-1].start_pfn +
                         pmem_ranges[i-1].pages) > MAX_GAP) {
                        npmem_ranges = i;
                        printk(KERN_WARNING
                               "Large gap in memory detected (%ld pages). "
                               "Consider turning on CONFIG_DISCONTIGMEM\n",
                               pmem_ranges[i].start_pfn -
                               (pmem_ranges[i-1].start_pfn +
                                pmem_ranges[i-1].pages));
                        break;
                }
        }
#endif

        if (npmem_ranges > 1) {

                /* Print the memory ranges */

                printk(KERN_INFO "Memory Ranges:\n");

                for (i = 0; i < npmem_ranges; i++) {
                        unsigned long start;
                        unsigned long size;

                        size = (pmem_ranges[i].pages << PAGE_SHIFT);
                        start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
                        printk(KERN_INFO "%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
                                i, start, start + (size - 1), size >> 20);
                }
        }

        sysram_resource_count = npmem_ranges;
        for (i = 0; i < sysram_resource_count; i++) {
                struct resource *res = &sysram_resources[i];
                res->name = "System RAM";
                res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
                res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT) - 1;
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
                request_resource(&iomem_resource, res);
        }

        /*
         * For 32 bit kernels we limit the amount of memory we can
         * support, in order to preserve enough kernel address space
         * for other purposes. For 64 bit kernels we don't normally
         * limit the memory, but this mechanism can be used to
         * artificially limit the amount of memory (and it is written
         * to work with multiple memory ranges).
         */

        mem_limit_func();       /* check for "mem=" argument */

        mem_max = 0;
        num_physpages = 0;
        for (i = 0; i < npmem_ranges; i++) {
                unsigned long rsize;

                rsize = pmem_ranges[i].pages << PAGE_SHIFT;
                if ((mem_max + rsize) > mem_limit) {
                        printk(KERN_WARNING "Memory truncated to %ld MB\n",
                                mem_limit >> 20);
                        if (mem_max == mem_limit)
                                npmem_ranges = i;
                        else {
                                pmem_ranges[i].pages =   (mem_limit >> PAGE_SHIFT)
                                                       - (mem_max >> PAGE_SHIFT);
                                npmem_ranges = i + 1;
                                mem_max = mem_limit;
                        }
                        num_physpages += pmem_ranges[i].pages;
                        break;
                }
                num_physpages += pmem_ranges[i].pages;
                mem_max += rsize;
        }

        printk(KERN_INFO "Total Memory: %ld MB\n", mem_max >> 20);

#ifndef CONFIG_DISCONTIGMEM
        /* Merge the ranges, keeping track of the holes */

        {
                unsigned long end_pfn;
                unsigned long hole_pages;

                npmem_holes = 0;
                end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
                for (i = 1; i < npmem_ranges; i++) {

                        hole_pages = pmem_ranges[i].start_pfn - end_pfn;
                        if (hole_pages) {
                                pmem_holes[npmem_holes].start_pfn = end_pfn;
                                pmem_holes[npmem_holes++].pages = hole_pages;
                                end_pfn += hole_pages;
                        }
                        end_pfn += pmem_ranges[i].pages;
                }

                pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
                npmem_ranges = 1;
        }
#endif
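
/* After this merge a !CONFIG_DISCONTIGMEM kernel sees one contiguous
 * range, so a single flat mem_map can cover it; the holes recorded
 * above are re-reserved further down so their page structs are never
 * handed to the allocator.
 */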

        bootmap_pages = 0;
        for (i = 0; i < npmem_ranges; i++)
                bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages);

        bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT;

#ifdef CONFIG_DISCONTIGMEM
        for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
                memset(NODE_DATA(i), 0, sizeof(pg_data_t));
                NODE_DATA(i)->bdata = &bootmem_node_data[i];
        }
        memset(pfnnid_map, 0xff, sizeof(pfnnid_map));

        for (i = 0; i < npmem_ranges; i++)
                node_set_online(i);
#endif

        /*
         * Initialize and free the full range of memory in each range.
         * Note that the only writing these routines do is to the bootmap,
         * and we've made sure to locate the bootmap properly so that they
         * won't be writing over anything important.
         */

        bootmap_pfn = bootmap_start_pfn;
        max_pfn = 0;
        for (i = 0; i < npmem_ranges; i++) {
                unsigned long start_pfn;
                unsigned long npages;

                start_pfn = pmem_ranges[i].start_pfn;
                npages = pmem_ranges[i].pages;

                bootmap_size = init_bootmem_node(NODE_DATA(i),
                                                bootmap_pfn,
                                                start_pfn,
                                                (start_pfn + npages) );
                free_bootmem_node(NODE_DATA(i),
                                  (start_pfn << PAGE_SHIFT),
                                  (npages << PAGE_SHIFT) );
                bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
                if ((start_pfn + npages) > max_pfn)
                        max_pfn = start_pfn + npages;
        }

        /* An IOMMU is always used to access "high mem" on boxes with so
         * much memory that a PCI device could not DMA directly to every
         * physical address.
         * ISA DMA support will need to revisit this.
         */
        max_low_pfn = max_pfn;

        /* bootmap sizing messed up? */
        BUG_ON((bootmap_pfn - bootmap_start_pfn) != bootmap_pages);
        /* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

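        /* PAGE0 is the firmware's "page zero"; mem_free is, as we read
         * it, the first free address above the PDC-reserved low memory,
         * and we keep a further PDC_CONSOLE_IO_IODC_SIZE bytes above
         * that for console IODC use.
         */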
        reserve_bootmem_node(NODE_DATA(0), 0UL,
                        (unsigned long)(PAGE0->mem_free +
                                PDC_CONSOLE_IO_IODC_SIZE), BOOTMEM_DEFAULT);
        reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)_text),
                        (unsigned long)(_end - _text), BOOTMEM_DEFAULT);
        reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
                        ((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT),
                        BOOTMEM_DEFAULT);

#ifndef CONFIG_DISCONTIGMEM

        /* reserve the holes */

        for (i = 0; i < npmem_holes; i++) {
                reserve_bootmem_node(NODE_DATA(0),
                                (pmem_holes[i].start_pfn << PAGE_SHIFT),
                                (pmem_holes[i].pages << PAGE_SHIFT),
                                BOOTMEM_DEFAULT);
        }
#endif

#ifdef CONFIG_BLK_DEV_INITRD
        if (initrd_start) {
                printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
                if (__pa(initrd_start) < mem_max) {
                        unsigned long initrd_reserve;

                        if (__pa(initrd_end) > mem_max) {
                                initrd_reserve = mem_max - __pa(initrd_start);
                        } else {
                                initrd_reserve = initrd_end - initrd_start;
                        }
                        initrd_below_start_ok = 1;
                        printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n",
                                __pa(initrd_start),
                                __pa(initrd_start) + initrd_reserve, mem_max);

                        reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start),
                                        initrd_reserve, BOOTMEM_DEFAULT);
                }
        }
#endif

        data_resource.start = virt_to_phys(&data_start);
        data_resource.end = virt_to_phys(_end) - 1;
        code_resource.start = virt_to_phys(_text);
        code_resource.end = virt_to_phys(&data_start) - 1;

        /* We don't know which region the kernel will be in, so try
         * all of them.
         */
        for (i = 0; i < sysram_resource_count; i++) {
                struct resource *res = &sysram_resources[i];
                request_resource(res, &code_resource);
                request_resource(res, &data_resource);
        }
        request_resource(&sysram_resources[0], &pdcdata_resource);
}

void free_initmem(void)
{
        unsigned long addr;
        unsigned long init_begin = (unsigned long)__init_begin;
        unsigned long init_end = (unsigned long)__init_end;

#ifdef CONFIG_DEBUG_KERNEL
        /* Attempt to catch anyone trying to execute code here
         * by filling the page with BRK insns (on parisc the all-zero
         * pattern written below decodes as a break instruction).
         */
        memset((void *)init_begin, 0x00, init_end - init_begin);
        flush_icache_range(init_begin, init_end);
#endif

        /* align __init_begin and __init_end to page size,
           ignoring the linker script where we might have tried to save RAM */
        init_begin = PAGE_ALIGN(init_begin);
        init_end = PAGE_ALIGN(init_end);
        for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                free_page(addr);
                num_physpages++;
                totalram_pages++;
        }

        /* set up a new led state on systems shipped with an LED State panel */
        pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);

        printk(KERN_INFO "Freeing unused kernel memory: %luk freed\n",
                (init_end - init_begin) >> 10);
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
        /* rodata memory was already mapped with KERNEL_RO access rights by
           pagetable_init() and map_pages(). Nothing more to do here. */
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
                (unsigned long)(__end_rodata - __start_rodata) >> 10);
}
#endif

/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between the top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas. That means that any out-of-bounds memory
 * accesses will hopefully be caught. The vmalloc() routines leave
 * a hole of 4kB between each vmalloced area for the same reason.
 */

/* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
                                     & ~(VM_MAP_OFFSET-1)))
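
/* A quick worked example: SET_MAP_OFFSET() rounds up to the next 32K
 * boundary, guaranteeing at least one unmapped byte even for an
 * already-aligned input, e.g. SET_MAP_OFFSET(0x8000) and
 * SET_MAP_OFFSET(0x8001) both evaluate to 0x10000.
 */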

void *parisc_vmalloc_start __read_mostly;
EXPORT_SYMBOL(parisc_vmalloc_start);

#ifdef CONFIG_PA11
unsigned long pcxl_dma_start __read_mostly;
#endif

void __init mem_init(void)
{
        int codesize, reservedpages, datasize, initsize;

        /* Do sanity checks on page table constants */
        BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
        BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
        BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
        BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
                        > BITS_PER_LONG);

        high_memory = __va((max_pfn << PAGE_SHIFT));

#ifndef CONFIG_DISCONTIGMEM
        max_mapnr = page_to_pfn(virt_to_page(high_memory - 1)) + 1;
        totalram_pages += free_all_bootmem();
#else
        {
                int i;

                for (i = 0; i < npmem_ranges; i++)
                        totalram_pages += free_all_bootmem_node(NODE_DATA(i));
        }
#endif

        codesize = (unsigned long)_etext - (unsigned long)_text;
        datasize = (unsigned long)_edata - (unsigned long)_etext;
        initsize = (unsigned long)__init_end - (unsigned long)__init_begin;

        reservedpages = 0;
        {
                unsigned long pfn;
#ifdef CONFIG_DISCONTIGMEM
                int i;

                for (i = 0; i < npmem_ranges; i++) {
                        for (pfn = node_start_pfn(i); pfn < node_end_pfn(i); pfn++) {
                                if (PageReserved(pfn_to_page(pfn)))
                                        reservedpages++;
                        }
                }
#else /* !CONFIG_DISCONTIGMEM */
                for (pfn = 0; pfn < max_pfn; pfn++) {
                        /*
                         * Only count reserved RAM pages
                         */
                        if (PageReserved(pfn_to_page(pfn)))
                                reservedpages++;
                }
#endif
        }

#ifdef CONFIG_PA11
        if (hppa_dma_ops == &pcxl_dma_ops) {
                pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
                parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
                                                + PCXL_DMA_MAP_SIZE);
        } else {
                pcxl_dma_start = 0;
                parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
        }
#else
        parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
#endif
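
        /* So on PA1.1 machines using pcxl_dma_ops the layout is:
         *   MAP_START .. hole .. pcxl_dma_start (PCXL_DMA_MAP_SIZE bytes)
         *             .. hole .. parisc_vmalloc_start
         * with a 32K-aligned gap between areas, per the comment above
         * VM_MAP_OFFSET.
         */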

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
                nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10
        );

#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
        printk("virtual kernel memory layout:\n"
               "    vmalloc : 0x%p - 0x%p   (%4ld MB)\n"
               "    memory  : 0x%p - 0x%p   (%4ld MB)\n"
               "      .init : 0x%p - 0x%p   (%4ld kB)\n"
               "      .data : 0x%p - 0x%p   (%4ld kB)\n"
               "      .text : 0x%p - 0x%p   (%4ld kB)\n",

               (void*)VMALLOC_START, (void*)VMALLOC_END,
               (VMALLOC_END - VMALLOC_START) >> 20,

               __va(0), high_memory,
               ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

               __init_begin, __init_end,
               ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,

               _etext, _edata,
               ((unsigned long)_edata - (unsigned long)_etext) >> 10,

               _text, _etext,
               ((unsigned long)_etext - (unsigned long)_text) >> 10);
#endif
}

unsigned long *empty_zero_page __read_mostly;
EXPORT_SYMBOL(empty_zero_page);

void show_mem(void)
{
        int i, free = 0, total = 0, reserved = 0;
        int shared = 0, cached = 0;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
#ifndef CONFIG_DISCONTIGMEM
        i = max_mapnr;
        while (i-- > 0) {
                total++;
                if (PageReserved(mem_map+i))
                        reserved++;
                else if (PageSwapCache(mem_map+i))
                        cached++;
                else if (!page_count(&mem_map[i]))
                        free++;
                else
                        shared += page_count(&mem_map[i]) - 1;
        }
#else
        for (i = 0; i < npmem_ranges; i++) {
                int j;

                for (j = node_start_pfn(i); j < node_end_pfn(i); j++) {
                        struct page *p;
                        unsigned long flags;

                        pgdat_resize_lock(NODE_DATA(i), &flags);
                        /* nid_page_nr(i, j) is node_mem_map + j, so
                         * subtracting node_start_pfn(i) yields the
                         * page struct for pfn j within node i */
                        p = nid_page_nr(i, j) - node_start_pfn(i);

                        total++;
                        if (PageReserved(p))
                                reserved++;
                        else if (PageSwapCache(p))
                                cached++;
                        else if (!page_count(p))
                                free++;
                        else
                                shared += page_count(p) - 1;
                        pgdat_resize_unlock(NODE_DATA(i), &flags);
                }
        }
#endif
        printk(KERN_INFO "%d pages of RAM\n", total);
        printk(KERN_INFO "%d reserved pages\n", reserved);
        printk(KERN_INFO "%d pages shared\n", shared);
        printk(KERN_INFO "%d pages swap cached\n", cached);


#ifdef CONFIG_DISCONTIGMEM
        {
                struct zonelist *zl;
                int i, j;

                for (i = 0; i < npmem_ranges; i++) {
                        zl = node_zonelist(i, 0);
                        for (j = 0; j < MAX_NR_ZONES; j++) {
                                struct zoneref *z;
                                struct zone *zone;

                                printk("Zone list for zone %d on node %d: ", j, i);
                                for_each_zone_zonelist(zone, z, zl, j)
                                        printk("[%d/%s] ", zone_to_nid(zone),
                                                                zone->name);
                                printk("\n");
                        }
                }
        }
#endif
}
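/* map_pages() below is a software walk of the kernel page tables: for
 * each page of the physical range it allocates any missing pmd/pte
 * levels from bootmem and writes a pte with the requested protection.
 * On 4 kB page kernels the text/rodata span gets PAGE_KERNEL_RO, with
 * the fault vector (the HPMC checksum is written into it) and the
 * gateway page deliberately left out of the read-only mapping.
 */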
static void __init map_pages(unsigned long start_vaddr, unsigned long start_paddr,
                             unsigned long size, pgprot_t pgprot)
{
        pgd_t *pg_dir;
        pmd_t *pmd;
        pte_t *pg_table;
        unsigned long end_paddr;
        unsigned long start_pmd;
        unsigned long start_pte;
        unsigned long tmp1;
        unsigned long tmp2;
        unsigned long address;
        unsigned long ro_start;
        unsigned long ro_end;
        unsigned long fv_addr;
        unsigned long gw_addr;
        extern const unsigned long fault_vector_20;
        extern void * const linux_gateway_page;

        ro_start = __pa((unsigned long)_text);
        ro_end   = __pa((unsigned long)&data_start);
        fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
        gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;

        end_paddr = start_paddr + size;

        pg_dir = pgd_offset_k(start_vaddr);

#if PTRS_PER_PMD == 1
        start_pmd = 0;
#else
        start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
        start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

        address = start_paddr;
        while (address < end_paddr) {
#if PTRS_PER_PMD == 1
                pmd = (pmd_t *)__pa(pg_dir);
#else
                pmd = (pmd_t *)pgd_address(*pg_dir);

                /*
                 * pmd is physical at this point
                 */

                if (!pmd) {
                        pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0),
                                        PAGE_SIZE << PMD_ORDER);
                        pmd = (pmd_t *) __pa(pmd);
                }

                pgd_populate(NULL, pg_dir, __va(pmd));
#endif
                pg_dir++;

                /* now change pmd to kernel virtual addresses */

                pmd = (pmd_t *)__va(pmd) + start_pmd;
                for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {

                        /*
                         * pg_table is physical at this point
                         */

                        pg_table = (pte_t *)pmd_address(*pmd);
                        if (!pg_table) {
                                pg_table = (pte_t *)
                                        alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
                                pg_table = (pte_t *) __pa(pg_table);
                        }

                        pmd_populate_kernel(NULL, pmd, __va(pg_table));

                        /* now change pg_table to kernel virtual addresses */

                        pg_table = (pte_t *) __va(pg_table) + start_pte;
                        for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
                                pte_t pte;

                                /*
                                 * Map the fault vector writable so we can
                                 * write the HPMC checksum.
                                 */
#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
                                if (address >= ro_start && address < ro_end
                                                        && address != fv_addr
                                                        && address != gw_addr)
                                    pte = __mk_pte(address, PAGE_KERNEL_RO);
                                else
#endif
                                    pte = __mk_pte(address, pgprot);

                                if (address >= end_paddr)
                                        pte_val(pte) = 0;

                                set_pte(pg_table, pte);

                                address += PAGE_SIZE;
                        }
                        start_pte = 0;

                        if (address >= end_paddr)
                            break;
                }
                start_pmd = 0;
        }
}

/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced, this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
        int range;

        /* Map each physical memory range to its kernel vaddr */

        for (range = 0; range < npmem_ranges; range++) {
                unsigned long start_paddr;
                unsigned long size;

                start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
                size = pmem_ranges[range].pages << PAGE_SHIFT;

                map_pages((unsigned long)__va(start_paddr), start_paddr,
                        size, PAGE_KERNEL);
        }

#ifdef CONFIG_BLK_DEV_INITRD
        if (initrd_end && initrd_end > mem_limit) {
                printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
                map_pages(initrd_start, __pa(initrd_start),
                        initrd_end - initrd_start, PAGE_KERNEL);
        }
#endif

        empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
        memset(empty_zero_page, 0, PAGE_SIZE);
}

static void __init gateway_init(void)
{
        unsigned long linux_gateway_page_addr;
        /* FIXME: This is 'const' in order to trick the compiler
           into not treating it as DP-relative data. */
        extern void * const linux_gateway_page;

        linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

        /*
         * Setup Linux Gateway page.
         *
         * The Linux gateway page will reside in kernel space (on virtual
         * page 0), so it doesn't need to be aliased into user space.
         */

        map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
                PAGE_SIZE, PAGE_GATEWAY);
}

#ifdef CONFIG_HPUX
void
map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm)
{
        pgd_t *pg_dir;
        pmd_t *pmd;
        pte_t *pg_table;
        unsigned long start_pmd;
        unsigned long start_pte;
        unsigned long address;
        unsigned long hpux_gw_page_addr;
        /* FIXME: This is 'const' in order to trick the compiler
           into not treating it as DP-relative data. */
        extern void * const hpux_gateway_page;

        hpux_gw_page_addr = HPUX_GATEWAY_ADDR & PAGE_MASK;

        /*
         * Setup HP-UX Gateway page.
         *
         * The HP-UX gateway page resides in the user address space,
         * so it needs to be aliased into each process.
         */

        pg_dir = pgd_offset(mm, hpux_gw_page_addr);

#if PTRS_PER_PMD == 1
        start_pmd = 0;
#else
        start_pmd = ((hpux_gw_page_addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
        start_pte = ((hpux_gw_page_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

        address = __pa(&hpux_gateway_page);
#if PTRS_PER_PMD == 1
        pmd = (pmd_t *)__pa(pg_dir);
#else
        pmd = (pmd_t *) pgd_address(*pg_dir);

        /*
         * pmd is physical at this point
         */

        if (!pmd) {
                pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL);
                pmd = (pmd_t *) __pa(pmd);
        }

        __pgd_val_set(*pg_dir, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pmd);
#endif
        /* now change pmd to kernel virtual addresses */

        pmd = (pmd_t *)__va(pmd) + start_pmd;

        /*
         * pg_table is physical at this point
         */

        pg_table = (pte_t *) pmd_address(*pmd);
        if (!pg_table)
                pg_table = (pte_t *) __pa(get_zeroed_page(GFP_KERNEL));

        __pmd_val_set(*pmd, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pg_table);

        /* now change pg_table to kernel virtual addresses */

        pg_table = (pte_t *) __va(pg_table) + start_pte;
        set_pte(pg_table, __mk_pte(address, PAGE_GATEWAY));
}
EXPORT_SYMBOL(map_hpux_gateway_page);
#endif

void __init paging_init(void)
{
        int i;

        setup_bootmem();
        pagetable_init();
        gateway_init();
        flush_cache_all_local(); /* start with known state */
        flush_tlb_all_local(NULL);

        for (i = 0; i < npmem_ranges; i++) {
                unsigned long zones_size[MAX_NR_ZONES] = { 0, };

                zones_size[ZONE_NORMAL] = pmem_ranges[i].pages;

#ifdef CONFIG_DISCONTIGMEM
                /* Need to initialize the pfnnid_map before we can
                   initialize the zone */
                {
                        int j;

                        for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
                             j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
                             j++) {
                                pfnnid_map[j] = i;
                        }
                }
#endif

                free_area_init_node(i, zones_size,
                                pmem_ranges[i].start_pfn, NULL);
        }
}

#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection IDs, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space IDs and
 * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection IDs, so that is the limiting factor.
 * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE  (NR_SPACE_IDS / (8 * sizeof(long)))
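
/* Sizing check: 262144 ids with 64-bit longs is 4096 longs, i.e. a
 * 32 kB bitmap; the 32768-id case needs 512 longs (1024 with 32-bit
 * longs). Recycling kicks in once more than half the ids are dirty.
 */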

static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static DEFINE_SPINLOCK(sid_lock);

unsigned long alloc_sid(void)
{
        unsigned long index;

        spin_lock(&sid_lock);

        if (free_space_ids == 0) {
                if (dirty_space_ids != 0) {
                        spin_unlock(&sid_lock);
                        flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
                        spin_lock(&sid_lock);
                }
                BUG_ON(free_space_ids == 0);
        }

        free_space_ids--;

        index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
        space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
        space_id_index = index;

        spin_unlock(&sid_lock);

        return index << SPACEID_SHIFT;
}

void free_sid(unsigned long spaceid)
{
        unsigned long index = spaceid >> SPACEID_SHIFT;
        unsigned long *dirty_space_offset;

        dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
        index &= (BITS_PER_LONG - 1);

        spin_lock(&sid_lock);

        BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */

        *dirty_space_offset |= (1L << index);
        dirty_space_ids++;

        spin_unlock(&sid_lock);
}


#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
{
        int i;

        /* NOTE: sid_lock must be held upon entry */

        *ndirtyptr = dirty_space_ids;
        if (dirty_space_ids != 0) {
                for (i = 0; i < SID_ARRAY_SIZE; i++) {
                        dirty_array[i] = dirty_space_id[i];
                        dirty_space_id[i] = 0;
                }
                dirty_space_ids = 0;
        }
}

static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
{
        int i;

        /* NOTE: sid_lock must be held upon entry */

        if (ndirty != 0) {
                for (i = 0; i < SID_ARRAY_SIZE; i++) {
                        space_id[i] ^= dirty_array[i];
                }

                free_space_ids += ndirty;
                space_id_index = 0;
        }
}

#else /* CONFIG_SMP */

static void recycle_sids(void)
{
        int i;

        /* NOTE: sid_lock must be held upon entry */

        if (dirty_space_ids != 0) {
                for (i = 0; i < SID_ARRAY_SIZE; i++) {
                        space_id[i] ^= dirty_space_id[i];
                        dirty_space_id[i] = 0;
                }

                free_space_ids += dirty_space_ids;
                dirty_space_ids = 0;
                space_id_index = 0;
        }
}
#endif

/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */
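
/* The XOR in recycle_sids() works because a dirty sid's bit is still
 * set in space_id[]: free_sid() only marks dirty_space_id[]. XOR-ing
 * the dirty mask in therefore clears exactly those bits, returning
 * the ids to the free pool.
 */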

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse;

void flush_tlb_all(void)
{
        int do_recycle;

        do_recycle = 0;
        spin_lock(&sid_lock);
        if (dirty_space_ids > RECYCLE_THRESHOLD) {
                BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
                get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
                recycle_inuse++;
                do_recycle++;
        }
        spin_unlock(&sid_lock);
        on_each_cpu(flush_tlb_all_local, NULL, 1);
        if (do_recycle) {
                spin_lock(&sid_lock);
                recycle_sids(recycle_ndirty, recycle_dirty_array);
                recycle_inuse = 0;
                spin_unlock(&sid_lock);
        }
}
#else
void flush_tlb_all(void)
{
        spin_lock(&sid_lock);
        flush_tlb_all_local(NULL);
        recycle_sids();
        spin_unlock(&sid_lock);
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start >= end)
                return;
        printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                init_page_count(virt_to_page(start));
                free_page(start);
                num_physpages++;
                totalram_pages++;
        }
}
#endif