/*
 * memblock: Fix include breakages caused by 24aa07882b
 * [linux-2.6.git] / arch / arm / kernel / setup.c
 */
1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/export.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/init.h>
22 #include <linux/kexec.h>
23 #include <linux/of_fdt.h>
24 #include <linux/crash_dump.h>
25 #include <linux/root_dev.h>
26 #include <linux/cpu.h>
27 #include <linux/interrupt.h>
28 #include <linux/smp.h>
29 #include <linux/fs.h>
30 #include <linux/proc_fs.h>
31 #include <linux/memblock.h>
32 #include <linux/bug.h>
33 #include <linux/compiler.h>
34
35 #include <asm/unified.h>
36 #include <asm/cpu.h>
37 #include <asm/cputype.h>
38 #include <asm/elf.h>
39 #include <asm/procinfo.h>
40 #include <asm/sections.h>
41 #include <asm/setup.h>
42 #include <asm/smp_plat.h>
43 #include <asm/mach-types.h>
44 #include <asm/cacheflush.h>
45 #include <asm/cachetype.h>
46 #include <asm/tlbflush.h>
47 #include <asm/system.h>
48
49 #include <asm/prom.h>
50 #include <asm/mach/arch.h>
51 #include <asm/mach/irq.h>
52 #include <asm/mach/time.h>
53 #include <asm/traps.h>
54 #include <asm/unwind.h>
55 #include <asm/memblock.h>
56
57 #if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
58 #include "compat.h"
59 #endif
60 #include "atags.h"
61 #include "tcm.h"
62
/* Fallback memory size (16MB) used by the default ATAG_MEM tag (init_tags). */
#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif
66
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/* FP emulator name from the "fpe=" command line option. */
char fpe_type[8];

/*
 * Parse the "fpe=" command line option.
 *
 * Use strlcpy() rather than a fixed-size memcpy(): it stops at the end
 * of the option value and guarantees NUL-termination, so fpe_type never
 * carries unrelated trailing bytes from the command line and is always
 * safe to use with strcmp() (the old memcpy could leave it unterminated
 * for an 8+ character value).
 */
static int __init fpe_setup(char *line)
{
	strlcpy(fpe_type, line, sizeof(fpe_type));
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
78
/*
 * Implemented elsewhere in arch/arm; declared here for use by
 * setup_arch() below.
 */
extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);

/* CPU main ID register value, cached by the boot code. */
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
/* Machine (board) number the bootloader passed in r1. */
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
/* Cache topology flags (CACHEID_*), computed by cacheid_init(). */
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

/* Physical address of the boot tag list/dtb (0 if none); see setup_arch(). */
unsigned int __atags_pointer __initdata;

/* Board revision (from ATAG_REVISION); shown in /proc/cpuinfo. */
unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

/* Board serial number (from ATAG_SERIAL); shown in /proc/cpuinfo. */
unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

/* ELF hardware capability bits (HWCAP_*), taken from the proc_info entry. */
unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


/*
 * Per-implementation function tables, copied out of the matched
 * proc_info_list entry in setup_processor().
 */
#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
128
/*
 * Small per-mode stacks for the IRQ, abort and undefined-instruction
 * exception entry paths; wired up per CPU by cpu_init() below.
 */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

/* ELF platform string, built in setup_processor() from the proc_info entry. */
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
/* Boot-time copy of the command line handed to init via *cmdline_p. */
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

/* Compile-time default command line; may be replaced/extended by ATAG_CMDLINE. */
static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
/* ENDIANNESS reads as 'l' on little-endian builds and 'b' on big-endian ones. */
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
150
/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

/* Convenience names; start/end are filled in by request_standard_resources(). */
#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

/*
 * Legacy lp0-lp2 I/O port ranges, claimed only when the machine
 * descriptor sets the corresponding reserve_lp* flag.
 */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
203
/* Architecture names indexed by the CPU_ARCH_* value (see cpu_architecture()). */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
223
224 static int __get_cpu_architecture(void)
225 {
226         int cpu_arch;
227
228         if ((read_cpuid_id() & 0x0008f000) == 0) {
229                 cpu_arch = CPU_ARCH_UNKNOWN;
230         } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
231                 cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
232         } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
233                 cpu_arch = (read_cpuid_id() >> 16) & 7;
234                 if (cpu_arch)
235                         cpu_arch += CPU_ARCH_ARMv3;
236         } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
237                 unsigned int mmfr0;
238
239                 /* Revised CPUID format. Read the Memory Model Feature
240                  * Register 0 and check for VMSAv7 or PMSAv7 */
241                 asm("mrc        p15, 0, %0, c0, c1, 4"
242                     : "=r" (mmfr0));
243                 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
244                     (mmfr0 & 0x000000f0) >= 0x00000030)
245                         cpu_arch = CPU_ARCH_ARMv7;
246                 else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
247                          (mmfr0 & 0x000000f0) == 0x00000020)
248                         cpu_arch = CPU_ARCH_ARMv6;
249                 else
250                         cpu_arch = CPU_ARCH_UNKNOWN;
251         } else
252                 cpu_arch = CPU_ARCH_UNKNOWN;
253
254         return cpu_arch;
255 }
256
/*
 * Return the cached architecture version (CPU_ARCH_*).  Only valid once
 * setup_processor() has filled in __cpu_architecture.
 */
int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}
263
/*
 * Decide whether the instruction cache can alias, i.e. whether one cache
 * way (line size * number of sets) exceeds PAGE_SIZE.  'arch' selects
 * how the cache ID registers are interpreted.
 */
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		/* Select the L1 instruction cache, then read its size/geometry. */
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		/* v6 cache type register flags an aliasing I-cache in bit 11. */
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
296
/*
 * Work out the cache topology flags (cacheid, CACHEID_*) from the cache
 * type register, and report the result.  Pre-v6 CPUs are assumed VIVT.
 */
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		/* Top three bits == 0x4 indicates the ARMv7 CTR layout. */
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			/* L1Ip field: I-cache policy. */
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
338
/*
 * These functions re-use the assembly code in head.S, which
 * already provide the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

/*
 * printf-style boot-time output.  Always goes through printk(); with
 * CONFIG_DEBUG_LL the text is additionally pushed out via the low-level
 * printascii() routine.  Output is truncated to 256 bytes per call.
 */
void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}
360
361 static void __init feat_v6_fixup(void)
362 {
363         int id = read_cpuid_id();
364
365         if ((id & 0xff0f0000) != 0x41070000)
366                 return;
367
368         /*
369          * HWCAP_TLS is available only on 1136 r1p0 and later,
370          * see also kuser_get_tls_init.
371          */
372         if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
373                 elf_hwcap &= ~HWCAP_TLS;
374 }
375
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 *
	 * Each msr/add/mov triple below switches into one exception mode
	 * (IRQ, abort, undef) and points its banked sp at the matching
	 * member of this CPU's struct stack, finishing back in SVC mode.
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
428
/*
 * Identify the boot CPU: match it against the linker-built proc_info
 * table, hook up the per-implementation function tables, derive the
 * architecture and cache layout, and initialise this CPU's stacks.
 * Hangs if the CPU is not supported by this kernel build.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	/* utsname machine / ELF platform strings get an endianness suffix. */
	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}
479
480 void __init dump_machine_table(void)
481 {
482         struct machine_desc *p;
483
484         early_print("Available machine support:\n\nID (hex)\tNAME\n");
485         for_each_machine_desc(p)
486                 early_print("%08x\t%s\n", p->nr, p->name);
487
488         early_print("\nPlease check your kernel config and/or bootloader.\n");
489
490         while (true)
491                 /* can't use cpu_relax() here as it may require MMU setup */;
492 }
493
494 int __init arm_add_memory(phys_addr_t start, unsigned long size)
495 {
496         struct membank *bank = &meminfo.bank[meminfo.nr_banks];
497
498         if (meminfo.nr_banks >= NR_BANKS) {
499                 printk(KERN_CRIT "NR_BANKS too low, "
500                         "ignoring memory at 0x%08llx\n", (long long)start);
501                 return -EINVAL;
502         }
503
504         /*
505          * Ensure that start/size are aligned to a page boundary.
506          * Size is appropriately rounded down, start is rounded up.
507          */
508         size -= start & ~PAGE_MASK;
509         bank->start = PAGE_ALIGN(start);
510         bank->size  = size & PAGE_MASK;
511
512         /*
513          * Check whether this memory region has non-zero size or
514          * invalid node number.
515          */
516         if (bank->size == 0)
517                 return -EINVAL;
518
519         meminfo.nr_banks++;
520         return 0;
521 }
522
523 /*
524  * Pick out the memory size.  We look for mem=size@start,
525  * where start and size are "size[KkMm]"
526  */
527 static int __init early_mem(char *p)
528 {
529         static int usermem __initdata = 0;
530         unsigned long size;
531         phys_addr_t start;
532         char *endp;
533
534         /*
535          * If the user specifies memory size, we
536          * blow away any automatically generated
537          * size.
538          */
539         if (usermem == 0) {
540                 usermem = 1;
541                 meminfo.nr_banks = 0;
542         }
543
544         start = PHYS_OFFSET;
545         size  = memparse(p, &endp);
546         if (*endp == '@')
547                 start = memparse(endp + 1, NULL);
548
549         arm_add_memory(start, size);
550
551         return 0;
552 }
553 early_param("mem", early_mem);
554
/*
 * Pass ATAG_RAMDISK parameters on to the RAM-disk driver when it is
 * built in; a zero rd_sz leaves the driver's default rd_size untouched.
 */
static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	if (rd_sz)
		rd_size = rd_sz;
#endif
}
569
/*
 * Publish the standard resource tree: one "System RAM" resource per
 * memblock memory region, with the kernel text/data ranges nested inside
 * whichever region contains them, plus optional video RAM and legacy lp
 * I/O ranges from the machine descriptor.
 */
static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
614
/*
 *  Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be a ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */

/* ATAG_CORE: root device number and the root-mount read-only flag. */
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);
636
/* ATAG_MEM: register one memory bank with meminfo. */
static int __init parse_tag_mem32(const struct tag *tag)
{
	return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}

__tagtable(ATAG_MEM, parse_tag_mem32);
643
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/* Default console geometry; overridden field-by-field by ATAG_VIDEOTEXT. */
struct screen_info screen_info = {
 .orig_video_lines      = 30,
 .orig_video_cols       = 80,
 .orig_video_mode       = 0,
 .orig_video_ega_bx     = 0,
 .orig_video_isVGA      = 1,
 .orig_video_points     = 8
};

/* ATAG_VIDEOTEXT: copy the bootloader's text-console state into screen_info. */
static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif
670
/* ATAG_RAMDISK: flag bit 0 = don't load, bit 1 = don't prompt (inverted here). */
static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
680
/* ATAG_SERIAL: 64-bit board serial number, exported via /proc/cpuinfo. */
static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

/* ATAG_REVISION: board revision, exported via /proc/cpuinfo. */
static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);
697
/*
 * ATAG_CMDLINE: merge the bootloader's command line into the built-in
 * default - appended (CMDLINE_EXTEND), ignored (CMDLINE_FORCE) or
 * replacing it (the default).
 */
static int __init parse_tag_cmdline(const struct tag *tag)
{
#if defined(CONFIG_CMDLINE_EXTEND)
	strlcat(default_command_line, " ", COMMAND_LINE_SIZE);
	strlcat(default_command_line, tag->u.cmdline.cmdline,
		COMMAND_LINE_SIZE);
#elif defined(CONFIG_CMDLINE_FORCE)
	pr_warning("Ignoring tag cmdline (using the default kernel command line)\n");
#else
	strlcpy(default_command_line, tag->u.cmdline.cmdline,
		COMMAND_LINE_SIZE);
#endif
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
714
715 /*
716  * Scan the tag table for this tag, and call its parse function.
717  * The tag table is built by the linker from all the __tagtable
718  * declarations.
719  */
720 static int __init parse_tag(const struct tag *tag)
721 {
722         extern struct tagtable __tagtable_begin, __tagtable_end;
723         struct tagtable *t;
724
725         for (t = &__tagtable_begin; t < &__tagtable_end; t++)
726                 if (tag->hdr.tag == t->tag) {
727                         t->parse(tag);
728                         break;
729                 }
730
731         return t < &__tagtable_end;
732 }
733
734 /*
735  * Parse all tags in the list, checking both the global and architecture
736  * specific tag tables.
737  */
738 static void __init parse_tags(const struct tag *t)
739 {
740         for (; t->hdr.size; t = tag_next(t))
741                 if (!parse_tag(t))
742                         printk(KERN_WARNING
743                                 "Ignoring unrecognised tag 0x%08x\n",
744                                 t->hdr.tag);
745 }
746
/*
 * This holds our defaults.
 *
 * Used by setup_machine_tags() when the bootloader supplies no valid tag
 * list: a core tag plus a single MEM_SIZE-byte memory bank whose start
 * is patched to PHYS_OFFSET at runtime.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE },
	{ 0, ATAG_NONE }
};
763
/* Run the machine descriptor's init_machine() hook at arch_initcall time. */
static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
	return 0;
}
arch_initcall(customize_machine);
772
773 #ifdef CONFIG_KEXEC
774 static inline unsigned long long get_total_mem(void)
775 {
776         unsigned long total;
777
778         total = max_low_pfn - min_low_pfn;
779         return total << PAGE_SHIFT;
780 }
781
/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	/* Non-zero means no (or invalid) crashkernel= option was given. */
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
821
822 static void __init squash_mem_tags(struct tag *tag)
823 {
824         for (; tag->hdr.size; tag = tag_next(tag))
825                 if (tag->hdr.tag == ATAG_MEM)
826                         tag->hdr.tag = ATAG_NONE;
827 }
828
/*
 * ATAG (or deprecated param_struct) boot path: match the bootloader's
 * machine number against the built-in machine table, locate the tag
 * list, give the machine a chance to fix it up, parse it, and capture
 * the command line into boot_command_line.  Does not return on an
 * unknown machine number.
 */
static struct machine_desc * __init setup_machine_tags(unsigned int nr)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc = NULL, *p;
	char *from = default_command_line;

	/* Patch the default bank so it starts at the real base of RAM. */
	init_tags.mem.start = PHYS_OFFSET;

	/*
	 * locate machine in the list of supported machines.
	 */
	for_each_machine_desc(p)
		if (nr == p->nr) {
			printk("Machine: %s\n", p->name);
			mdesc = p;
			break;
		}

	if (!mdesc) {
		early_print("\nError: unrecognized/unsupported machine ID"
			" (r1 = 0x%08x).\n\n", nr);
		dump_machine_table(); /* does not return */
	}

	/* Bootloader-supplied list wins; fall back to a per-machine offset. */
	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->atag_offset)
		tags = (void *)(PAGE_OFFSET + mdesc->atag_offset);

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
#endif

	if (tags->hdr.tag != ATAG_CORE) {
#if defined(CONFIG_OF)
		/*
		 * If CONFIG_OF is set, then assume this is a reasonably
		 * modern system that should pass boot parameters
		 */
		early_print("Warning: Neither atags nor dtb found\n");
#endif
		tags = (struct tag *)&init_tags;
	}

	if (mdesc->fixup)
		mdesc->fixup(tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		/* A fixup that filled meminfo overrides any ATAG_MEM tags. */
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	/* parse_early_param needs a boot_command_line */
	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

	return mdesc;
}
893
894
/*
 * Top-level ARM boot-time setup: identify the CPU and machine (device
 * tree first, ATAGs as fallback), parse the command line, initialise
 * memblock/paging and the resource tree, then run the machine's early
 * hooks.
 */
void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	unwind_init();

	setup_processor();
	/* Prefer a flattened device tree; fall back to the ATAG path. */
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	unflatten_device_tree();

#ifdef CONFIG_SMP
	if (is_smp())
		smp_init_cpus();
#endif
	reserve_crashkernel();

	tcm_init();

#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		extern unsigned long arm_dma_zone_size;
		arm_dma_zone_size = mdesc->dma_zone_size;
	}
#endif
#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	early_trap_init();

	if (mdesc->init_early)
		mdesc->init_early();
}
960
961
962 static int __init topology_init(void)
963 {
964         int cpu;
965
966         for_each_possible_cpu(cpu) {
967                 struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
968                 cpuinfo->cpu.hotpluggable = 1;
969                 register_cpu(&cpuinfo->cpu, cpu);
970         }
971
972         return 0;
973 }
974 subsys_initcall(topology_init);
975
#ifdef CONFIG_HAVE_PROC_CPU
/* Create the /proc/cpu directory used by other arch code. */
static int __init proc_cpu_init(void)
{
	if (!proc_mkdir("cpu", NULL))
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif
988
/*
 * Feature names printed by c_show(); index i corresponds to bit i of
 * elf_hwcap, so the order must match the HWCAP_* bit definitions.
 */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	NULL
};
1011
/* Produce the single /proc/cpuinfo record. */
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
1074
1075 static void *c_start(struct seq_file *m, loff_t *pos)
1076 {
1077         return *pos < 1 ? (void *)1 : NULL;
1078 }
1079
1080 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1081 {
1082         ++*pos;
1083         return NULL;
1084 }
1085
1086 static void c_stop(struct seq_file *m, void *v)
1087 {
1088 }
1089
1090 const struct seq_operations cpuinfo_op = {
1091         .start  = c_start,
1092         .next   = c_next,
1093         .stop   = c_stop,
1094         .show   = c_show
1095 };