Merge remote-tracking branch 'origin/dev/sumit-linux-3.10.96' into TOT-merge
[linux-3.10.git] / arch / arm64 / kernel / setup.c
1 /*
2  * Based on arch/arm/kernel/setup.c
3  *
4  * Copyright (C) 1995-2001 Russell King
5  * Copyright (C) 2012 ARM Ltd.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19
20 #include <linux/export.h>
21 #include <linux/kernel.h>
22 #include <linux/stddef.h>
23 #include <linux/ioport.h>
24 #include <linux/delay.h>
25 #include <linux/utsname.h>
26 #include <linux/initrd.h>
27 #include <linux/console.h>
28 #include <linux/bootmem.h>
29 #include <linux/seq_file.h>
30 #include <linux/screen_info.h>
31 #include <linux/init.h>
32 #include <linux/kexec.h>
33 #include <linux/crash_dump.h>
34 #include <linux/root_dev.h>
35 #include <linux/clk-provider.h>
36 #include <linux/cpu.h>
37 #include <linux/interrupt.h>
38 #include <linux/smp.h>
39 #include <linux/fs.h>
40 #include <linux/proc_fs.h>
41 #include <linux/memblock.h>
42 #include <linux/of_fdt.h>
43 #include <linux/of_platform.h>
44 #include <linux/personality.h>
45
46 #include <asm/cpu.h>
47 #include <asm/cputype.h>
48 #include <asm/elf.h>
49 #include <asm/cputable.h>
50 #include <asm/cpufeature.h>
51 #include <asm/cpu_ops.h>
52 #include <asm/sections.h>
53 #include <asm/setup.h>
54 #include <asm/smp_plat.h>
55 #include <asm/cacheflush.h>
56 #include <asm/tlbflush.h>
57 #include <asm/traps.h>
58 #include <asm/memblock.h>
59 #include <asm/psci.h>
60
61 #include <asm/mach/arch.h>
62
/* Boot CPU id register value; NOTE(review): populated outside this file. */
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);

/* AArch64 ELF hwcap bits advertised to userspace (filled by setup_processor()). */
unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);

#ifdef CONFIG_COMPAT
/*
 * Baseline AArch32 (compat) hwcaps assumed present on every arm64 CPU;
 * used to initialise compat_elf_hwcap rather than probing each feature.
 */
#define COMPAT_ELF_HWCAP_DEFAULT	\
				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
				 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
				 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
/* Second AArch32 hwcap word (crypto features), filled by setup_processor(). */
unsigned int compat_elf_hwcap2 __read_mostly;
#endif

/* DT "model" (or "compatible") string; printed by c_show() in /proc/cpuinfo. */
static const char *machine_name;

/* Board revision/serial numbers, reported via /proc/cpuinfo by c_show(). */
unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

#ifdef CONFIG_ARM64_MACH_FRAMEWORK
/* Machine descriptor selected by setup_machine_fdt() from the DT match. */
struct machine_desc *machine_desc __initdata;
#endif
/* Bitmap of detected CPU capabilities (one bit per ARM64_NCAPS entry). */
DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);

/* Physical address of the FDT blob; NOTE(review): set by early boot code. */
phys_addr_t __fdt_pointer __initdata;
/*
 * Standard memory resources
 *
 * Placeholder resources for the kernel text and data segments; the
 * start/end addresses are filled in by request_standard_resources().
 */
static struct resource mem_res[] = {
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

/* Convenience aliases used by request_standard_resources(). */
#define kernel_code mem_res[0]
#define kernel_data mem_res[1]
118
119 void __init early_print(const char *str, ...)
120 {
121         char buf[256];
122         va_list ap;
123
124         va_start(ap, str);
125         vsnprintf(buf, sizeof(buf), str, ap);
126         va_end(ap);
127
128         pr_info("%s", buf);
129 }
130
/* Shift/mask description used to compress MPIDR values into a dense index. */
struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *                        level in order to build a linear index from an
 *                        MPIDR value. Resulting algorithm is a collision
 *                        free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity, fs[4], bits[4], ls;
	u64 mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits %#llx\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 4; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		/* bits[i] = number of significant bits at affinity level i */
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR_EL1 by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 32 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR_EL1 through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR_EL1[7:0] = {0x2, 0x80}.
	 */
	/*
	 * shift_aff[i] moves level i's significant bits down so that they
	 * land immediately above the bits contributed by the lower levels.
	 */
	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
				  fs[3] - (bits[2] + bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	/* Total width of the compressed index. */
	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.shift_aff[3],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	/*
	 * NOTE(review): the flush presumably makes the hash visible to CPUs
	 * running with caches off (boot/resume paths) -- confirm callers.
	 */
	__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
}
#endif
199 bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
200 {
201         return phys_id == cpu_logical_map(cpu);
202 }
203
204 static void __init setup_processor(void)
205 {
206         struct cpu_info *cpu_info;
207         u64 features, block;
208
209         cpu_info = lookup_processor_type(read_cpuid_id());
210         if (!cpu_info) {
211                 pr_info("CPU configuration botched (ID %08x), unable to continue.\n",
212                        read_cpuid_id());
213                 while (1);
214         }
215
216         pr_info("CPU: %s [%08x] revision %d\n",
217                cpu_info->cpu_name, read_cpuid_id(), read_cpuid_id() & 15);
218
219         sprintf(init_utsname()->machine, ELF_PLATFORM);
220         elf_hwcap = 0;
221
222         cpuinfo_store_boot_cpu();
223
224         /*
225          * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
226          * The blocks we test below represent incremental functionality
227          * for non-negative values. Negative values are reserved.
228          */
229         features = read_cpuid(ID_AA64ISAR0_EL1);
230         block = (features >> 4) & 0xf;
231         if (!(block & 0x8)) {
232                 switch (block) {
233                 default:
234                 case 2:
235                         elf_hwcap |= HWCAP_PMULL;
236                 case 1:
237                         elf_hwcap |= HWCAP_AES;
238                 case 0:
239                         break;
240                 }
241         }
242
243         block = (features >> 8) & 0xf;
244         if (block && !(block & 0x8))
245                 elf_hwcap |= HWCAP_SHA1;
246
247         block = (features >> 12) & 0xf;
248         if (block && !(block & 0x8))
249                 elf_hwcap |= HWCAP_SHA2;
250
251         block = (features >> 16) & 0xf;
252         if (block && !(block & 0x8))
253                 elf_hwcap |= HWCAP_CRC32;
254
255 #ifdef CONFIG_COMPAT
256         /*
257          * ID_ISAR5_EL1 carries similar information as above, but pertaining to
258          * the Aarch32 32-bit execution state.
259          */
260         features = read_cpuid(ID_ISAR5_EL1);
261         block = (features >> 4) & 0xf;
262         if (!(block & 0x8)) {
263                 switch (block) {
264                 default:
265                 case 2:
266                         compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
267                 case 1:
268                         compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
269                 case 0:
270                         break;
271                 }
272         }
273
274         block = (features >> 8) & 0xf;
275         if (block && !(block & 0x8))
276                 compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;
277
278         block = (features >> 12) & 0xf;
279         if (block && !(block & 0x8))
280                 compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;
281
282         block = (features >> 16) & 0xf;
283         if (block && !(block & 0x8))
284                 compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
285 #endif
286 }
287
/*
 * setup_machine_fdt - validate the FDT handed over by the bootloader and
 * extract early boot information: machine name, /chosen bootargs and the
 * memory layout.  Hangs the boot CPU with a diagnostic if the blob is
 * missing or has a bad magic.  With CONFIG_ARM64_MACH_FRAMEWORK, also
 * selects and returns the best-matching machine descriptor.
 */
#ifdef CONFIG_ARM64_MACH_FRAMEWORK
static struct machine_desc * __init setup_machine_fdt(phys_addr_t dt_phys)
#else
static void __init setup_machine_fdt(phys_addr_t dt_phys)
#endif
{
	struct boot_param_header *devtree;
	unsigned long dt_root;
#ifdef CONFIG_ARM64_MACH_FRAMEWORK
	/* Lowest positive of_flat_dt_match() score wins (~1 = "no match yet"). */
	struct machine_desc *mdesc, *mdesc_best = NULL;
	unsigned int score, mdesc_score = ~1;
#endif

	cpuinfo_store_cpu();

	/* Check we have a non-NULL DT pointer */
	if (!dt_phys) {
		early_print("\n"
			"Error: NULL or invalid device tree blob\n"
			"The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
			"\nPlease check your bootloader.\n");

		/* No DT means no memory map: nothing to do but spin. */
		while (true)
			cpu_relax();

	}

	devtree = phys_to_virt(dt_phys);

	/* Check device tree validity */
	if (be32_to_cpu(devtree->magic) != OF_DT_HEADER) {
		early_print("\n"
			"Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
			"Expected 0x%x, found 0x%x\n"
			"\nPlease check your bootloader.\n",
			dt_phys, devtree, OF_DT_HEADER,
			be32_to_cpu(devtree->magic));

		while (true)
			cpu_relax();
	}

	initial_boot_params = devtree;
	dt_root = of_get_flat_dt_root();

#ifdef CONFIG_ARM64_MACH_FRAMEWORK
	for_each_machine_desc(mdesc) {
		score = of_flat_dt_match(dt_root, mdesc->dt_compat);
		if (score > 0 && score < mdesc_score) {
			mdesc_best = mdesc;
			mdesc_score = score;
		}
	}
	if (!mdesc_best) {
		const char *prop;
		long size;

		pr_info("\nError: unrecognized/unsupported "
			    "device tree compatible list:\n[ ");

		/* Dump the root "compatible" list to help diagnose the mismatch. */
		prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
		while (size > 0) {
			pr_info("'%s' ", prop);
			size -= strlen(prop) + 1;
			prop += strlen(prop) + 1;
		}
		pr_info("]\n\n");

		while (true)
			/* can't use cpu_relax() here as it may require MMU setup */;
	}
#endif

	/* Prefer the "model" property for the name, fall back to "compatible". */
	machine_name = of_get_flat_dt_prop(dt_root, "model", NULL);
	if (!machine_name)
		machine_name = of_get_flat_dt_prop(dt_root, "compatible", NULL);
	if (!machine_name)
		machine_name = "<unknown>";
	pr_info("Machine: %s\n", machine_name);

	/* Retrieve various information from the /chosen node */
	of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
	/* Initialize {size,address}-cells info */
	of_scan_flat_dt(early_init_dt_scan_root, NULL);
	/* Setup memory, calling early_init_dt_add_memory_arch */
	of_scan_flat_dt(early_init_dt_scan_memory, NULL);

#ifdef CONFIG_ARM64_MACH_FRAMEWORK
	return mdesc_best;
#endif
}
379
380 /*
381  * Limit the memory size that was specified via FDT.
382  */
383 static int __init early_mem(char *p)
384 {
385         phys_addr_t limit;
386
387         if (!p)
388                 return 1;
389
390         limit = memparse(p, &p) & PAGE_MASK;
391         pr_notice("Memory limited to %lldMB\n", limit >> 20);
392
393         memblock_enforce_memory_limit(limit);
394
395         return 0;
396 }
397 early_param("mem", early_mem);
398
399 static void __init request_standard_resources(void)
400 {
401         struct memblock_region *region;
402         struct resource *res;
403
404         kernel_code.start   = virt_to_phys(_text);
405         kernel_code.end     = virt_to_phys(_etext - 1);
406         kernel_data.start   = virt_to_phys(_sdata);
407         kernel_data.end     = virt_to_phys(_end - 1);
408
409         for_each_memblock(memory, region) {
410                 res = alloc_bootmem_low(sizeof(*res));
411                 res->name  = "System RAM";
412                 res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
413                 res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
414                 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
415
416                 request_resource(&iomem_resource, res);
417
418                 if (kernel_code.start >= res->start &&
419                     kernel_code.end <= res->end)
420                         request_resource(res, &kernel_code);
421                 if (kernel_data.start >= res->start &&
422                     kernel_data.end <= res->end)
423                         request_resource(res, &kernel_data);
424         }
425 }
426
/* Logical CPU -> hardware ID map; entries stay INVALID_HWID until probed. */
u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
428
/*
 * setup_arch - arm64 architecture-specific boot-time setup.
 *
 * Ordering matters throughout: the CPU and DT must be probed before the
 * early params are parsed, memblock before paging, and paging before the
 * device tree is unflattened.  *cmdline_p is pointed at the DT-provided
 * boot command line for the generic code.
 */
void __init setup_arch(char **cmdline_p)
{
#ifdef CONFIG_ARM64_MACH_FRAMEWORK
	struct machine_desc *mdesc;
#endif

	/* Identify the boot CPU and fill in elf_hwcap. */
	setup_processor();

#ifdef CONFIG_ARM64_MACH_FRAMEWORK
	mdesc = setup_machine_fdt(__fdt_pointer);
	machine_desc = mdesc;
#else
	setup_machine_fdt(__fdt_pointer);
#endif

	/* Record the kernel image layout in the initial mm. */
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk        = (unsigned long) _end;

	*cmdline_p = boot_command_line;

	init_mem_pgprot();

	/* Needs boot_command_line set up by setup_machine_fdt() above. */
	parse_early_param();

	arm64_memblock_init();

	paging_init();
	request_standard_resources();

	unflatten_device_tree();

	psci_init();

	/* Boot CPU's hardware ID comes straight from MPIDR_EL1. */
	cpu_logical_map(0) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	cpu_read_bootcpu_ops();
#ifdef CONFIG_SMP
#ifdef CONFIG_ARM64_MACH_FRAMEWORK
	smp_set_ops(machine_desc->smp);
#endif
	smp_init_cpus();
	smp_build_mpidr_hash();
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

#ifdef CONFIG_ARM64_MACH_FRAMEWORK
	/* Give the board code its earliest hook, if it registered one. */
	if (machine_desc->init_early)
		machine_desc->init_early();
#endif
}
487
488 static int __init arm64_device_init(void)
489 {
490 #if defined(CONFIG_COMMON_CLK)
491         of_clk_init(NULL);
492 #endif
493 #ifdef CONFIG_ARM64_MACH_FRAMEWORK
494         if (!machine_desc->init_machine)
495 #endif
496                 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
497         return 0;
498 }
499 arch_initcall(arm64_device_init);
500
501 static int __init topology_init(void)
502 {
503         int i;
504
505         for_each_possible_cpu(i) {
506                 struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
507                 cpu->hotpluggable = 1;
508 #if !defined(CONFIG_HOTPLUG_CPU0)
509                 if (i == 0)
510                         cpu->hotpluggable = 0;
511 #endif
512
513                 register_cpu(cpu, i);
514         }
515
516         return 0;
517 }
518 subsys_initcall(topology_init);
519
/*
 * Feature names shown in /proc/cpuinfo.  Index j corresponds to hwcap
 * bit (1 << j) in elf_hwcap (see c_show()), so the order must track the
 * HWCAP_* bit definitions.  NULL-terminated.
 */
static const char *hwcap_str[] = {
	"fp",
	"asimd",
	"evtstrm",
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
531
/*
 * Print an extra /proc/cpuinfo line read from the IMPLEMENTATION DEFINED
 * AIDR_EL1 register.  Only called when the MIDR implementer byte is 'N'
 * (NVIDIA) -- see c_show().  NOTE(review): assumes AIDR_EL1 carries the
 * Denver MTS firmware version; confirm against NVIDIA documentation.
 */
static void denver_show(struct seq_file *m)
{
	u32 aidr;

	asm volatile("mrs %0, AIDR_EL1" : "=r" (aidr) : );
	seq_printf(m, "MTS version\t: %u\n", aidr);
}
539
#ifdef CONFIG_COMPAT
/*
 * AArch32 feature names for PER_LINUX32 tasks in /proc/cpuinfo.  Index j
 * corresponds to bit (1 << j) of COMPAT_ELF_HWCAP (see c_show()), so the
 * order must track the COMPAT_HWCAP_* definitions.  Iterated until the
 * array length, not NULL-terminated like hwcap_str.
 */
static const char *compat_hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm"
};
#endif /* CONFIG_COMPAT */
566
/*
 * c_show - emit the whole of /proc/cpuinfo in a single call: one stanza
 * per online CPU (processor number, BogoMIPS, features, MIDR fields)
 * followed by the board-level Hardware/Revision/Serial lines.  The exact
 * field names and layout are userspace ABI; do not reformat.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i, j;

	for_each_online_cpu(i) {
		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
		u32 midr = cpuinfo->reg_midr;

		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
#ifdef CONFIG_SMP
		seq_printf(m, "processor\t: %d\n", i);
#endif
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000UL/HZ),
			   loops_per_jiffy / (5000UL/HZ) % 100);

		/*
		 * Dump out the common processor features in a single line.
		 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
		 * rather than attempting to parse this, but there's a body of
		 * software which does already (at least for 32-bit).
		 */
		seq_puts(m, "Features\t:");
		/* 32-bit personalities see the AArch32 (compat) feature list. */
		if (personality(current->personality) == PER_LINUX32) {
#ifdef CONFIG_COMPAT
			for (j = 0; compat_hwcap_str[j]; j++)
				if (COMPAT_ELF_HWCAP & (1 << j))
					seq_printf(m, " %s", compat_hwcap_str[j]);
#endif /* CONFIG_COMPAT */
		} else {
			for (j = 0; hwcap_str[j]; j++)
				if (elf_hwcap & (1 << j))
					seq_printf(m, " %s", hwcap_str[j]);
		}
		seq_puts(m, "\n");

		/* Decode the MIDR fields: implementer, variant, part, revision. */
		seq_printf(m, "CPU implementer\t: 0x%02x\n", (midr >> 24));
		seq_printf(m, "CPU architecture: 8\n");
		seq_printf(m, "CPU variant\t: 0x%x\n", ((midr >> 20) & 0xf));
		seq_printf(m, "CPU part\t: 0x%03x\n", ((midr >> 4) & 0xfff));
		seq_printf(m, "CPU revision\t: %d\n\n", (midr & 0xf));
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	/* Implementer 'N' (NVIDIA): append the Denver-specific MTS line. */
	if ((read_cpuid_id() >> 24) == 'N')
		denver_show(m);
	return 0;
}
623
624 static void *c_start(struct seq_file *m, loff_t *pos)
625 {
626         return *pos < 1 ? (void *)1 : NULL;
627 }
628
629 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
630 {
631         ++*pos;
632         return NULL;
633 }
634
/* Nothing to release: the iterator holds no state beyond the position. */
static void c_stop(struct seq_file *m, void *v)
{
}
638
/*
 * seq_file operations backing /proc/cpuinfo: c_start/c_next produce a
 * single iteration, and c_show prints all CPUs in that one pass.
 */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
645
646 #ifdef CONFIG_ARM64_MACH_FRAMEWORK
647 static int __init customize_machine(void)
648 {
649         /* customizes platform devices, or adds new ones */
650         if (machine_desc->init_machine)
651                 machine_desc->init_machine();
652         return 0;
653 }
654 arch_initcall(customize_machine);
655
656 static int __init init_machine_late(void)
657 {
658         if (machine_desc->init_late)
659                 machine_desc->init_late();
660         return 0;
661 }
662 late_initcall(init_machine_late);
663 #endif