Remove obsolete #include <linux/config.h>
[linux-2.6.git] / arch / ppc / platforms / hdpu.c
1 /*
2  * Board setup routines for the Sky Computers HDPU Compute Blade.
3  *
4  * Written by Brian Waite <waite@skycomputers.com>
5  *
6  * Based on code done by - Mark A. Greer <mgreer@mvista.com>
7  *                         Rabeeh Khoury - rabeeh@galileo.co.il
8  *
9  * This program is free software; you can redistribute  it and/or modify it
10  * under  the terms of  the GNU General  Public License as published by the
11  * Free Software Foundation;  either version 2 of the  License, or (at your
12  * option) any later version.
13  */
14
15
16 #include <linux/pci.h>
17 #include <linux/delay.h>
18 #include <linux/irq.h>
19 #include <linux/ide.h>
20 #include <linux/seq_file.h>
21 #include <linux/platform_device.h>
22
23 #include <linux/initrd.h>
24 #include <linux/root_dev.h>
25 #include <linux/smp.h>
26
27 #include <asm/time.h>
28 #include <asm/machdep.h>
29 #include <asm/todc.h>
30 #include <asm/mv64x60.h>
31 #include <asm/ppcboot.h>
32 #include <platforms/hdpu.h>
33 #include <linux/mv643xx.h>
34 #include <linux/hdpu_features.h>
35 #include <linux/device.h>
36 #include <linux/mtd/physmap.h>
37
38 #define BOARD_VENDOR    "Sky Computers"
39 #define BOARD_MACHINE   "HDPU-CB-A"
40
41 bd_t ppcboot_bd;
42 int ppcboot_bd_valid = 0;
43
44 static mv64x60_handle_t bh;
45
46 extern char cmd_line[];
47
48 unsigned long hdpu_find_end_of_memory(void);
49 void hdpu_mpsc_progress(char *s, unsigned short hex);
50 void hdpu_heartbeat(void);
51
52 static void parse_bootinfo(unsigned long r3,
53                            unsigned long r4, unsigned long r5,
54                            unsigned long r6, unsigned long r7);
55 static void hdpu_set_l1pe(void);
56 static void hdpu_cpustate_set(unsigned char new_state);
57 #ifdef CONFIG_SMP
58 static DEFINE_SPINLOCK(timebase_lock);
59 static unsigned int timebase_upper = 0, timebase_lower = 0;
60 extern int smp_tb_synchronized;
61
62 void __devinit hdpu_tben_give(void);
63 void __devinit hdpu_tben_take(void);
64 #endif
65
/*
 * PCI interrupt routing (ppc_md.pci_map_irq hook).
 *
 * Hose 0 routes both IDSELs 1 and 2 to HDPU_PCI_0_IRQ; hose 1 routes
 * its single IDSEL 1 to HDPU_PCI_1_IRQ.  Note that PCI_IRQ_TABLE_LOOKUP
 * is a macro that reads the local names pci_irq_table, min_idsel,
 * max_idsel and irqs_per_slot, plus the idsel/pin parameters - do not
 * rename them.
 */
static int __init
hdpu_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
{
        struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);

        if (hose->index == 0) {
                static char pci_irq_table[][4] = {
                        {HDPU_PCI_0_IRQ, 0, 0, 0},
                        {HDPU_PCI_0_IRQ, 0, 0, 0},
                };

                const long min_idsel = 1, max_idsel = 2, irqs_per_slot = 4;
                return PCI_IRQ_TABLE_LOOKUP;
        } else {
                static char pci_irq_table[][4] = {
                        {HDPU_PCI_1_IRQ, 0, 0, 0},
                };

                const long min_idsel = 1, max_idsel = 1, irqs_per_slot = 4;
                return PCI_IRQ_TABLE_LOOKUP;
        }
}
88
/*
 * Configure the MV64x60 GPP pins and interrupt cause/mask registers so
 * that GPP interrupts 8 and 13 are delivered to CPU #0.
 */
static void __init hdpu_intr_setup(void)
{
        /* Set the listed GPP pins to output; the rest remain inputs. */
        mv64x60_write(&bh, MV64x60_GPP_IO_CNTL,
                      (1 | (1 << 2) | (1 << 3) | (1 << 4) | (1 << 5) |
                       (1 << 6) | (1 << 7) | (1 << 12) | (1 << 16) |
                       (1 << 18) | (1 << 19) | (1 << 20) | (1 << 21) |
                       (1 << 22) | (1 << 23) | (1 << 24) | (1 << 25) |
                       (1 << 26) | (1 << 27) | (1 << 28) | (1 << 29)));

        /* XXXX Erratum FEr PCI-#8 */
        mv64x60_clr_bits(&bh, MV64x60_PCI0_CMD, (1 << 5) | (1 << 9));
        mv64x60_clr_bits(&bh, MV64x60_PCI1_CMD, (1 << 5) | (1 << 9));

        /*
         * Dismiss and then enable interrupt on GPP interrupt cause
         * for CPU #0
         */
        mv64x60_write(&bh, MV64x60_GPP_INTR_CAUSE, ~((1 << 8) | (1 << 13)));
        mv64x60_set_bits(&bh, MV64x60_GPP_INTR_MASK, (1 << 8) | (1 << 13));

        /*
         * Dismiss and then enable interrupt on CPU #0 high cause reg
         * BIT25 summarizes GPP interrupts 8-15
         */
        mv64x60_set_bits(&bh, MV64360_IC_CPU0_INTR_MASK_HI, (1 << 25));
}
115
/*
 * Set up the MV64x60 CPU address-decode windows (boot flash, TBEN
 * device, Nexus-ID CPLD, internal SRAM), apply chip errata/performance
 * tweaks, configure internal SRAM, then hook up interrupts.
 */
static void __init hdpu_setup_peripherals(void)
{
        unsigned int val;

        /* Boot flash window */
        mv64x60_set_32bit_window(&bh, MV64x60_CPU2BOOT_WIN,
                                 HDPU_EMB_FLASH_BASE, HDPU_EMB_FLASH_SIZE, 0);
        bh.ci->enable_window_32bit(&bh, MV64x60_CPU2BOOT_WIN);

        /* Timebase-enable (TBEN) CPLD register window */
        mv64x60_set_32bit_window(&bh, MV64x60_CPU2DEV_0_WIN,
                                 HDPU_TBEN_BASE, HDPU_TBEN_SIZE, 0);
        bh.ci->enable_window_32bit(&bh, MV64x60_CPU2DEV_0_WIN);

        /* Nexus-ID register window (CPU-count CPLD, see smp_hdpu_probe) */
        mv64x60_set_32bit_window(&bh, MV64x60_CPU2DEV_1_WIN,
                                 HDPU_NEXUS_ID_BASE, HDPU_NEXUS_ID_SIZE, 0);
        bh.ci->enable_window_32bit(&bh, MV64x60_CPU2DEV_1_WIN);

        /* On-chip SRAM window */
        mv64x60_set_32bit_window(&bh, MV64x60_CPU2SRAM_WIN,
                                 HDPU_INTERNAL_SRAM_BASE,
                                 HDPU_INTERNAL_SRAM_SIZE, 0);
        bh.ci->enable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN);

        /* Unused ethernet-to-memory window: disable and zero it. */
        bh.ci->disable_window_32bit(&bh, MV64x60_ENET2MEM_4_WIN);
        mv64x60_set_32bit_window(&bh, MV64x60_ENET2MEM_4_WIN, 0, 0, 0);

        mv64x60_clr_bits(&bh, MV64x60_PCI0_PCI_DECODE_CNTL, (1 << 3));
        mv64x60_clr_bits(&bh, MV64x60_PCI1_PCI_DECODE_CNTL, (1 << 3));
        /* Stop timers/counters 0-3. */
        mv64x60_clr_bits(&bh, MV64x60_TIMR_CNTR_0_3_CNTL,
                         ((1 << 0) | (1 << 8) | (1 << 16) | (1 << 24)));

        /* Enable pipelining */
        mv64x60_set_bits(&bh, MV64x60_CPU_CONFIG, (1 << 13));
        /* Enable snoop pipelining */
        mv64x60_set_bits(&bh, MV64360_D_UNIT_CONTROL_HIGH, (1 << 24));

        /*
         * Change DRAM read buffer assignment.
         * Assign read buffer 0 dedicated only for CPU,
         * and the rest read buffer 1.
         */
        val = mv64x60_read(&bh, MV64360_SDRAM_CONFIG);
        val = val & 0x03ffffff;
        val = val | 0xf8000000;
        mv64x60_write(&bh, MV64360_SDRAM_CONFIG, val);

        /*
         * Configure internal SRAM -
         * Cache coherent write back, if CONFIG_MV64360_SRAM_CACHE_COHERENT set
         * Parity enabled.
         * Parity error propagation
         * Arbitration not parked for CPU only
         * Other bits are reserved.
         */
#ifdef CONFIG_MV64360_SRAM_CACHE_COHERENT
        mv64x60_write(&bh, MV64360_SRAM_CONFIG, 0x001600b2);
#else
        mv64x60_write(&bh, MV64360_SRAM_CONFIG, 0x001600b0);
#endif

        hdpu_intr_setup();
}
176
/*
 * Initialize the MV64360 host bridge: describe both PCI buses (I/O and
 * memory windows, latency), choose snoop/burst options depending on
 * cache coherency support, then init the bridge and auto-scan both
 * hoses.  Also installs the PCI swizzle/map_irq/exclude hooks.
 */
static void __init hdpu_setup_bridge(void)
{
        struct mv64x60_setup_info si;
        int i;

        memset(&si, 0, sizeof(si));

        si.phys_reg_base = HDPU_BRIDGE_REG_BASE;
        si.pci_0.enable_bus = 1;
        si.pci_0.pci_io.cpu_base = HDPU_PCI0_IO_START_PROC_ADDR;
        si.pci_0.pci_io.pci_base_hi = 0;
        si.pci_0.pci_io.pci_base_lo = HDPU_PCI0_IO_START_PCI_ADDR;
        si.pci_0.pci_io.size = HDPU_PCI0_IO_SIZE;
        si.pci_0.pci_io.swap = MV64x60_CPU2PCI_SWAP_NONE;
        si.pci_0.pci_mem[0].cpu_base = HDPU_PCI0_MEM_START_PROC_ADDR;
        si.pci_0.pci_mem[0].pci_base_hi = HDPU_PCI0_MEM_START_PCI_HI_ADDR;
        si.pci_0.pci_mem[0].pci_base_lo = HDPU_PCI0_MEM_START_PCI_LO_ADDR;
        si.pci_0.pci_mem[0].size = HDPU_PCI0_MEM_SIZE;
        si.pci_0.pci_mem[0].swap = MV64x60_CPU2PCI_SWAP_NONE;
        si.pci_0.pci_cmd_bits = 0;
        si.pci_0.latency_timer = 0x80;

        si.pci_1.enable_bus = 1;
        si.pci_1.pci_io.cpu_base = HDPU_PCI1_IO_START_PROC_ADDR;
        si.pci_1.pci_io.pci_base_hi = 0;
        si.pci_1.pci_io.pci_base_lo = HDPU_PCI1_IO_START_PCI_ADDR;
        si.pci_1.pci_io.size = HDPU_PCI1_IO_SIZE;
        si.pci_1.pci_io.swap = MV64x60_CPU2PCI_SWAP_NONE;
        si.pci_1.pci_mem[0].cpu_base = HDPU_PCI1_MEM_START_PROC_ADDR;
        si.pci_1.pci_mem[0].pci_base_hi = HDPU_PCI1_MEM_START_PCI_HI_ADDR;
        si.pci_1.pci_mem[0].pci_base_lo = HDPU_PCI1_MEM_START_PCI_LO_ADDR;
        si.pci_1.pci_mem[0].size = HDPU_PCI1_MEM_SIZE;
        si.pci_1.pci_mem[0].swap = MV64x60_CPU2PCI_SWAP_NONE;
        si.pci_1.pci_cmd_bits = 0;
        si.pci_1.latency_timer = 0x80;

        /* Per-window snoop/burst options for each CPU-to-memory window. */
        for (i = 0; i < MV64x60_CPU2MEM_WINDOWS; i++) {
#if defined(CONFIG_NOT_COHERENT_CACHE)
                si.cpu_prot_options[i] = 0;
                si.enet_options[i] = MV64360_ENET2MEM_SNOOP_NONE;
                si.mpsc_options[i] = MV64360_MPSC2MEM_SNOOP_NONE;
                si.idma_options[i] = MV64360_IDMA2MEM_SNOOP_NONE;

                si.pci_1.acc_cntl_options[i] =
                    MV64360_PCI_ACC_CNTL_SNOOP_NONE |
                    MV64360_PCI_ACC_CNTL_SWAP_NONE |
                    MV64360_PCI_ACC_CNTL_MBURST_128_BYTES |
                    MV64360_PCI_ACC_CNTL_RDSIZE_256_BYTES;

                si.pci_0.acc_cntl_options[i] =
                    MV64360_PCI_ACC_CNTL_SNOOP_NONE |
                    MV64360_PCI_ACC_CNTL_SWAP_NONE |
                    MV64360_PCI_ACC_CNTL_MBURST_128_BYTES |
                    MV64360_PCI_ACC_CNTL_RDSIZE_256_BYTES;

#else
                si.cpu_prot_options[i] = 0;
                si.enet_options[i] = MV64360_ENET2MEM_SNOOP_WB; /* errata */
                si.mpsc_options[i] = MV64360_MPSC2MEM_SNOOP_WB; /* errata */
                si.idma_options[i] = MV64360_IDMA2MEM_SNOOP_WB; /* errata */

                si.pci_0.acc_cntl_options[i] =
                    MV64360_PCI_ACC_CNTL_SNOOP_WB |
                    MV64360_PCI_ACC_CNTL_SWAP_NONE |
                    MV64360_PCI_ACC_CNTL_MBURST_32_BYTES |
                    MV64360_PCI_ACC_CNTL_RDSIZE_256_BYTES;

                si.pci_1.acc_cntl_options[i] =
                    MV64360_PCI_ACC_CNTL_SNOOP_WB |
                    MV64360_PCI_ACC_CNTL_SWAP_NONE |
                    MV64360_PCI_ACC_CNTL_MBURST_32_BYTES |
                    MV64360_PCI_ACC_CNTL_RDSIZE_256_BYTES;
#endif
        }

        hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_INIT_PCI);

        /* Lookup PCI host bridges */
        mv64x60_init(&bh, &si);
        pci_dram_offset = 0;    /* System mem at same addr on PCI & cpu bus */
        ppc_md.pci_swizzle = common_swizzle;
        ppc_md.pci_map_irq = hdpu_map_irq;

        /* Scan hose A starting at bus 0, then hose B after it. */
        mv64x60_set_bus(&bh, 0, 0);
        bh.hose_a->first_busno = 0;
        bh.hose_a->last_busno = 0xff;
        bh.hose_a->last_busno = pciauto_bus_scan(bh.hose_a, 0);

        bh.hose_b->first_busno = bh.hose_a->last_busno + 1;
        mv64x60_set_bus(&bh, 1, bh.hose_b->first_busno);
        bh.hose_b->last_busno = 0xff;
        bh.hose_b->last_busno = pciauto_bus_scan(bh.hose_b,
                bh.hose_b->first_busno);

        ppc_md.pci_exclude_device = mv64x60_pci_exclude_device;

        hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_INIT_REG);
        /*
         * Enabling of PCI internal-vs-external arbitration
         * is a platform- and errata-dependent decision.
         */
        return;
}
280
#if defined(CONFIG_SERIAL_MPSC_CONSOLE)
/*
 * Early console hookup.  Only does real work under CONFIG_KGDB, where
 * the selected MPSC port is initialized once at 9600 8N1; without KGDB
 * this is a no-op (the MPSC console proper comes up via the platform
 * bus later).
 */
static void __init hdpu_early_serial_map(void)
{
#ifdef  CONFIG_KGDB
        static char first_time = 1;

#if defined(CONFIG_KGDB_TTYS0)
#define KGDB_PORT 0
#elif defined(CONFIG_KGDB_TTYS1)
#define KGDB_PORT 1
#else
#error "Invalid kgdb_tty port"
#endif

        if (first_time) {
                gt_early_mpsc_init(KGDB_PORT,
                                   B9600 | CS8 | CREAD | HUPCL | CLOCAL);
                first_time = 0;
        }

        return;
#endif
}
#endif
305
/* Second-phase init hook (ppc_md.init); nothing to do on this board. */
static void hdpu_init2(void)
{
}
310
#if defined(CONFIG_MV643XX_ETH)
/*
 * Patch the mv643xx_eth platform data with this board's fixed PHY
 * address (== platform device id), 100 Mbit full duplex, and queue
 * sizes.
 */
static void __init hdpu_fixup_eth_pdata(struct platform_device *pd)
{
        struct mv643xx_eth_platform_data *eth = pd->dev.platform_data;

        eth->force_phy_addr = 1;
        eth->phy_addr = pd->id;
        eth->speed = SPEED_100;
        eth->duplex = DUPLEX_FULL;
        eth->tx_queue_size = 400;
        eth->rx_queue_size = 800;
}
#endif
326
327 static void __init hdpu_fixup_mpsc_pdata(struct platform_device *pd)
328 {
329
330         struct mpsc_pdata *pdata;
331
332         pdata = (struct mpsc_pdata *)pd->dev.platform_data;
333
334         pdata->max_idle = 40;
335         if (ppcboot_bd_valid)
336                 pdata->default_baud = ppcboot_bd.bi_baudrate;
337         else
338                 pdata->default_baud = HDPU_DEFAULT_BAUD;
339         pdata->brg_clk_src = HDPU_MPSC_CLK_SRC;
340         pdata->brg_clk_freq = HDPU_MPSC_CLK_FREQ;
341 }
342
#if defined(CONFIG_HDPU_FEATURES)
/* Run the generic mv64x60 platform-device fixup on the cpustate device. */
static void __init hdpu_fixup_cpustate_pdata(struct platform_device *pd)
{
        struct platform_device *devs[] = { pd };

        mv64x60_pd_fixup(&bh, devs, 1);
}
#endif
351
/*
 * Device-registration notifier: when one of the known platform devices
 * (MPSC serial, mv643xx ethernet, HDPU cpustate) appears, run the
 * matching fixup routine from dev_map[] on its platform data.
 * Always returns 0.
 */
static int hdpu_platform_notify(struct device *dev)
{
        static struct {
                char *bus_id;
                void ((*rtn) (struct platform_device * pdev));
        } dev_map[] = {
                {
                MPSC_CTLR_NAME ".0", hdpu_fixup_mpsc_pdata},
#if defined(CONFIG_MV643XX_ETH)
                {
                MV643XX_ETH_NAME ".0", hdpu_fixup_eth_pdata},
#endif
#if defined(CONFIG_HDPU_FEATURES)
                {
                HDPU_CPUSTATE_NAME ".0", hdpu_fixup_cpustate_pdata},
#endif
        };
        struct platform_device *pdev;
        int i;

        /* Match on bus_id and dispatch to the registered fixup. */
        if (dev && dev->bus_id)
                for (i = 0; i < ARRAY_SIZE(dev_map); i++)
                        if (!strncmp(dev->bus_id, dev_map[i].bus_id,
                                     BUS_ID_SIZE)) {

                                pdev = container_of(dev,
                                                    struct platform_device,
                                                    dev);
                                dev_map[i].rtn(pdev);
                        }

        return 0;
}
385
386 static void __init hdpu_setup_arch(void)
387 {
388         if (ppc_md.progress)
389                 ppc_md.progress("hdpu_setup_arch: enter", 0);
390 #ifdef CONFIG_BLK_DEV_INITRD
391         if (initrd_start)
392                 ROOT_DEV = Root_RAM0;
393         else
394 #endif
395 #ifdef  CONFIG_ROOT_NFS
396                 ROOT_DEV = Root_NFS;
397 #else
398                 ROOT_DEV = Root_SDA2;
399 #endif
400
401         ppc_md.heartbeat = hdpu_heartbeat;
402
403         ppc_md.heartbeat_reset = HZ;
404         ppc_md.heartbeat_count = 1;
405
406         if (ppc_md.progress)
407                 ppc_md.progress("hdpu_setup_arch: Enabling L2 cache", 0);
408
409         /* Enable L1 Parity Bits */
410         hdpu_set_l1pe();
411
412         /* Enable L2 and L3 caches (if 745x) */
413         _set_L2CR(0x80080000);
414
415         if (ppc_md.progress)
416                 ppc_md.progress("hdpu_setup_arch: enter", 0);
417
418         hdpu_setup_bridge();
419
420         hdpu_setup_peripherals();
421
422 #ifdef CONFIG_SERIAL_MPSC_CONSOLE
423         hdpu_early_serial_map();
424 #endif
425
426         printk("SKY HDPU Compute Blade \n");
427
428         if (ppc_md.progress)
429                 ppc_md.progress("hdpu_setup_arch: exit", 0);
430
431         hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_OK);
432         return;
433 }
/* ppc_md.init_IRQ hook: defer entirely to the generic MV64360 PIC setup. */
static void __init hdpu_init_irq(void)
{
        mv64360_init_irq();
}
438
439 static void __init hdpu_set_l1pe()
440 {
441         unsigned long ictrl;
442         asm volatile ("mfspr %0, 1011":"=r" (ictrl):);
443         ictrl |= ICTRL_EICE | ICTRL_EDC | ICTRL_EICP;
444         asm volatile ("mtspr 1011, %0"::"r" (ictrl));
445 }
446
447 /*
448  * Set BAT 1 to map 0xf1000000 to end of physical memory space.
449  */
450 static __inline__ void hdpu_set_bat(void)
451 {
452         mb();
453         mtspr(SPRN_DBAT1U, 0xf10001fe);
454         mtspr(SPRN_DBAT1L, 0xf100002a);
455         mb();
456
457         return;
458 }
459
460 unsigned long __init hdpu_find_end_of_memory(void)
461 {
462         return mv64x60_get_mem_size(CONFIG_MV64X60_NEW_BASE,
463                                     MV64x60_TYPE_MV64360);
464 }
465
466 static void hdpu_reset_board(void)
467 {
468         volatile int infinite = 1;
469
470         hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_RESET);
471
472         local_irq_disable();
473
474         /* Clear all the LEDs */
475         mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, ((1 << 4) |
476                                                    (1 << 5) | (1 << 6)));
477
478         /* disable and invalidate the L2 cache */
479         _set_L2CR(0);
480         _set_L2CR(0x200000);
481
482         /* flush and disable L1 I/D cache */
483         __asm__ __volatile__
484             ("\n"
485              "mfspr   3,1008\n"
486              "ori       5,5,0xcc00\n"
487              "ori       4,3,0xc00\n"
488              "andc      5,3,5\n"
489              "sync\n"
490              "mtspr     1008,4\n"
491              "isync\n" "sync\n" "mtspr  1008,5\n" "isync\n" "sync\n");
492
493         /* Hit the reset bit */
494         mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, (1 << 3));
495
496         while (infinite)
497                 infinite = infinite;
498
499         return;
500 }
501
502 static void hdpu_restart(char *cmd)
503 {
504         volatile ulong i = 10000000;
505
506         hdpu_reset_board();
507
508         while (i-- > 0) ;
509         panic("restart failed\n");
510 }
511
/*
 * ppc_md.halt: disable interrupts, mark the halt state on the cpustate
 * LEDs, turn the LEDs off, and spin forever.
 */
static void hdpu_halt(void)
{
        local_irq_disable();

        hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_HALT);

        /* Clear all the LEDs */
        mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, ((1 << 4) | (1 << 5) |
                                                   (1 << 6)));
        while (1) ;
        /* NOTREACHED */
}
524
/* ppc_md.power_off: no software power control on this blade, so just halt. */
static void hdpu_power_off(void)
{
        hdpu_halt();
        /* NOTREACHED */
}
530
531 static int hdpu_show_cpuinfo(struct seq_file *m)
532 {
533         uint pvid;
534
535         pvid = mfspr(SPRN_PVR);
536         seq_printf(m, "vendor\t\t: Sky Computers\n");
537         seq_printf(m, "machine\t\t: HDPU Compute Blade\n");
538         seq_printf(m, "PVID\t\t: 0x%x, vendor: %s\n",
539                    pvid, (pvid & (1 << 15) ? "IBM" : "Motorola"));
540
541         return 0;
542 }
543
544 static void __init hdpu_calibrate_decr(void)
545 {
546         ulong freq;
547
548         if (ppcboot_bd_valid)
549                 freq = ppcboot_bd.bi_busfreq / 4;
550         else
551                 freq = 133000000;
552
553         printk("time_init: decrementer frequency = %lu.%.6lu MHz\n",
554                freq / 1000000, freq % 1000000);
555
556         tb_ticks_per_jiffy = freq / HZ;
557         tb_to_us = mulhwu_scale_factor(freq, 1000000);
558
559         return;
560 }
561
/*
 * Decode the registers handed over by the bootloader:
 *   r3      - pointer to a bd_t board-info block (copied to ppcboot_bd)
 *   r4, r5  - initrd start/end
 *   r6, r7  - kernel command line start/end
 * Addresses in the low region are treated as physical and relocated by
 * adding KERNELBASE before use.
 */
static void parse_bootinfo(unsigned long r3,
                           unsigned long r4, unsigned long r5,
                           unsigned long r6, unsigned long r7)
{
        bd_t *bd = NULL;
        char *cmdline_start = NULL;
        int cmdline_len = 0;

        /* Board-info block */
        if (r3) {
                if ((r3 & 0xf0000000) == 0)
                        r3 += KERNELBASE;
                if ((r3 & 0xf0000000) == KERNELBASE) {
                        bd = (void *)r3;

                        memcpy(&ppcboot_bd, bd, sizeof(ppcboot_bd));
                        ppcboot_bd_valid = 1;
                }
        }
#ifdef CONFIG_BLK_DEV_INITRD
        /* Initrd image location */
        if (r4 && r5 && r5 > r4) {
                if ((r4 & 0xf0000000) == 0)
                        r4 += KERNELBASE;
                if ((r5 & 0xf0000000) == 0)
                        r5 += KERNELBASE;
                if ((r4 & 0xf0000000) == KERNELBASE) {
                        initrd_start = r4;
                        initrd_end = r5;
                        initrd_below_start_ok = 1;
                }
        }
#endif                          /* CONFIG_BLK_DEV_INITRD */

        /* Kernel command line */
        if (r6 && r7 && r7 > r6) {
                if ((r6 & 0xf0000000) == 0)
                        r6 += KERNELBASE;
                if ((r7 & 0xf0000000) == 0)
                        r7 += KERNELBASE;
                if ((r6 & 0xf0000000) == KERNELBASE) {
                        cmdline_start = (void *)r6;
                        cmdline_len = (r7 - r6);
                        /* NOTE(review): strncpy does not guarantee NUL
                         * termination; this relies on the bootloader
                         * including the trailing '\0' in r6..r7 - confirm.
                         */
                        strncpy(cmd_line, cmdline_start, cmdline_len);
                }
        }
}
606
607 #if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
/*
 * IDE hook: claim an I/O port range for a drive interface.
 * NOTE(review): the request_region() result is discarded, so a resource
 * conflict would go unnoticed - confirm against other ppc platforms.
 */
static void
hdpu_ide_request_region(ide_ioreg_t from, unsigned int extent, const char *name)
{
        request_region(from, extent, name);
        return;
}
614
/* IDE hook: release an I/O port range previously claimed above. */
static void hdpu_ide_release_region(ide_ioreg_t from, unsigned int extent)
{
        release_region(from, extent);
        return;
}
620
/*
 * IDE hook: walk all PCI devices and, for each IDE/RAID-class storage
 * controller found, record its IRQ in the hw_regs_t and in the caller's
 * *irq (if non-NULL).  The data/ctrl port parameters are unused here;
 * if several matching controllers exist, the last one wins.
 */
static void __init
hdpu_ide_pci_init_hwif_ports(hw_regs_t * hw, ide_ioreg_t data_port,
                             ide_ioreg_t ctrl_port, int *irq)
{
        struct pci_dev *dev;

        pci_for_each_dev(dev) {
                if (((dev->class >> 8) == PCI_CLASS_STORAGE_IDE) ||
                    ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)) {
                        hw->irq = dev->irq;

                        if (irq != NULL) {
                                *irq = dev->irq;
                        }
                }
        }

        return;
}
640 #endif
641
642 void hdpu_heartbeat(void)
643 {
644         if (mv64x60_read(&bh, MV64x60_GPP_VALUE) & (1 << 5))
645                 mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, (1 << 5));
646         else
647                 mv64x60_write(&bh, MV64x60_GPP_VALUE_SET, (1 << 5));
648
649         ppc_md.heartbeat_count = ppc_md.heartbeat_reset;
650
651 }
652
/* Early fixed mapping: 128 KB of bridge register space, 1:1 at 0xf1000000. */
static void __init hdpu_map_io(void)
{
        io_block_mapping(0xf1000000, 0xf1000000, 0x20000, _PAGE_IO);
}
657
658 #ifdef CONFIG_SMP
659 char hdpu_smp0[] = "SMP Cpu #0";
660 char hdpu_smp1[] = "SMP Cpu #1";
661
662 static irqreturn_t hdpu_smp_cpu0_int_handler(int irq, void *dev_id,
663                                              struct pt_regs *regs)
664 {
665         volatile unsigned int doorbell;
666
667         doorbell = mv64x60_read(&bh, MV64360_CPU0_DOORBELL);
668
669         /* Ack the doorbell interrupts */
670         mv64x60_write(&bh, MV64360_CPU0_DOORBELL_CLR, doorbell);
671
672         if (doorbell & 1) {
673                 smp_message_recv(0, regs);
674         }
675         if (doorbell & 2) {
676                 smp_message_recv(1, regs);
677         }
678         if (doorbell & 4) {
679                 smp_message_recv(2, regs);
680         }
681         if (doorbell & 8) {
682                 smp_message_recv(3, regs);
683         }
684         return IRQ_HANDLED;
685 }
686
687 static irqreturn_t hdpu_smp_cpu1_int_handler(int irq, void *dev_id,
688                                              struct pt_regs *regs)
689 {
690         volatile unsigned int doorbell;
691
692         doorbell = mv64x60_read(&bh, MV64360_CPU1_DOORBELL);
693
694         /* Ack the doorbell interrupts */
695         mv64x60_write(&bh, MV64360_CPU1_DOORBELL_CLR, doorbell);
696
697         if (doorbell & 1) {
698                 smp_message_recv(0, regs);
699         }
700         if (doorbell & 2) {
701                 smp_message_recv(1, regs);
702         }
703         if (doorbell & 4) {
704                 smp_message_recv(2, regs);
705         }
706         if (doorbell & 8) {
707                 smp_message_recv(3, regs);
708         }
709         return IRQ_HANDLED;
710 }
711
/*
 * Entry stub for the second CPU (copied into internal SRAM by
 * smp_hdpu_kick_cpu): load 0xc0 into SRR0 (SPR 26) and 0 into SRR1
 * (SPR 27), then rfi - i.e. jump to physical address 0xc0 with a
 * cleared MSR.
 */
static void smp_hdpu_CPU_two(void)
{
        __asm__ __volatile__
            ("\n"
             "lis     3,0x0000\n"
             "ori     3,3,0x00c0\n"
             "mtspr   26, 3\n" "li      4,0\n" "mtspr   27,4\n" "rfi");

}
721
722 static int smp_hdpu_probe(void)
723 {
724         int *cpu_count_reg;
725         int num_cpus = 0;
726
727         cpu_count_reg = ioremap(HDPU_NEXUS_ID_BASE, HDPU_NEXUS_ID_SIZE);
728         if (cpu_count_reg) {
729                 num_cpus = (*cpu_count_reg >> 20) & 0x3;
730                 iounmap(cpu_count_reg);
731         }
732
733         /* Validate the bits in the CPLD. If we could not map the reg, return 2.
734          * If the register reported 0 or 3, return 2.
735          * Older CPLD revisions set these bits to all ones (val = 3).
736          */
737         if ((num_cpus < 1) || (num_cpus > 2)) {
738                 printk
739                     ("Unable to determine the number of processors %d . deafulting to 2.\n",
740                      num_cpus);
741                 num_cpus = 2;
742         }
743         return num_cpus;
744 }
745
/*
 * smp_ops.message_pass: raise an IPI by setting bit `msg` in the target
 * CPU's doorbell register.  Messages above 3 are rejected (the doorbell
 * handlers only decode bits 0-3).
 */
static void
smp_hdpu_message_pass(int target, int msg)
{
        if (msg > 0x3) {
                printk("SMP %d: smp_message_pass: unknown msg %d\n",
                       smp_processor_id(), msg);
                return;
        }
        switch (target) {
        case MSG_ALL:
                mv64x60_write(&bh, MV64360_CPU0_DOORBELL, 1 << msg);
                mv64x60_write(&bh, MV64360_CPU1_DOORBELL, 1 << msg);
                break;
        case MSG_ALL_BUT_SELF:
                /* Ring the *other* CPU's doorbell. */
                if (smp_processor_id())
                        mv64x60_write(&bh, MV64360_CPU0_DOORBELL, 1 << msg);
                else
                        mv64x60_write(&bh, MV64360_CPU1_DOORBELL, 1 << msg);
                break;
        default:
                /* Explicit CPU number: 0 or 1. */
                if (target == 0)
                        mv64x60_write(&bh, MV64360_CPU0_DOORBELL, 1 << msg);
                else
                        mv64x60_write(&bh, MV64360_CPU1_DOORBELL, 1 << msg);
                break;
        }
}
773
/*
 * smp_ops.kick_cpu: release the second CPU.  Copies the smp_hdpu_CPU_two
 * entry stub into internal SRAM, temporarily remaps that SRAM over the
 * boot vector at 0xfff00000, enables CPU1 arbitration so it starts
 * executing the stub, hands it the cpu number through the word at
 * KERNELBASE, then restores the original SRAM and boot-flash windows.
 */
static void smp_hdpu_kick_cpu(int nr)
{
        volatile unsigned int *bootaddr;

        if (ppc_md.progress)
                ppc_md.progress("smp_hdpu_kick_cpu", 0);

        hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_CPU1_KICK);

       /* Disable BootCS. Must also reduce the window size to zero. */
        bh.ci->disable_window_32bit(&bh, MV64x60_CPU2BOOT_WIN);
        mv64x60_set_32bit_window(&bh, MV64x60_CPU2BOOT_WIN, 0, 0, 0);

        bootaddr = ioremap(HDPU_INTERNAL_SRAM_BASE, HDPU_INTERNAL_SRAM_SIZE);
        if (!bootaddr) {
                if (ppc_md.progress)
                        ppc_md.progress("smp_hdpu_kick_cpu: ioremap failed", 0);
                return;
        }

        /* Place the entry stub at SRAM offset 0x100 (0x40 words). */
        memcpy((void *)(bootaddr + 0x40), (void *)&smp_hdpu_CPU_two, 0x20);

        /* map SRAM to 0xfff00000 */
        bh.ci->disable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN);

        mv64x60_set_32bit_window(&bh, MV64x60_CPU2SRAM_WIN,
                                 0xfff00000, HDPU_INTERNAL_SRAM_SIZE, 0);
        bh.ci->enable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN);

        /* Enable CPU1 arbitration */
        mv64x60_clr_bits(&bh, MV64x60_CPU_MASTER_CNTL, (1 << 9));

        /*
         * Wait 100 ms until the other CPU has reached __secondary_start.
         * Once it is there, it is permissible to revert the SRAM
         * mapping etc...
         */
        mdelay(100);
        /* Publish the cpu number where __secondary_start polls for it. */
        *(unsigned long *)KERNELBASE = nr;
        asm volatile ("dcbf 0,%0"::"r" (KERNELBASE):"memory");

        iounmap(bootaddr);

        /* Set up window for internal sram (256 KByte in size) */
        bh.ci->disable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN);
        mv64x60_set_32bit_window(&bh, MV64x60_CPU2SRAM_WIN,
                                 HDPU_INTERNAL_SRAM_BASE,
                                 HDPU_INTERNAL_SRAM_SIZE, 0);
        bh.ci->enable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN);
        /*
         * Set up windows for embedded FLASH (using boot CS window).
         */

        bh.ci->disable_window_32bit(&bh, MV64x60_CPU2BOOT_WIN);
        mv64x60_set_32bit_window(&bh, MV64x60_CPU2BOOT_WIN,
                                 HDPU_EMB_FLASH_BASE, HDPU_EMB_FLASH_SIZE, 0);
        bh.ci->enable_window_32bit(&bh, MV64x60_CPU2BOOT_WIN);
}
831
/*
 * smp_ops.setup_cpu: per-CPU bring-up.  Each CPU clears and unmasks its
 * own doorbell and installs its IPI handler; CPU 1 additionally enables
 * L1 parity and the L2 cache (mirroring what hdpu_setup_arch did on
 * CPU 0).
 */
static void smp_hdpu_setup_cpu(int cpu_nr)
{
        if (cpu_nr == 0) {
                if (ppc_md.progress)
                        ppc_md.progress("smp_hdpu_setup_cpu 0", 0);
                mv64x60_write(&bh, MV64360_CPU0_DOORBELL_CLR, 0xff);
                mv64x60_write(&bh, MV64360_CPU0_DOORBELL_MASK, 0xff);
                /* NOTE(review): request_irq() return value is ignored here
                 * and below - a registration failure would be silent.
                 */
                request_irq(60, hdpu_smp_cpu0_int_handler,
                            SA_INTERRUPT, hdpu_smp0, 0);
        }

        if (cpu_nr == 1) {
                if (ppc_md.progress)
                        ppc_md.progress("smp_hdpu_setup_cpu 1", 0);

                hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR |
                                  CPUSTATE_KERNEL_CPU1_OK);

                /* Enable L1 Parity Bits */
                hdpu_set_l1pe();

                /* Enable L2 cache */
                _set_L2CR(0);
                _set_L2CR(0x80080000);

                /* NOTE(review): CPU0 clears 0xff but this writes 0x0 to the
                 * CLR register (clears no bits) - confirm whether 0xff was
                 * intended.
                 */
                mv64x60_write(&bh, MV64360_CPU1_DOORBELL_CLR, 0x0);
                mv64x60_write(&bh, MV64360_CPU1_DOORBELL_MASK, 0xff);
                request_irq(28, hdpu_smp_cpu1_int_handler,
                            SA_INTERRUPT, hdpu_smp1, 0);
        }

}
864
865 void __devinit hdpu_tben_give()
866 {
867         volatile unsigned long *val = 0;
868
869         /* By writing 0 to the TBEN_BASE, the timebases is frozen */
870         val = ioremap(HDPU_TBEN_BASE, 4);
871         *val = 0;
872         mb();
873
874         spin_lock(&timebase_lock);
875         timebase_upper = get_tbu();
876         timebase_lower = get_tbl();
877         spin_unlock(&timebase_lock);
878
879         while (timebase_upper || timebase_lower)
880                 barrier();
881
882         /* By writing 1 to the TBEN_BASE, the timebases is thawed */
883         *val = 1;
884         mb();
885
886         iounmap(val);
887
888 }
889
890 void __devinit hdpu_tben_take()
891 {
892         while (!(timebase_upper || timebase_lower))
893                 barrier();
894
895         spin_lock(&timebase_lock);
896         set_tb(timebase_upper, timebase_lower);
897         timebase_upper = 0;
898         timebase_lower = 0;
899         spin_unlock(&timebase_lock);
900 }
901
/* SMP hooks consumed by the generic ppc SMP code (installed in platform_init). */
static struct smp_ops_t hdpu_smp_ops = {
        .message_pass = smp_hdpu_message_pass,
        .probe = smp_hdpu_probe,
        .kick_cpu = smp_hdpu_kick_cpu,
        .setup_cpu = smp_hdpu_setup_cpu,
        .give_timebase = hdpu_tben_give,
        .take_timebase = hdpu_tben_take,
};
910 #endif                          /* CONFIG_SMP */
911
/*
 * Kernel platform entry point for this board: decode the bootloader
 * registers, install every ppc_md hook, record the bridge register
 * base, map the early BAT, and set up early-progress/SMP/platform
 * notifiers as configured.
 */
void __init
platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
              unsigned long r6, unsigned long r7)
{
        parse_bootinfo(r3, r4, r5, r6, r7);

        isa_mem_base = 0;

        ppc_md.setup_arch = hdpu_setup_arch;
        ppc_md.init = hdpu_init2;
        ppc_md.show_cpuinfo = hdpu_show_cpuinfo;
        ppc_md.init_IRQ = hdpu_init_irq;
        ppc_md.get_irq = mv64360_get_irq;
        ppc_md.restart = hdpu_restart;
        ppc_md.power_off = hdpu_power_off;
        ppc_md.halt = hdpu_halt;
        ppc_md.find_end_of_memory = hdpu_find_end_of_memory;
        ppc_md.calibrate_decr = hdpu_calibrate_decr;
        ppc_md.setup_io_mappings = hdpu_map_io;

        /* Bridge registers are accessed 1:1 until the MMU remap. */
        bh.p_base = CONFIG_MV64X60_NEW_BASE;
        bh.v_base = (unsigned long *)bh.p_base;

        hdpu_set_bat();

#if defined(CONFIG_SERIAL_TEXT_DEBUG)
        ppc_md.progress = hdpu_mpsc_progress;   /* embedded UART */
        mv64x60_progress_init(bh.p_base);
#endif                          /* CONFIG_SERIAL_TEXT_DEBUG */

#ifdef CONFIG_SMP
        smp_ops = &hdpu_smp_ops;
#endif                          /* CONFIG_SMP */

#if defined(CONFIG_SERIAL_MPSC) || defined(CONFIG_MV643XX_ETH)
        platform_notify = hdpu_platform_notify;
#endif
        return;
}
951
952 #if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(CONFIG_SERIAL_MPSC_CONSOLE)
/*
 * SMP-safe version of the serial text debug routine.
 *
 * Spins until this CPU owns the MV64360 hardware Semaphore 0 (the
 * semaphore read presumably returns the owning CPU's WHO_AM_I value on
 * successful acquisition — verify against the MV64360 manual), prints
 * via the MPSC, then writes 0xff to release the semaphore.
 */
void hdpu_mpsc_progress(char *s, unsigned short hex)
{
	while (mv64x60_read(&bh, MV64360_WHO_AM_I) !=
	       mv64x60_read(&bh, MV64360_SEMAPHORE_0)) {
	}
	mv64x60_mpsc_progress(s, hex);
	mv64x60_write(&bh, MV64360_SEMAPHORE_0, 0xff);
}
962 #endif
963
964 static void hdpu_cpustate_set(unsigned char new_state)
965 {
966         unsigned int state = (new_state << 21);
967         mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, (0xff << 21));
968         mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, state);
969 }
970
971 #ifdef CONFIG_MTD_PHYSMAP
/*
 * Fixed partition map for the board's flash, registered with the
 * physmap MTD driver by hdpu_setup_mtd() below.  The five partitions
 * tile a contiguous 64 MiB (0x04000000) device with no gaps:
 * each .offset equals the previous partition's offset + size.
 */
static struct mtd_partition hdpu_partitions[] = {
	{
	 .name = "Root FS",		/* 0x00000000, 52 MiB */
	 .size = 0x03400000,
	 .offset = 0,
	 .mask_flags = 0,
	 },{
	 .name = "User FS",		/* 0x03400000, 8 MiB */
	 .size = 0x00800000,
	 .offset = 0x03400000,
	 .mask_flags = 0,
	 },{
	 .name = "Kernel Image",	/* 0x03C00000, 2.75 MiB */
	 .size = 0x002C0000,
	 .offset = 0x03C00000,
	 .mask_flags = 0,
	 },{
	 .name = "bootEnv",		/* 0x03EC0000, 256 KiB */
	 .size = 0x00040000,
	 .offset = 0x03EC0000,
	 .mask_flags = 0,
	 },{
	 .name = "bootROM",		/* 0x03F00000, 1 MiB */
	 .size = 0x00100000,
	 .offset = 0x03F00000,
	 .mask_flags = 0,
	 }
};
1000
1001 static int __init hdpu_setup_mtd(void)
1002 {
1003
1004         physmap_set_partitions(hdpu_partitions, 5);
1005         return 0;
1006 }
1007
1008 arch_initcall(hdpu_setup_mtd);
1009 #endif
1010
1011 #ifdef CONFIG_HDPU_FEATURES
1012
/* MMIO window handed to the hdpu_cpustate platform device: the GPP
 * VALUE_SET/VALUE_CLR register pair used by hdpu_cpustate_set(). */
static struct resource hdpu_cpustate_resources[] = {
	[0] = {
	       .name = "addr base",
	       .start = MV64x60_GPP_VALUE_SET,
	       /* NOTE(review): struct resource .end is the last valid
	        * address (inclusive).  "CLR + 1" therefore covers only
	        * the first two bytes of the CLR register if SET/CLR are
	        * consecutive 32-bit registers — confirm the intended
	        * span (likely CLR + 3). */
	       .end = MV64x60_GPP_VALUE_CLR + 1,
	       .flags = IORESOURCE_MEM,
	       },
};
1021
1022 static struct resource hdpu_nexus_resources[] = {
1023         [0] = {
1024                .name = "nexus register",
1025                .start = HDPU_NEXUS_ID_BASE,
1026                .end = HDPU_NEXUS_ID_BASE + HDPU_NEXUS_ID_SIZE,
1027                .flags = IORESOURCE_MEM,
1028                },
1029 };
1030
/* Platform device exposing the CPU-state GPP registers; bound by name
 * (HDPU_CPUSTATE_NAME) to the hdpu_cpustate driver. */
static struct platform_device hdpu_cpustate_device = {
	.name = HDPU_CPUSTATE_NAME,
	.id = 0,
	.num_resources = ARRAY_SIZE(hdpu_cpustate_resources),
	.resource = hdpu_cpustate_resources,
};
1037
/* Platform device exposing the nexus ID register; bound by name
 * (HDPU_NEXUS_NAME) to its driver. */
static struct platform_device hdpu_nexus_device = {
	.name = HDPU_NEXUS_NAME,
	.id = 0,
	.num_resources = ARRAY_SIZE(hdpu_nexus_resources),
	.resource = hdpu_nexus_resources,
};
1044
1045 static int __init hdpu_add_pds(void)
1046 {
1047         platform_device_register(&hdpu_cpustate_device);
1048         platform_device_register(&hdpu_nexus_device);
1049         return 0;
1050 }
1051
1052 arch_initcall(hdpu_add_pds);
1053 #endif