Merge git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-mn10300
Linus Torvalds [Thu, 28 Oct 2010 01:53:26 +0000 (18:53 -0700)]
* git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-mn10300: (44 commits)
  MN10300: Save frame pointer in thread_info struct rather than global var
  MN10300: Change "Matsushita" to "Panasonic".
  MN10300: Create a defconfig for the ASB2364 board
  MN10300: Update the ASB2303 defconfig
  MN10300: ASB2364: Add support for SMSC911X and SMC911X
  MN10300: ASB2364: Handle the IRQ multiplexer in the FPGA
  MN10300: Generic time support
  MN10300: Specify an ELF HWCAP flag for MN10300 Atomic Operations Unit support
  MN10300: Map userspace atomic op regs as a vmalloc page
  MN10300: Add Panasonic AM34 subarch and implement SMP
  MN10300: Delete idle_timestamp from irq_cpustat_t
  MN10300: Make various interrupt priority settings configurable
  MN10300: Optimise do_csum()
  MN10300: Implement atomic ops using atomic ops unit
  MN10300: Make the FPU operate in non-lazy mode under SMP
  MN10300: SMP TLB flushing
  MN10300: Use the [ID]PTEL2 registers rather than [ID]PTEL for TLB control
  MN10300: Make the use of PIDR to mark TLB entries controllable
  MN10300: Rename __flush_tlb*() to local_flush_tlb*()
  MN10300: AM34 erratum requires MMUCTR read and write on exception entry
  ...

145 files changed:
Kbuild
MAINTAINERS
arch/mn10300/Kconfig
arch/mn10300/Makefile
arch/mn10300/boot/compressed/head.S
arch/mn10300/configs/asb2303_defconfig
arch/mn10300/configs/asb2364_defconfig [new file with mode: 0644]
arch/mn10300/include/asm/atomic.h
arch/mn10300/include/asm/bitops.h
arch/mn10300/include/asm/cache.h
arch/mn10300/include/asm/cacheflush.h
arch/mn10300/include/asm/cpu-regs.h
arch/mn10300/include/asm/dmactl-regs.h
arch/mn10300/include/asm/elf.h
arch/mn10300/include/asm/exceptions.h
arch/mn10300/include/asm/fpu.h
arch/mn10300/include/asm/frame.inc
arch/mn10300/include/asm/gdb-stub.h
arch/mn10300/include/asm/hardirq.h
arch/mn10300/include/asm/highmem.h
arch/mn10300/include/asm/intctl-regs.h
arch/mn10300/include/asm/io.h
arch/mn10300/include/asm/irq.h
arch/mn10300/include/asm/irq_regs.h
arch/mn10300/include/asm/irqflags.h
arch/mn10300/include/asm/mmu_context.h
arch/mn10300/include/asm/pgalloc.h
arch/mn10300/include/asm/pgtable.h
arch/mn10300/include/asm/processor.h
arch/mn10300/include/asm/ptrace.h
arch/mn10300/include/asm/reset-regs.h
arch/mn10300/include/asm/rtc.h
arch/mn10300/include/asm/rwlock.h [new file with mode: 0644]
arch/mn10300/include/asm/serial-regs.h
arch/mn10300/include/asm/serial.h
arch/mn10300/include/asm/smp.h
arch/mn10300/include/asm/smsc911x.h [new file with mode: 0644]
arch/mn10300/include/asm/spinlock.h
arch/mn10300/include/asm/spinlock_types.h [new file with mode: 0644]
arch/mn10300/include/asm/system.h
arch/mn10300/include/asm/thread_info.h
arch/mn10300/include/asm/timer-regs.h
arch/mn10300/include/asm/timex.h
arch/mn10300/include/asm/tlbflush.h
arch/mn10300/include/asm/uaccess.h
arch/mn10300/kernel/Makefile
arch/mn10300/kernel/asm-offsets.c
arch/mn10300/kernel/cevt-mn10300.c [new file with mode: 0644]
arch/mn10300/kernel/csrc-mn10300.c [new file with mode: 0644]
arch/mn10300/kernel/entry.S
arch/mn10300/kernel/fpu-low.S
arch/mn10300/kernel/fpu-nofpu-low.S [new file with mode: 0644]
arch/mn10300/kernel/fpu-nofpu.c [new file with mode: 0644]
arch/mn10300/kernel/fpu.c
arch/mn10300/kernel/gdb-io-serial-low.S
arch/mn10300/kernel/gdb-io-serial.c
arch/mn10300/kernel/gdb-io-ttysm.c
arch/mn10300/kernel/gdb-stub.c
arch/mn10300/kernel/head.S
arch/mn10300/kernel/internal.h
arch/mn10300/kernel/irq.c
arch/mn10300/kernel/kprobes.c
arch/mn10300/kernel/mn10300-serial-low.S
arch/mn10300/kernel/mn10300-serial.c
arch/mn10300/kernel/mn10300-watchdog-low.S
arch/mn10300/kernel/mn10300-watchdog.c
arch/mn10300/kernel/process.c
arch/mn10300/kernel/profile.c
arch/mn10300/kernel/rtc.c
arch/mn10300/kernel/setup.c
arch/mn10300/kernel/signal.c
arch/mn10300/kernel/smp-low.S [new file with mode: 0644]
arch/mn10300/kernel/smp.c [new file with mode: 0644]
arch/mn10300/kernel/switch_to.S
arch/mn10300/kernel/time.c
arch/mn10300/kernel/traps.c
arch/mn10300/lib/bitops.c
arch/mn10300/lib/delay.c
arch/mn10300/lib/do_csum.S
arch/mn10300/mm/Kconfig.cache [new file with mode: 0644]
arch/mn10300/mm/Makefile
arch/mn10300/mm/cache-flush-by-reg.S [new file with mode: 0644]
arch/mn10300/mm/cache-flush-by-tag.S [new file with mode: 0644]
arch/mn10300/mm/cache-flush-icache.c [new file with mode: 0644]
arch/mn10300/mm/cache-flush-mn10300.S [deleted file]
arch/mn10300/mm/cache-inv-by-reg.S [new file with mode: 0644]
arch/mn10300/mm/cache-inv-by-tag.S [new file with mode: 0644]
arch/mn10300/mm/cache-inv-icache.c [new file with mode: 0644]
arch/mn10300/mm/cache-mn10300.S [deleted file]
arch/mn10300/mm/cache-smp-flush.c [new file with mode: 0644]
arch/mn10300/mm/cache-smp-inv.c [new file with mode: 0644]
arch/mn10300/mm/cache-smp.c [new file with mode: 0644]
arch/mn10300/mm/cache-smp.h [new file with mode: 0644]
arch/mn10300/mm/cache.c
arch/mn10300/mm/fault.c
arch/mn10300/mm/init.c
arch/mn10300/mm/misalignment.c
arch/mn10300/mm/mmu-context.c
arch/mn10300/mm/pgtable.c
arch/mn10300/mm/tlb-mn10300.S
arch/mn10300/mm/tlb-smp.c [new file with mode: 0644]
arch/mn10300/proc-mn103e010/include/proc/cache.h
arch/mn10300/proc-mn103e010/include/proc/clock.h
arch/mn10300/proc-mn103e010/include/proc/dmactl-regs.h [new file with mode: 0644]
arch/mn10300/proc-mn103e010/include/proc/intctl-regs.h [new file with mode: 0644]
arch/mn10300/proc-mn103e010/include/proc/proc.h
arch/mn10300/proc-mn103e010/proc-init.c
arch/mn10300/proc-mn2ws0050/Makefile [new file with mode: 0644]
arch/mn10300/proc-mn2ws0050/include/proc/cache.h [new file with mode: 0644]
arch/mn10300/proc-mn2ws0050/include/proc/clock.h [new file with mode: 0644]
arch/mn10300/proc-mn2ws0050/include/proc/dmactl-regs.h [new file with mode: 0644]
arch/mn10300/proc-mn2ws0050/include/proc/intctl-regs.h [new file with mode: 0644]
arch/mn10300/proc-mn2ws0050/include/proc/irq.h [new file with mode: 0644]
arch/mn10300/proc-mn2ws0050/include/proc/nand-regs.h [new file with mode: 0644]
arch/mn10300/proc-mn2ws0050/include/proc/proc.h [new file with mode: 0644]
arch/mn10300/proc-mn2ws0050/include/proc/smp-regs.h [new file with mode: 0644]
arch/mn10300/proc-mn2ws0050/proc-init.c [new file with mode: 0644]
arch/mn10300/unit-asb2303/include/unit/clock.h
arch/mn10300/unit-asb2303/include/unit/serial.h
arch/mn10300/unit-asb2303/include/unit/timex.h
arch/mn10300/unit-asb2303/unit-init.c
arch/mn10300/unit-asb2305/include/unit/clock.h
arch/mn10300/unit-asb2305/include/unit/serial.h
arch/mn10300/unit-asb2305/include/unit/timex.h
arch/mn10300/unit-asb2305/pci-asb2305.c
arch/mn10300/unit-asb2305/pci.c
arch/mn10300/unit-asb2305/unit-init.c
arch/mn10300/unit-asb2364/Makefile [new file with mode: 0644]
arch/mn10300/unit-asb2364/include/unit/clock.h [new file with mode: 0644]
arch/mn10300/unit-asb2364/include/unit/fpga-regs.h [new file with mode: 0644]
arch/mn10300/unit-asb2364/include/unit/irq.h [new file with mode: 0644]
arch/mn10300/unit-asb2364/include/unit/leds.h [new file with mode: 0644]
arch/mn10300/unit-asb2364/include/unit/serial.h [new file with mode: 0644]
arch/mn10300/unit-asb2364/include/unit/smsc911x.h [new file with mode: 0644]
arch/mn10300/unit-asb2364/include/unit/timex.h [new file with mode: 0644]
arch/mn10300/unit-asb2364/irq-fpga.c [new file with mode: 0644]
arch/mn10300/unit-asb2364/leds.c [new file with mode: 0644]
arch/mn10300/unit-asb2364/smsc911x.c [new file with mode: 0644]
arch/mn10300/unit-asb2364/unit-init.c [new file with mode: 0644]
drivers/net/Kconfig
drivers/net/smsc911x.c
drivers/net/smsc911x.h
include/linux/smp.h
kernel/smp.c
mm/maccess.c

diff --git a/Kbuild b/Kbuild
index 431f7ca..b00037a 100644
--- a/Kbuild
+++ b/Kbuild
@@ -53,7 +53,7 @@ targets += arch/$(SRCARCH)/kernel/asm-offsets.s
 # Default sed regexp - multiline due to syntax constraints
 define sed-y
        "/^->/{s:->#\(.*\):/* \1 */:; \
-       s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 (\2) /* \3 */:; \
+       s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 \2 /* \3 */:; \
        s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
        s:->::; p;}"
 endef
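
For illustration, this is roughly what the sed expression does to the compiler-generated asm-offsets.s, sketched with a hypothetical constant (THREAD_SP and its offset are made-up examples, not taken from this patch):

/* Hypothetical entry in an arch's kernel/asm-offsets.c: */
#include <linux/kbuild.h>
#include <linux/stddef.h>
#include <linux/sched.h>

void foo(void)
{
	DEFINE(THREAD_SP, offsetof(struct thread_struct, sp));
}

The compiler turns that into a line of the form "->THREAD_SP 24 offsetof(struct thread_struct, sp)" in asm-offsets.s (possibly with a "$" or "#" prefix on the number, which the sed strips).  The old first rule rewrote it as "#define THREAD_SP (24)"; the new rule emits "#define THREAD_SP 24", presumably so the generated constants can also be used verbatim in assembler operands, where the extra parentheses would get in the way.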
diff --git a/MAINTAINERS b/MAINTAINERS
index debde01..1e6b6bd 100644
@@ -4448,7 +4448,7 @@ L:        platform-driver-x86@vger.kernel.org
 S:     Maintained
 F:     drivers/platform/x86/panasonic-laptop.c
 
-PANASONIC MN10300/AM33 PORT
+PANASONIC MN10300/AM33/AM34 PORT
 M:     David Howells <dhowells@redhat.com>
 M:     Koichi Yasutake <yasutake.koichi@jp.panasonic.com>
 L:     linux-am33-list@redhat.com (moderated for non-subscribers)
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 7c2a2f7..365766a 100644
@@ -9,8 +9,19 @@ config MN10300
        def_bool y
        select HAVE_OPROFILE
 
-config AM33
-       def_bool y
+config AM33_2
+       def_bool n
+
+config AM33_3
+       def_bool n
+
+config AM34_2
+       def_bool n
+       select MN10300_HAS_ATOMIC_OPS_UNIT
+       select MN10300_HAS_CACHE_SNOOP
+
+config ERRATUM_NEED_TO_RELOAD_MMUCTR
+       def_bool y if AM33_3 || AM34_2
 
 config MMU
        def_bool y
@@ -37,7 +48,7 @@ config GENERIC_CALIBRATE_DELAY
        def_bool y
 
 config GENERIC_CMOS_UPDATE
-        def_bool y
+        def_bool n
 
 config GENERIC_FIND_NEXT_BIT
        def_bool y
@@ -45,6 +56,27 @@ config GENERIC_FIND_NEXT_BIT
 config GENERIC_HWEIGHT
        def_bool y
 
+config GENERIC_TIME
+       def_bool y
+
+config GENERIC_CLOCKEVENTS
+       def_bool y
+
+config GENERIC_CLOCKEVENTS_BUILD
+       def_bool y
+       depends on GENERIC_CLOCKEVENTS
+
+config GENERIC_CLOCKEVENTS_BROADCAST
+       bool
+
+config CEVT_MN10300
+       def_bool y
+       depends on GENERIC_CLOCKEVENTS
+
+config CSRC_MN10300
+       def_bool y
+       depends on GENERIC_TIME
+
 config GENERIC_BUG
        def_bool y
 
@@ -61,18 +93,14 @@ config GENERIC_HARDIRQS
 config HOTPLUG_CPU
        def_bool n
 
-config HZ
-       int
-       default 1000
-
-mainmenu "Matsushita MN10300/AM33 Kernel Configuration"
+mainmenu "Panasonic MN10300/AM33 Kernel Configuration"
 
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
 
 
-menu "Matsushita MN10300 system setup"
+menu "Panasonic MN10300 system setup"
 
 choice
        prompt "Unit type"
@@ -87,6 +115,10 @@ config MN10300_UNIT_ASB2303
 config MN10300_UNIT_ASB2305
        bool "ASB2305"
 
+config MN10300_UNIT_ASB2364
+       bool "ASB2364"
+       select SMSC911X_ARCH_HOOKS if SMSC911X
+
 endchoice
 
 choice
@@ -99,57 +131,51 @@ choice
 config MN10300_PROC_MN103E010
        bool "MN103E010"
        depends on MN10300_UNIT_ASB2303 || MN10300_UNIT_ASB2305
+       select AM33_2
+       select MN10300_PROC_HAS_TTYSM0
+       select MN10300_PROC_HAS_TTYSM1
+       select MN10300_PROC_HAS_TTYSM2
+
+config MN10300_PROC_MN2WS0050
+       bool "MN2WS0050"
+       depends on MN10300_UNIT_ASB2364
+       select AM34_2
        select MN10300_PROC_HAS_TTYSM0
        select MN10300_PROC_HAS_TTYSM1
        select MN10300_PROC_HAS_TTYSM2
 
 endchoice
 
-choice
-       prompt "Processor core support"
-       default MN10300_CPU_AM33V2
+config MN10300_HAS_ATOMIC_OPS_UNIT
+       def_bool n
        help
-         This option specifies the processor core for which the kernel will be
-         compiled. It affects the instruction set used.
-
-config MN10300_CPU_AM33V2
-       bool "AM33v2"
-
-endchoice
+         This should be enabled if the processor has an atomic ops unit
+         capable of doing LL/SC equivalent operations.
 
 config FPU
        bool "FPU present"
        default y
-       depends on MN10300_PROC_MN103E010
+       depends on MN10300_PROC_MN103E010 || MN10300_PROC_MN2WS0050
 
-choice
-       prompt "CPU Caching mode"
-       default MN10300_CACHE_WBACK
+config LAZY_SAVE_FPU
+       bool "Save FPU state lazily"
+       default y
+       depends on FPU && !SMP
        help
-         This option determines the caching mode for the kernel.
-
-         Write-Back caching mode involves the all reads and writes causing
-         the affected cacheline to be read into the cache first before being
-         operated upon. Memory is not then updated by a write until the cache
-         is filled and a cacheline needs to be displaced from the cache to
-         make room. Only at that point is it written back.
-
-         Write-Through caching only fetches cachelines from memory on a
-         read. Writes always get written directly to memory. If the affected
-         cacheline is also in cache, it will be updated too.
-
-         The final option is to turn of caching entirely.
+         Enable this to be lazy in the saving of the FPU state to the owning
+         task's thread struct.  This is useful if most tasks on the system
+         don't use the FPU as only those tasks that use it will pass it
+         between them, and the state needn't be saved for a task that isn't
+         using it.
 
-config MN10300_CACHE_WBACK
-       bool "Write-Back"
+         This can't be so easily used on SMP as the process that owns the FPU
+         state on a CPU may be currently running on another CPU, so for the
+         moment, it is disabled.
 
-config MN10300_CACHE_WTHRU
-       bool "Write-Through"
+source "arch/mn10300/mm/Kconfig.cache"
 
-config MN10300_CACHE_DISABLED
-       bool "Disabled"
-
-endchoice
+config MN10300_TLB_USE_PIDR
+       def_bool y
 
 menu "Memory layout options"
 
@@ -170,24 +196,55 @@ config KERNEL_TEXT_ADDRESS
 
 config KERNEL_ZIMAGE_BASE_ADDRESS
        hex "Base address of compressed vmlinux image"
-       default "0x90700000"
+       default "0x50700000"
 
+config BOOT_STACK_OFFSET
+       hex
+       default "0xF00" if SMP
+       default "0xFF0" if !SMP
+
+config BOOT_STACK_SIZE
+       hex
+       depends on SMP
+       default "0x100"
 endmenu
 
-config PREEMPT
-       bool "Preemptible Kernel"
-       help
-         This option reduces the latency of the kernel when reacting to
-         real-time or interactive events by allowing a low priority process to
-         be preempted even if it is in kernel mode executing a system call.
-         This allows applications to run more reliably even when the system is
-         under load.
+config SMP
+       bool "Symmetric multi-processing support"
+       default y
+       depends on MN10300_PROC_MN2WS0038 || MN10300_PROC_MN2WS0050
+       ---help---
+         This enables support for systems with more than one CPU. If you have
+         a system with only one CPU, like most personal computers, say N. If
+         you have a system with more than one CPU, say Y.
+
+         If you say N here, the kernel will run on single and multiprocessor
+         machines, but will use only one CPU of a multiprocessor machine. If
+         you say Y here, the kernel will run on many, but not all,
+         singleprocessor machines. On a singleprocessor machine, the kernel
+         will run faster if you say N here.
 
-         Say Y here if you are building a kernel for a desktop, embedded
-         or real-time system.  Say N if you are unsure.
+         See also <file:Documentation/i386/IO-APIC.txt>,
+         <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
+         <http://www.tldp.org/docs.html#howto>.
+
+         If you don't know what to do here, say N.
+
+config NR_CPUS
+       int
+       depends on SMP
+       default "2"
+
+config USE_GENERIC_SMP_HELPERS
+       bool
+       depends on SMP
+       default y
+
+source "kernel/Kconfig.preempt"
 
 config MN10300_CURRENT_IN_E2
        bool "Hold current task address in E2 register"
+       depends on !SMP
        default y
        help
          This option removes the E2/R2 register from the set available to gcc
@@ -209,12 +266,15 @@ config MN10300_USING_JTAG
          suppresses the use of certain hardware debugging features, such as
          single-stepping, which are taken over completely by the JTAG unit.
 
+source "kernel/Kconfig.hz"
+source "kernel/time/Kconfig"
+
 config MN10300_RTC
        bool "Using MN10300 RTC"
-       depends on MN10300_PROC_MN103E010
+       depends on MN10300_PROC_MN103E010 || MN10300_PROC_MN2WS0050
+       select GENERIC_CMOS_UPDATE
        default n
        help
-
          This option enables support for the RTC, thus enabling time to be
          tracked, even when system is powered down. This is available on-chip
          on the MN103E010.
@@ -306,14 +366,23 @@ config MN10300_TTYSM1
 
 choice
        prompt "Select the timer to supply the clock for SIF1"
-       default MN10300_TTYSM0_TIMER9
+       default MN10300_TTYSM1_TIMER12 \
+               if !(AM33_2 || AM33_3)
+       default MN10300_TTYSM1_TIMER9 \
+               if AM33_2 || AM33_3
        depends on MN10300_TTYSM1
 
+config MN10300_TTYSM1_TIMER12
+       bool "Use timer 12 (16-bit)"
+       depends on !(AM33_2 || AM33_3)
+
 config MN10300_TTYSM1_TIMER9
        bool "Use timer 9 (16-bit)"
+       depends on AM33_2 || AM33_3
 
 config MN10300_TTYSM1_TIMER3
        bool "Use timer 3 (8-bit)"
+       depends on AM33_2 || AM33_3
 
 endchoice
 
@@ -328,17 +397,107 @@ config MN10300_TTYSM2
 
 choice
        prompt "Select the timer to supply the clock for SIF2"
-       default MN10300_TTYSM0_TIMER10
+       default MN10300_TTYSM2_TIMER3 \
+               if !(AM33_2 || AM33_3)
+       default MN10300_TTYSM2_TIMER10 \
+               if AM33_2 || AM33_3
        depends on MN10300_TTYSM2
 
+config MN10300_TTYSM2_TIMER9
+       bool "Use timer 9 (16-bit)"
+       depends on !(AM33_2 || AM33_3)
+
+config MN10300_TTYSM2_TIMER1
+       bool "Use timer 1 (8-bit)"
+       depends on !(AM33_2 || AM33_3)
+
+config MN10300_TTYSM2_TIMER3
+       bool "Use timer 3 (8-bit)"
+       depends on !(AM33_2 || AM33_3)
+
 config MN10300_TTYSM2_TIMER10
        bool "Use timer 10 (16-bit)"
+       depends on AM33_2 || AM33_3
 
 endchoice
 
 config MN10300_TTYSM2_CTS
        bool "Enable the use of the CTS line /dev/ttySM2"
-       depends on MN10300_TTYSM2
+       depends on MN10300_TTYSM2 && AM33_2
+
+endmenu
+
+menu "Interrupt request priority options"
+
+comment "[!] NOTE: A lower number/level indicates a higher priority (0 is highest, 6 is lowest)"
+
+comment "____Non-maskable interrupt levels____"
+comment "The following must be set to a higher priority than local_irq_disable() and on-chip serial"
+
+config GDBSTUB_IRQ_LEVEL
+       int "GDBSTUB interrupt priority"
+       depends on GDBSTUB
+       range 0 1 if LINUX_CLI_LEVEL = 2
+       range 0 2 if LINUX_CLI_LEVEL = 3
+       range 0 3 if LINUX_CLI_LEVEL = 4
+       range 0 4 if LINUX_CLI_LEVEL = 5
+       range 0 5 if LINUX_CLI_LEVEL = 6
+       default 0
+
+comment "The following must be set to a higher priority than local_irq_disable()"
+
+config MN10300_SERIAL_IRQ_LEVEL
+       int "MN10300 on-chip serial interrupt priority"
+       depends on MN10300_TTYSM
+       range 1 1 if LINUX_CLI_LEVEL = 2
+       range 1 2 if LINUX_CLI_LEVEL = 3
+       range 1 3 if LINUX_CLI_LEVEL = 4
+       range 1 4 if LINUX_CLI_LEVEL = 5
+       range 1 5 if LINUX_CLI_LEVEL = 6
+       default 1
+
+comment "-"
+comment "____Maskable interrupt levels____"
+
+config LINUX_CLI_LEVEL
+       int "The highest interrupt priority excluded by local_irq_disable() (2-6)"
+       range 2 6
+       default 2
+       help
+         local_irq_disable() doesn't actually disable maskable interrupts -
+         what it does is restrict the levels of interrupt which are permitted
+         (a lower level indicates a higher priority) by lowering the value in
+         EPSW.IM from 7.  Any interrupt is permitted for which the level is
+         lower than EPSW.IM.
+
+         Certain interrupts, such as GDBSTUB and virtual MN10300 on-chip
+         serial DMA interrupts are allowed to interrupt normal disabled
+         sections.
+
+comment "The following must be set to a equal to or lower priority than LINUX_CLI_LEVEL"
+
+config TIMER_IRQ_LEVEL
+       int "Kernel timer interrupt priority"
+       range LINUX_CLI_LEVEL 6
+       default 4
+
+config PCI_IRQ_LEVEL
+       int "PCI interrupt priority"
+       depends on PCI
+       range LINUX_CLI_LEVEL 6
+       default 5
+
+config ETHERNET_IRQ_LEVEL
+       int "Ethernet interrupt priority"
+       depends on SMC91X || SMC911X || SMSC911X
+       range LINUX_CLI_LEVEL 6
+       default 6
+
+config EXT_SERIAL_IRQ_LEVEL
+       int "External serial port interrupt priority"
+       depends on SERIAL_8250
+       range LINUX_CLI_LEVEL 6
+       default 6
 
 endmenu
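
To make the LINUX_CLI_LEVEL help text above concrete: an interrupt configured at priority N is delivered only while N is numerically lower than the current EPSW.IM value, and local_irq_disable() lowers EPSW.IM from 7 to LINUX_CLI_LEVEL rather than to 0.  A minimal C model of that rule follows; it is an illustration only, not kernel code -- EPSW_IM_SHIFT matches the definition added to asm/cpu-regs.h below, while the 3-bit field mask and the helper name are assumptions.

#define EPSW_IM_SHIFT	8		/* as added to asm/cpu-regs.h below */
#define EPSW_IM_MASK	0x00000700	/* assumed: 3-bit IM field in EPSW */

/*
 * Levels below LINUX_CLI_LEVEL (e.g. GDBSTUB_IRQ_LEVEL, the on-chip
 * serial level) still get through a local_irq_disable()d section;
 * the timer, PCI, ethernet and external serial levels do not.
 */
static inline int irq_would_be_delivered(unsigned long epsw, unsigned int level)
{
	return level < ((epsw & EPSW_IM_MASK) >> EPSW_IM_SHIFT);
}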
 
diff --git a/arch/mn10300/Makefile b/arch/mn10300/Makefile
index ac5c6bd..7120282 100644
@@ -36,6 +36,9 @@ endif
 ifeq ($(CONFIG_MN10300_PROC_MN103E010),y)
 PROCESSOR      := mn103e010
 endif
+ifeq ($(CONFIG_MN10300_PROC_MN2WS0050),y)
+PROCESSOR      := mn2ws0050
+endif
 
 ifeq ($(CONFIG_MN10300_UNIT_ASB2303),y)
 UNIT           := asb2303
@@ -43,6 +46,9 @@ endif
 ifeq ($(CONFIG_MN10300_UNIT_ASB2305),y)
 UNIT           := asb2305
 endif
+ifeq ($(CONFIG_MN10300_UNIT_ASB2364),y)
+UNIT           := asb2364
+endif
 
 
 head-y         := arch/mn10300/kernel/head.o arch/mn10300/kernel/init_task.o
diff --git a/arch/mn10300/boot/compressed/head.S b/arch/mn10300/boot/compressed/head.S
index 502e1eb..7b50345 100644
 
 #include <linux/linkage.h>
 #include <asm/cpu-regs.h>
+#include <asm/cache.h>
+#ifdef CONFIG_SMP
+#include <proc/smp-regs.h>
+#endif
 
        .globl startup_32
 startup_32:
-       # first save off parameters from bootloader
+#ifdef CONFIG_SMP
+       #
+       # Secondary CPUs jump directly to the kernel entry point
+       #
+       # Must save primary CPU's D0-D2 registers as they hold boot parameters
+       #
+       mov     (CPUID), d3
+       and     CPUID_MASK,d3
+       beq     startup_primary
+       mov     CONFIG_KERNEL_TEXT_ADDRESS,a0
+       jmp     (a0)
+
+startup_primary:
+#endif /* CONFIG_SMP */
+
+       # first save parameters from bootloader
        mov     param_save_area,a0
        mov     d0,(a0)
        mov     d1,(4,a0)
@@ -37,8 +56,15 @@ startup_32:
        mov     (a0),d0
        btst    CHCTR_ICBUSY|CHCTR_DCBUSY,d0            # wait till not busy
        lne
-       mov     CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD,d0   # writethru dcache
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+#ifdef CONFIG_MN10300_CACHE_WBACK
+       mov     CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK,d0
+#else
+       mov     CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRTHROUGH,d0
+#endif /* WBACK */
        movhu   d0,(a0)                                 # enable
+#endif /* !ENABLED */
 
        # clear the BSS area
        mov     __bss_start,a0
@@ -54,6 +80,9 @@ bssclear_end:
 
        # decompress the kernel
        call    decompress_kernel[],0
+#ifdef CONFIG_MN10300_CACHE_WBACK
+       call    mn10300_dcache_flush_inv[],0
+#endif
 
        # disable caches again
        mov     CHCTR,a0
@@ -69,10 +98,46 @@ bssclear_end:
        mov     (4,a0),d1
        mov     (8,a0),d2
 
+       # jump to the kernel proper entry point
        mov     a3,sp
        mov     CONFIG_KERNEL_TEXT_ADDRESS,a0
        jmp     (a0)
 
+
+###############################################################################
+#
+# Cache flush routines
+#
+###############################################################################
+#ifdef CONFIG_MN10300_CACHE_WBACK
+mn10300_dcache_flush_inv:
+       movhu   (CHCTR),d0
+       btst    CHCTR_DCEN,d0
+       beq     mn10300_dcache_flush_inv_end
+
+       mov     L1_CACHE_NENTRIES,d1
+       clr     a1
+
+mn10300_dcache_flush_inv_loop:
+       mov     (DCACHE_PURGE_WAY0(0),a1),d0    # unconditional purge
+       mov     (DCACHE_PURGE_WAY1(0),a1),d0    # unconditional purge
+       mov     (DCACHE_PURGE_WAY2(0),a1),d0    # unconditional purge
+       mov     (DCACHE_PURGE_WAY3(0),a1),d0    # unconditional purge
+
+       add     L1_CACHE_BYTES,a1
+       add     -1,d1
+       bne     mn10300_dcache_flush_inv_loop
+
+mn10300_dcache_flush_inv_end:
+       ret     [],0
+#endif /* CONFIG_MN10300_CACHE_WBACK */
+
+
+###############################################################################
+#
+# Data areas
+#
+###############################################################################
        .data
        .align          4
 param_save_area:
diff --git a/arch/mn10300/configs/asb2303_defconfig b/arch/mn10300/configs/asb2303_defconfig
index d80dfcb..3f749b6 100644
@@ -12,6 +12,8 @@ CONFIG_SLAB=y
 CONFIG_PROFILING=y
 # CONFIG_BLOCK is not set
 CONFIG_PREEMPT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
 CONFIG_MN10300_RTC=y
 CONFIG_MN10300_TTYSM_CONSOLE=y
 CONFIG_MN10300_TTYSM0=y
diff --git a/arch/mn10300/configs/asb2364_defconfig b/arch/mn10300/configs/asb2364_defconfig
new file mode 100644
index 0000000..83ce2f2
--- /dev/null
+++ b/arch/mn10300/configs/asb2364_defconfig
@@ -0,0 +1,98 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_NS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_RELAY=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_EMBEDDED=y
+# CONFIG_KALLSYMS is not set
+# CONFIG_VM_EVENT_COUNTERS is not set
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLOCK is not set
+CONFIG_MN10300_UNIT_ASB2364=y
+CONFIG_PREEMPT=y
+# CONFIG_MN10300_USING_JTAG is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_MN10300_TTYSM_CONSOLE=y
+CONFIG_MN10300_TTYSM0=y
+CONFIG_MN10300_TTYSM0_TIMER2=y
+CONFIG_MN10300_TTYSM1=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_IPV6=y
+# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET6_XFRM_MODE_BEET is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_DEBUG=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_REDBOOT_PARTS=y
+CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_GEOMETRY=y
+CONFIG_MTD_CFI_I4=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_NETDEVICES=y
+CONFIG_NET_ETHERNET=y
+CONFIG_SMSC911X=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_PROC_KCORE=y
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_JFFS2_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_ROOT_NFS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index f0cc1f8..92d2f92 100644
@@ -1 +1,351 @@
+/* MN10300 Atomic counter operations
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef _ASM_ATOMIC_H
+#define _ASM_ATOMIC_H
+
+#include <asm/irqflags.h>
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+static inline
+unsigned long __xchg(volatile unsigned long *m, unsigned long val)
+{
+       unsigned long status;
+       unsigned long oldval;
+
+       asm volatile(
+               "1:     mov     %4,(_AAR,%3)    \n"
+               "       mov     (_ADR,%3),%1    \n"
+               "       mov     %5,(_ADR,%3)    \n"
+               "       mov     (_ADR,%3),%0    \n"     /* flush */
+               "       mov     (_ASR,%3),%0    \n"
+               "       or      %0,%0           \n"
+               "       bne     1b              \n"
+               : "=&r"(status), "=&r"(oldval), "=m"(*m)
+               : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m), "r"(val)
+               : "memory", "cc");
+
+       return oldval;
+}
+
+static inline unsigned long __cmpxchg(volatile unsigned long *m,
+                                     unsigned long old, unsigned long new)
+{
+       unsigned long status;
+       unsigned long oldval;
+
+       asm volatile(
+               "1:     mov     %4,(_AAR,%3)    \n"
+               "       mov     (_ADR,%3),%1    \n"
+               "       cmp     %5,%1           \n"
+               "       bne     2f              \n"
+               "       mov     %6,(_ADR,%3)    \n"
+               "2:     mov     (_ADR,%3),%0    \n"     /* flush */
+               "       mov     (_ASR,%3),%0    \n"
+               "       or      %0,%0           \n"
+               "       bne     1b              \n"
+               : "=&r"(status), "=&r"(oldval), "=m"(*m)
+               : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m),
+                 "r"(old), "r"(new)
+               : "memory", "cc");
+
+       return oldval;
+}
+#else  /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+#error "No SMP atomic operation support!"
+#endif /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+
+#else  /* CONFIG_SMP */
+
+/*
+ * Emulate xchg for non-SMP MN10300
+ */
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+static inline
+unsigned long __xchg(volatile unsigned long *m, unsigned long val)
+{
+       unsigned long oldval;
+       unsigned long flags;
+
+       flags = arch_local_cli_save();
+       oldval = *m;
+       *m = val;
+       arch_local_irq_restore(flags);
+       return oldval;
+}
+
+/*
+ * Emulate cmpxchg for non-SMP MN10300
+ */
+static inline unsigned long __cmpxchg(volatile unsigned long *m,
+                                     unsigned long old, unsigned long new)
+{
+       unsigned long oldval;
+       unsigned long flags;
+
+       flags = arch_local_cli_save();
+       oldval = *m;
+       if (oldval == old)
+               *m = new;
+       arch_local_irq_restore(flags);
+       return oldval;
+}
+
+#endif /* CONFIG_SMP */
+
+#define xchg(ptr, v)                                           \
+       ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr),    \
+                                    (unsigned long)(v)))
+
+#define cmpxchg(ptr, o, n)                                     \
+       ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
+                                       (unsigned long)(o),     \
+                                       (unsigned long)(n)))
+
+#define atomic_xchg(ptr, v)            (xchg(&(ptr)->counter, (v)))
+#define atomic_cmpxchg(v, old, new)    (cmpxchg(&((v)->counter), (old), (new)))
+
+#endif /* !__ASSEMBLY__ */
+
+#ifndef CONFIG_SMP
 #include <asm-generic/atomic.h>
+#else
+
+/*
+ * Atomic operations that C can't guarantee us.  Useful for
+ * resource counting etc..
+ */
+
+#define ATOMIC_INIT(i) { (i) }
+
+#ifdef __KERNEL__
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.  Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_read(v) ((v)->counter)
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.  Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_set(v, i) (((v)->counter) = (i))
+
+/**
+ * atomic_add_return - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns the result
+ * Note that the guaranteed useful range of an atomic_t is only 24 bits.
+ */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+       int retval;
+#ifdef CONFIG_SMP
+       int status;
+
+       asm volatile(
+               "1:     mov     %4,(_AAR,%3)    \n"
+               "       mov     (_ADR,%3),%1    \n"
+               "       add     %5,%1           \n"
+               "       mov     %1,(_ADR,%3)    \n"
+               "       mov     (_ADR,%3),%0    \n"     /* flush */
+               "       mov     (_ASR,%3),%0    \n"
+               "       or      %0,%0           \n"
+               "       bne     1b              \n"
+               : "=&r"(status), "=&r"(retval), "=m"(v->counter)
+               : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
+               : "memory", "cc");
+
+#else
+       unsigned long flags;
+
+       flags = arch_local_cli_save();
+       retval = v->counter;
+       retval += i;
+       v->counter = retval;
+       arch_local_irq_restore(flags);
+#endif
+       return retval;
+}
+
+/**
+ * atomic_sub_return - subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns the result
+ * Note that the guaranteed useful range of an atomic_t is only 24 bits.
+ */
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+       int retval;
+#ifdef CONFIG_SMP
+       int status;
+
+       asm volatile(
+               "1:     mov     %4,(_AAR,%3)    \n"
+               "       mov     (_ADR,%3),%1    \n"
+               "       sub     %5,%1           \n"
+               "       mov     %1,(_ADR,%3)    \n"
+               "       mov     (_ADR,%3),%0    \n"     /* flush */
+               "       mov     (_ASR,%3),%0    \n"
+               "       or      %0,%0           \n"
+               "       bne     1b              \n"
+               : "=&r"(status), "=&r"(retval), "=m"(v->counter)
+               : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
+               : "memory", "cc");
+
+#else
+       unsigned long flags;
+       flags = arch_local_cli_save();
+       retval = v->counter;
+       retval -= i;
+       v->counter = retval;
+       arch_local_irq_restore(flags);
+#endif
+       return retval;
+}
+
+static inline int atomic_add_negative(int i, atomic_t *v)
+{
+       return atomic_add_return(i, v) < 0;
+}
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+       atomic_add_return(i, v);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+       atomic_sub_return(i, v);
+}
+
+static inline void atomic_inc(atomic_t *v)
+{
+       atomic_add_return(1, v);
+}
+
+static inline void atomic_dec(atomic_t *v)
+{
+       atomic_sub_return(1, v);
+}
+
+#define atomic_dec_return(v)           atomic_sub_return(1, (v))
+#define atomic_inc_return(v)           atomic_add_return(1, (v))
+
+#define atomic_sub_and_test(i, v)      (atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v)         (atomic_sub_return(1, (v)) == 0)
+#define atomic_inc_and_test(v)         (atomic_add_return(1, (v)) == 0)
+
+#define atomic_add_unless(v, a, u)                             \
+({                                                             \
+       int c, old;                                             \
+       c = atomic_read(v);                                     \
+       while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+               c = old;                                        \
+       c != (u);                                               \
+})
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+/**
+ * atomic_clear_mask - Atomically clear bits in memory
+ * @mask: Mask of the bits to be cleared
+ * @v: pointer to word in memory
+ *
+ * Atomically clears the bits set in mask from the memory word specified.
+ */
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+#ifdef CONFIG_SMP
+       int status;
+
+       asm volatile(
+               "1:     mov     %3,(_AAR,%2)    \n"
+               "       mov     (_ADR,%2),%0    \n"
+               "       and     %4,%0           \n"
+               "       mov     %0,(_ADR,%2)    \n"
+               "       mov     (_ADR,%2),%0    \n"     /* flush */
+               "       mov     (_ASR,%2),%0    \n"
+               "       or      %0,%0           \n"
+               "       bne     1b              \n"
+               : "=&r"(status), "=m"(*addr)
+               : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask)
+               : "memory", "cc");
+#else
+       unsigned long flags;
+
+       mask = ~mask;
+       flags = arch_local_cli_save();
+       *addr &= mask;
+       arch_local_irq_restore(flags);
+#endif
+}
+
+/**
+ * atomic_set_mask - Atomically set bits in memory
+ * @mask: Mask of the bits to be set
+ * @v: pointer to word in memory
+ *
+ * Atomically sets the bits set in mask from the memory word specified.
+ */
+static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
+{
+#ifdef CONFIG_SMP
+       int status;
+
+       asm volatile(
+               "1:     mov     %3,(_AAR,%2)    \n"
+               "       mov     (_ADR,%2),%0    \n"
+               "       or      %4,%0           \n"
+               "       mov     %0,(_ADR,%2)    \n"
+               "       mov     (_ADR,%2),%0    \n"     /* flush */
+               "       mov     (_ASR,%2),%0    \n"
+               "       or      %0,%0           \n"
+               "       bne     1b              \n"
+               : "=&r"(status), "=m"(*addr)
+               : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask)
+               : "memory", "cc");
+#else
+       unsigned long flags;
+
+       flags = arch_local_cli_save();
+       *addr |= mask;
+       arch_local_irq_restore(flags);
+#endif
+}
+
+/* Atomic operations are already serializing on MN10300??? */
+#define smp_mb__before_atomic_dec()    barrier()
+#define smp_mb__after_atomic_dec()     barrier()
+#define smp_mb__before_atomic_inc()    barrier()
+#define smp_mb__after_atomic_inc()     barrier()
+
+#include <asm-generic/atomic-long.h>
+
+#endif /* __KERNEL__ */
+#endif /* CONFIG_SMP */
+#endif /* _ASM_ATOMIC_H */
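
The inline assembly above all follows the same retry protocol against the atomic operations unit.  As a plain-C sketch of that protocol (illustration only, not kernel code), the hypothetical AOU_* accessors below stand in for the AAR/ADR/ASR registers that asm/cpu-regs.h places at ATOMIC_OPS_BASE_ADDR (0xc0000a00):

/* Sketch only: hypothetical register accessors; the real code uses the
 * _AAR/_ADR/_ASR offsets from ATOMIC_OPS_BASE_ADDR in inline asm. */
#define AOU_AAR (*(volatile unsigned long *)0xc0000a00)	/* address register */
#define AOU_ADR (*(volatile unsigned long *)0xc0000a08)	/* data register */
#define AOU_ASR (*(volatile unsigned long *)0xc0000a0c)	/* status register */

static unsigned long aou_xchg_sketch(volatile unsigned long *m, unsigned long val)
{
	unsigned long old, status;

	do {
		AOU_AAR = (unsigned long)m;	/* latch the target address */
		old = AOU_ADR;			/* read the current value through the unit */
		AOU_ADR = val;			/* attempt the store */
		(void)AOU_ADR;			/* dummy read-back to flush, as the asm does */
		status = AOU_ASR;		/* non-zero => disturbed (interrupt/bus error), retry */
	} while (status);

	return old;
}

If the sequence completes with ASR reading back zero, the exchange happened atomically with respect to the other CPU; otherwise the loop starts again, which is exactly what the "bne 1b" in the assembly does.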
diff --git a/arch/mn10300/include/asm/bitops.h b/arch/mn10300/include/asm/bitops.h
index 3f50e96..3b8a868 100644
@@ -57,7 +57,7 @@
 #define clear_bit(nr, addr) ___clear_bit((nr), (addr))
 
 
-static inline void __clear_bit(int nr, volatile void *addr)
+static inline void __clear_bit(unsigned long nr, volatile void *addr)
 {
        unsigned int *a = (unsigned int *) addr;
        int mask;
@@ -70,15 +70,15 @@ static inline void __clear_bit(int nr, volatile void *addr)
 /*
  * test bit
  */
-static inline int test_bit(int nr, const volatile void *addr)
+static inline int test_bit(unsigned long nr, const volatile void *addr)
 {
-       return 1UL & (((const unsigned int *) addr)[nr >> 5] >> (nr & 31));
+       return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31));
 }
 
 /*
  * change bit
  */
-static inline void __change_bit(int nr, volatile void *addr)
+static inline void __change_bit(unsigned long nr, volatile void *addr)
 {
        int     mask;
        unsigned int *a = (unsigned int *) addr;
@@ -88,7 +88,7 @@ static inline void __change_bit(int nr, volatile void *addr)
        *a ^= mask;
 }
 
-extern void change_bit(int nr, volatile void *addr);
+extern void change_bit(unsigned long nr, volatile void *addr);
 
 /*
  * test and set bit
@@ -135,7 +135,7 @@ extern void change_bit(int nr, volatile void *addr);
 /*
  * test and change bit
  */
-static inline int __test_and_change_bit(int nr, volatile void *addr)
+static inline int __test_and_change_bit(unsigned long nr, volatile void *addr)
 {
        int     mask, retval;
        unsigned int *a = (unsigned int *)addr;
@@ -148,7 +148,7 @@ static inline int __test_and_change_bit(int nr, volatile void *addr)
        return retval;
 }
 
-extern int test_and_change_bit(int nr, volatile void *addr);
+extern int test_and_change_bit(unsigned long nr, volatile void *addr);
 
 #include <asm-generic/bitops/lock.h>
 
diff --git a/arch/mn10300/include/asm/cache.h b/arch/mn10300/include/asm/cache.h
index 781bf61..f29cde2 100644
 
 /* instruction cache access registers */
 #define ICACHE_DATA(WAY, ENTRY, OFF) \
-       __SYSREG(0xc8000000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10 + (OFF) * 4, u32)
+       __SYSREG(0xc8000000 + (WAY) * L1_CACHE_WAYDISP + \
+               (ENTRY) * L1_CACHE_BYTES + (OFF) * 4, u32)
 #define ICACHE_TAG(WAY, ENTRY)  \
-       __SYSREG(0xc8100000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10, u32)
+       __SYSREG(0xc8100000 + (WAY) * L1_CACHE_WAYDISP + \
+               (ENTRY) * L1_CACHE_BYTES, u32)
 
-/* instruction cache access registers */
+/* data cache access registers */
 #define DCACHE_DATA(WAY, ENTRY, OFF) \
-       __SYSREG(0xc8200000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10 + (OFF) * 4, u32)
+       __SYSREG(0xc8200000 + (WAY) * L1_CACHE_WAYDISP + \
+               (ENTRY) * L1_CACHE_BYTES + (OFF) * 4, u32)
 #define DCACHE_TAG(WAY, ENTRY)  \
-       __SYSREG(0xc8300000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10, u32)
+       __SYSREG(0xc8300000 + (WAY) * L1_CACHE_WAYDISP + \
+               (ENTRY) * L1_CACHE_BYTES, u32)
 
 #endif /* _ASM_CACHE_H */
diff --git a/arch/mn10300/include/asm/cacheflush.h b/arch/mn10300/include/asm/cacheflush.h
index 29e692f..faed902 100644
 #include <linux/mm.h>
 
 /*
- * virtually-indexed cache management (our cache is physically indexed)
+ * Primitive routines
  */
-#define flush_cache_all()                      do {} while (0)
-#define flush_cache_mm(mm)                     do {} while (0)
-#define flush_cache_dup_mm(mm)                 do {} while (0)
-#define flush_cache_range(mm, start, end)      do {} while (0)
-#define flush_cache_page(vma, vmaddr, pfn)     do {} while (0)
-#define flush_cache_vmap(start, end)           do {} while (0)
-#define flush_cache_vunmap(start, end)         do {} while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page)                        do {} while (0)
-#define flush_dcache_mmap_lock(mapping)                do {} while (0)
-#define flush_dcache_mmap_unlock(mapping)      do {} while (0)
-
-/*
- * physically-indexed cache management
- */
-#ifndef CONFIG_MN10300_CACHE_DISABLED
-
-extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void flush_icache_page(struct vm_area_struct *vma, struct page *pg);
-
-#else
-
-#define flush_icache_range(start, end)         do {} while (0)
-#define flush_icache_page(vma, pg)             do {} while (0)
-
-#endif
-
-#define flush_icache_user_range(vma, pg, adr, len) \
-       flush_icache_range(adr, adr + len)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-       do {                                    \
-               memcpy(dst, src, len);          \
-               flush_icache_page(vma, page);   \
-       } while (0)
-
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-       memcpy(dst, src, len)
-
-/*
- * primitive routines
- */
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+extern void mn10300_local_icache_inv(void);
+extern void mn10300_local_icache_inv_page(unsigned long start);
+extern void mn10300_local_icache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_local_icache_inv_range2(unsigned long start, unsigned long size);
+extern void mn10300_local_dcache_inv(void);
+extern void mn10300_local_dcache_inv_page(unsigned long start);
+extern void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size);
 extern void mn10300_icache_inv(void);
+extern void mn10300_icache_inv_page(unsigned long start);
+extern void mn10300_icache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_icache_inv_range2(unsigned long start, unsigned long size);
 extern void mn10300_dcache_inv(void);
-extern void mn10300_dcache_inv_page(unsigned start);
-extern void mn10300_dcache_inv_range(unsigned start, unsigned end);
-extern void mn10300_dcache_inv_range2(unsigned start, unsigned size);
+extern void mn10300_dcache_inv_page(unsigned long start);
+extern void mn10300_dcache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_dcache_inv_range2(unsigned long start, unsigned long size);
 #ifdef CONFIG_MN10300_CACHE_WBACK
+extern void mn10300_local_dcache_flush(void);
+extern void mn10300_local_dcache_flush_page(unsigned long start);
+extern void mn10300_local_dcache_flush_range(unsigned long start, unsigned long end);
+extern void mn10300_local_dcache_flush_range2(unsigned long start, unsigned long size);
+extern void mn10300_local_dcache_flush_inv(void);
+extern void mn10300_local_dcache_flush_inv_page(unsigned long start);
+extern void mn10300_local_dcache_flush_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_local_dcache_flush_inv_range2(unsigned long start, unsigned long size);
 extern void mn10300_dcache_flush(void);
-extern void mn10300_dcache_flush_page(unsigned start);
-extern void mn10300_dcache_flush_range(unsigned start, unsigned end);
-extern void mn10300_dcache_flush_range2(unsigned start, unsigned size);
+extern void mn10300_dcache_flush_page(unsigned long start);
+extern void mn10300_dcache_flush_range(unsigned long start, unsigned long end);
+extern void mn10300_dcache_flush_range2(unsigned long start, unsigned long size);
 extern void mn10300_dcache_flush_inv(void);
-extern void mn10300_dcache_flush_inv_page(unsigned start);
-extern void mn10300_dcache_flush_inv_range(unsigned start, unsigned end);
-extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
+extern void mn10300_dcache_flush_inv_page(unsigned long start);
+extern void mn10300_dcache_flush_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_dcache_flush_inv_range2(unsigned long start, unsigned long size);
 #else
+#define mn10300_local_dcache_flush()                   do {} while (0)
+#define mn10300_local_dcache_flush_page(start)         do {} while (0)
+#define mn10300_local_dcache_flush_range(start, end)   do {} while (0)
+#define mn10300_local_dcache_flush_range2(start, size) do {} while (0)
+#define mn10300_local_dcache_flush_inv() \
+               mn10300_local_dcache_inv()
+#define mn10300_local_dcache_flush_inv_page(start) \
+               mn10300_local_dcache_inv_page(start)
+#define mn10300_local_dcache_flush_inv_range(start, end) \
+               mn10300_local_dcache_inv_range(start, end)
+#define mn10300_local_dcache_flush_inv_range2(start, size) \
+               mn10300_local_dcache_inv_range2(start, size)
 #define mn10300_dcache_flush()                         do {} while (0)
 #define mn10300_dcache_flush_page(start)               do {} while (0)
 #define mn10300_dcache_flush_range(start, end)         do {} while (0)
@@ -90,7 +79,26 @@ extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
        mn10300_dcache_inv_range2((start), (size))
 #endif /* CONFIG_MN10300_CACHE_WBACK */
 #else
+#define mn10300_local_icache_inv()                     do {} while (0)
+#define mn10300_local_icache_inv_page(start)           do {} while (0)
+#define mn10300_local_icache_inv_range(start, end)     do {} while (0)
+#define mn10300_local_icache_inv_range2(start, size)   do {} while (0)
+#define mn10300_local_dcache_inv()                     do {} while (0)
+#define mn10300_local_dcache_inv_page(start)           do {} while (0)
+#define mn10300_local_dcache_inv_range(start, end)     do {} while (0)
+#define mn10300_local_dcache_inv_range2(start, size)   do {} while (0)
+#define mn10300_local_dcache_flush()                   do {} while (0)
+#define mn10300_local_dcache_flush_inv_page(start)     do {} while (0)
+#define mn10300_local_dcache_flush_inv()               do {} while (0)
+#define mn10300_local_dcache_flush_inv_range(start, end)do {} while (0)
+#define mn10300_local_dcache_flush_inv_range2(start, size) do {} while (0)
+#define mn10300_local_dcache_flush_page(start)         do {} while (0)
+#define mn10300_local_dcache_flush_range(start, end)   do {} while (0)
+#define mn10300_local_dcache_flush_range2(start, size) do {} while (0)
 #define mn10300_icache_inv()                           do {} while (0)
+#define mn10300_icache_inv_page(start)                 do {} while (0)
+#define mn10300_icache_inv_range(start, end)           do {} while (0)
+#define mn10300_icache_inv_range2(start, size)         do {} while (0)
 #define mn10300_dcache_inv()                           do {} while (0)
 #define mn10300_dcache_inv_page(start)                 do {} while (0)
 #define mn10300_dcache_inv_range(start, end)           do {} while (0)
@@ -103,10 +111,56 @@ extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
 #define mn10300_dcache_flush_page(start)               do {} while (0)
 #define mn10300_dcache_flush_range(start, end)         do {} while (0)
 #define mn10300_dcache_flush_range2(start, size)       do {} while (0)
-#endif /* CONFIG_MN10300_CACHE_DISABLED */
+#endif /* CONFIG_MN10300_CACHE_ENABLED */
+
+/*
+ * Virtually-indexed cache management (our cache is physically indexed)
+ */
+#define flush_cache_all()                      do {} while (0)
+#define flush_cache_mm(mm)                     do {} while (0)
+#define flush_cache_dup_mm(mm)                 do {} while (0)
+#define flush_cache_range(mm, start, end)      do {} while (0)
+#define flush_cache_page(vma, vmaddr, pfn)     do {} while (0)
+#define flush_cache_vmap(start, end)           do {} while (0)
+#define flush_cache_vunmap(start, end)         do {} while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#define flush_dcache_page(page)                        do {} while (0)
+#define flush_dcache_mmap_lock(mapping)                do {} while (0)
+#define flush_dcache_mmap_unlock(mapping)      do {} while (0)
+
+/*
+ * Physically-indexed cache management
+ */
+#if defined(CONFIG_MN10300_CACHE_FLUSH_ICACHE)
+extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+extern void flush_icache_range(unsigned long start, unsigned long end);
+#elif defined(CONFIG_MN10300_CACHE_INV_ICACHE)
+static inline void flush_icache_page(struct vm_area_struct *vma,
+                                    struct page *page)
+{
+       mn10300_icache_inv_page(page_to_phys(page));
+}
+extern void flush_icache_range(unsigned long start, unsigned long end);
+#else
+#define flush_icache_range(start, end)         do {} while (0)
+#define flush_icache_page(vma, pg)             do {} while (0)
+#endif
+
+
+#define flush_icache_user_range(vma, pg, adr, len) \
+       flush_icache_range(adr, adr + len)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+       do {                                    \
+               memcpy(dst, src, len);          \
+               flush_icache_page(vma, page);   \
+       } while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+       memcpy(dst, src, len)
 
 /*
- * internal debugging function
+ * Internal debugging function
  */
 #ifdef CONFIG_DEBUG_PAGEALLOC
 extern void kernel_map_pages(struct page *page, int numpages, int enable);
diff --git a/arch/mn10300/include/asm/cpu-regs.h b/arch/mn10300/include/asm/cpu-regs.h
index 757e9b5..90ed4a3 100644
@@ -15,7 +15,6 @@
 #include <linux/types.h>
 #endif
 
-#ifdef CONFIG_MN10300_CPU_AM33V2
 /* we tell the compiler to pretend to be AM33 so that it doesn't try and use
  * the FP regs, but tell the assembler that we're actually allowed AM33v2
  * instructions */
@@ -24,7 +23,6 @@ asm(" .am33_2\n");
 #else
 .am33_2
 #endif
-#endif
 
 #ifdef __KERNEL__
 
@@ -58,6 +56,9 @@ asm(" .am33_2\n");
 #define EPSW_nAR               0x00040000      /* register bank control */
 #define EPSW_ML                        0x00080000      /* monitor level */
 #define EPSW_FE                        0x00100000      /* FPU enable */
+#define EPSW_IM_SHIFT          8               /* EPSW_IM_SHIFT determines the interrupt mode */
+
+#define NUM2EPSW_IM(num)       ((num) << EPSW_IM_SHIFT)
 
 /* FPU registers */
 #define FPCR_EF_I              0x00000001      /* inexact result FPU exception flag */
@@ -99,9 +100,11 @@ asm(" .am33_2\n");
 #define CPUREV                 __SYSREGC(0xc0000050, u32)      /* CPU revision register */
 #define CPUREV_TYPE            0x0000000f      /* CPU type */
 #define CPUREV_TYPE_S          0
-#define CPUREV_TYPE_AM33V1     0x00000000      /* - AM33 V1 core, AM33/1.00 arch */
-#define CPUREV_TYPE_AM33V2     0x00000001      /* - AM33 V2 core, AM33/2.00 arch */
-#define CPUREV_TYPE_AM34V1     0x00000002      /* - AM34 V1 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM33_1     0x00000000      /* - AM33-1 core, AM33/1.00 arch */
+#define CPUREV_TYPE_AM33_2     0x00000001      /* - AM33-2 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM34_1     0x00000002      /* - AM34-1 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM33_3     0x00000003      /* - AM33-3 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM34_2     0x00000004      /* - AM34-2 core, AM33/3.00 arch */
 #define CPUREV_REVISION                0x000000f0      /* CPU revision */
 #define CPUREV_REVISION_S      4
 #define CPUREV_ICWAY           0x00000f00      /* number of instruction cache ways */
@@ -180,6 +183,21 @@ asm(" .am33_2\n");
 #define CHCTR_ICWMD            0x0f00          /* instruction cache way mode */
 #define CHCTR_DCWMD            0xf000          /* data cache way mode */
 
+#ifdef CONFIG_AM34_2
+#define ICIVCR                 __SYSREG(0xc0000c00, u32)       /* icache area invalidate control */
+#define ICIVCR_ICIVBSY         0x00000008                      /* icache area invalidate busy */
+#define ICIVCR_ICI             0x00000001                      /* icache area invalidate */
+
+#define ICIVMR                 __SYSREG(0xc0000c04, u32)       /* icache area invalidate mask */
+
+#define        DCPGCR                  __SYSREG(0xc0000c10, u32)       /* data cache area purge control */
+#define        DCPGCR_DCPGBSY          0x00000008                      /* data cache area purge busy */
+#define        DCPGCR_DCP              0x00000002                      /* data cache area purge */
+#define        DCPGCR_DCI              0x00000001                      /* data cache area invalidate */
+
+#define        DCPGMR                  __SYSREG(0xc0000c14, u32)       /* data cache area purge mask */
+#endif /* CONFIG_AM34_2 */
+
 /* MMU control registers */
 #define MMUCTR                 __SYSREG(0xc0000090, u32)       /* MMU control register */
 #define MMUCTR_IRP             0x0000003f      /* instruction TLB replace pointer */
@@ -203,6 +221,9 @@ asm(" .am33_2\n");
 #define MMUCTR_DTL_LOCK0_3     0x03000000      /* - entry 0-3 locked */
 #define MMUCTR_DTL_LOCK0_7     0x04000000      /* - entry 0-7 locked */
 #define MMUCTR_DTL_LOCK0_15    0x05000000      /* - entry 0-15 locked */
+#ifdef CONFIG_AM34_2
+#define MMUCTR_WTE             0x80000000      /* write-through cache TLB entry bit enable */
+#endif
 
 #define PIDR                   __SYSREG(0xc0000094, u16)       /* PID register */
 #define PIDR_PID               0x00ff          /* process identifier */
@@ -231,14 +252,6 @@ asm(" .am33_2\n");
 #define xPTEL_PS_4Mb           0x00000c00      /* - 4Mb page */
 #define xPTEL_PPN              0xfffff006      /* physical page number */
 
-#define xPTEL_V_BIT            0       /* bit numbers corresponding to above masks */
-#define xPTEL_UNUSED1_BIT      1
-#define xPTEL_UNUSED2_BIT      2
-#define xPTEL_C_BIT            3
-#define xPTEL_PV_BIT           4
-#define xPTEL_D_BIT            5
-#define xPTEL_G_BIT            9
-
 #define IPTEU                  __SYSREG(0xc00000a4, u32)       /* instruction TLB virtual addr */
 #define DPTEU                  __SYSREG(0xc00000b4, u32)       /* data TLB virtual addr */
 #define xPTEU_VPN              0xfffffc00      /* virtual page number */
@@ -262,7 +275,16 @@ asm(" .am33_2\n");
 #define xPTEL2_PS_128Kb                0x00000100      /* - 128Kb page */
 #define xPTEL2_PS_1Kb          0x00000200      /* - 1Kb page */
 #define xPTEL2_PS_4Mb          0x00000300      /* - 4Mb page */
-#define xPTEL2_PPN             0xfffffc00      /* physical page number */
+#define xPTEL2_CWT             0x00000400      /* cacheable write-through */
+#define xPTEL2_UNUSED1         0x00000800      /* unused bit (broadcast mask) */
+#define xPTEL2_PPN             0xfffff000      /* physical page number */
+
+#define xPTEL2_V_BIT           0       /* bit numbers corresponding to above masks */
+#define xPTEL2_C_BIT           1
+#define xPTEL2_PV_BIT          2
+#define xPTEL2_D_BIT           3
+#define xPTEL2_G_BIT           7
+#define xPTEL2_UNUSED1_BIT     11
 
 #define MMUFCR                 __SYSREGC(0xc000009c, u32)      /* MMU exception cause */
 #define MMUFCR_IFC             __SYSREGC(0xc000009c, u16)      /* MMU instruction excep cause */
@@ -285,6 +307,47 @@ asm(" .am33_2\n");
 #define MMUFCR_xFC_PR_RWK_RWU  0x01c0          /* - R/W kernel and R/W user */
 #define MMUFCR_xFC_ILLADDR     0x0200          /* illegal address excep flag */
 
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+/* atomic operation registers */
+#define AAR            __SYSREG(0xc0000a00, u32)       /* cacheable address */
+#define AAR2           __SYSREG(0xc0000a04, u32)       /* uncacheable address */
+#define ADR            __SYSREG(0xc0000a08, u32)       /* data */
+#define ASR            __SYSREG(0xc0000a0c, u32)       /* status */
+#define AARU           __SYSREG(0xd400aa00, u32)       /* user address */
+#define ADRU           __SYSREG(0xd400aa08, u32)       /* user data */
+#define ASRU           __SYSREG(0xd400aa0c, u32)       /* user status */
+
+#define ASR_RW         0x00000008      /* read */
+#define ASR_BW         0x00000004      /* bus error */
+#define ASR_IW         0x00000002      /* interrupt */
+#define ASR_LW         0x00000001      /* bus lock */
+
+#define ASRU_RW                ASR_RW          /* read */
+#define ASRU_BW                ASR_BW          /* bus error */
+#define ASRU_IW                ASR_IW          /* interrupt */
+#define ASRU_LW                ASR_LW          /* bus lock */
+
+/* in inline ASM, we stick the base pointer into a reg and use offsets from
+ * it */
+#define ATOMIC_OPS_BASE_ADDR 0xc0000a00
+#ifndef __ASSEMBLY__
+asm(
+       "_AAR   = 0\n"
+       "_AAR2  = 4\n"
+       "_ADR   = 8\n"
+       "_ASR   = 12\n");
+#else
+#define _AAR           0
+#define _AAR2          4
+#define _ADR           8
+#define _ASR           12
+#endif
+
+/* physical page address for userspace atomic operations registers */
+#define USER_ATOMIC_OPS_PAGE_ADDR  0xd400a000
+
+#endif /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_CPU_REGS_H */
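The atomic-operation register block above is contiguous, so the assembler offsets mirror the absolute definitions: AAR sits at ATOMIC_OPS_BASE_ADDR + 0, AAR2 at +4, ADR at +8 and ASR at +12. A minimal C-level sketch (illustrative only, not part of the patch) of testing the status word after an atomic-unit sequence:

        /* illustrative sketch: ASR_BW reports a bus error raised during an
         * atomic-operations-unit sequence */
        static inline int atomic_op_bus_error(void)
        {
                return (ASR & ASR_BW) != 0;
        }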
index 58a199d..80337b3 100644 (file)
 #ifndef _ASM_DMACTL_REGS_H
 #define _ASM_DMACTL_REGS_H
 
-#include <asm/cpu-regs.h>
-
-#ifdef __KERNEL__
-
-/* DMA registers */
-#define        DMxCTR(N)               __SYSREG(0xd2000000 + ((N) * 0x100), u32)       /* control reg */
-#define        DMxCTR_BG               0x0000001f      /* transfer request source */
-#define        DMxCTR_BG_SOFT          0x00000000      /* - software source */
-#define        DMxCTR_BG_SC0TX         0x00000002      /* - serial port 0 transmission */
-#define        DMxCTR_BG_SC0RX         0x00000003      /* - serial port 0 reception */
-#define        DMxCTR_BG_SC1TX         0x00000004      /* - serial port 1 transmission */
-#define        DMxCTR_BG_SC1RX         0x00000005      /* - serial port 1 reception */
-#define        DMxCTR_BG_SC2TX         0x00000006      /* - serial port 2 transmission */
-#define        DMxCTR_BG_SC2RX         0x00000007      /* - serial port 2 reception */
-#define        DMxCTR_BG_TM0UFLOW      0x00000008      /* - timer 0 underflow */
-#define        DMxCTR_BG_TM1UFLOW      0x00000009      /* - timer 1 underflow */
-#define        DMxCTR_BG_TM2UFLOW      0x0000000a      /* - timer 2 underflow */
-#define        DMxCTR_BG_TM3UFLOW      0x0000000b      /* - timer 3 underflow */
-#define        DMxCTR_BG_TM6ACMPCAP    0x0000000c      /* - timer 6A compare/capture */
-#define        DMxCTR_BG_AFE           0x0000000d      /* - analogue front-end interrupt source */
-#define        DMxCTR_BG_ADC           0x0000000e      /* - A/D conversion end interrupt source */
-#define        DMxCTR_BG_IRDA          0x0000000f      /* - IrDA interrupt source */
-#define        DMxCTR_BG_RTC           0x00000010      /* - RTC interrupt source */
-#define        DMxCTR_BG_XIRQ0         0x00000011      /* - XIRQ0 pin interrupt source */
-#define        DMxCTR_BG_XIRQ1         0x00000012      /* - XIRQ1 pin interrupt source */
-#define        DMxCTR_BG_XDMR0         0x00000013      /* - external request 0 source (XDMR0 pin) */
-#define        DMxCTR_BG_XDMR1         0x00000014      /* - external request 1 source (XDMR1 pin) */
-#define        DMxCTR_SAM              0x000000e0      /* DMA transfer src addr mode */
-#define        DMxCTR_SAM_INCR         0x00000000      /* - increment */
-#define        DMxCTR_SAM_DECR         0x00000020      /* - decrement */
-#define        DMxCTR_SAM_FIXED        0x00000040      /* - fixed */
-#define        DMxCTR_DAM              0x00000000      /* DMA transfer dest addr mode */
-#define        DMxCTR_DAM_INCR         0x00000000      /* - increment */
-#define        DMxCTR_DAM_DECR         0x00000100      /* - decrement */
-#define        DMxCTR_DAM_FIXED        0x00000200      /* - fixed */
-#define        DMxCTR_TM               0x00001800      /* DMA transfer mode */
-#define        DMxCTR_TM_BATCH         0x00000000      /* - batch transfer */
-#define        DMxCTR_TM_INTERM        0x00001000      /* - intermittent transfer */
-#define        DMxCTR_UT               0x00006000      /* DMA transfer unit */
-#define        DMxCTR_UT_1             0x00000000      /* - 1 byte */
-#define        DMxCTR_UT_2             0x00002000      /* - 2 byte */
-#define        DMxCTR_UT_4             0x00004000      /* - 4 byte */
-#define        DMxCTR_UT_16            0x00006000      /* - 16 byte */
-#define        DMxCTR_TEN              0x00010000      /* DMA channel transfer enable */
-#define        DMxCTR_RQM              0x00060000      /* external request input source mode */
-#define        DMxCTR_RQM_FALLEDGE     0x00000000      /* - falling edge */
-#define        DMxCTR_RQM_RISEEDGE     0x00020000      /* - rising edge */
-#define        DMxCTR_RQM_LOLEVEL      0x00040000      /* - low level */
-#define        DMxCTR_RQM_HILEVEL      0x00060000      /* - high level */
-#define        DMxCTR_RQF              0x01000000      /* DMA transfer request flag */
-#define        DMxCTR_XEND             0x80000000      /* DMA transfer end flag */
-
-#define        DMxSRC(N)               __SYSREG(0xd2000004 + ((N) * 0x100), u32)       /* control reg */
-
-#define        DMxDST(N)               __SYSREG(0xd2000008 + ((N) * 0x100), u32)       /* src addr reg */
-
-#define        DMxSIZ(N)               __SYSREG(0xd200000c + ((N) * 0x100), u32)       /* dest addr reg */
-#define DMxSIZ_CT              0x000fffff      /* number of bytes to transfer */
-
-#define        DMxCYC(N)               __SYSREG(0xd2000010 + ((N) * 0x100), u32)       /* intermittent
-                                                                                * size reg */
-#define DMxCYC_CYC             0x000000ff      /* number of interrmittent transfers -1 */
-
-#define DM0IRQ                 16              /* DMA channel 0 complete IRQ */
-#define DM1IRQ                 17              /* DMA channel 1 complete IRQ */
-#define DM2IRQ                 18              /* DMA channel 2 complete IRQ */
-#define DM3IRQ                 19              /* DMA channel 3 complete IRQ */
-
-#define        DM0ICR                  GxICR(DM0IRQ)   /* DMA channel 0 complete intr ctrl reg */
-#define        DM1ICR                  GxICR(DM0IR1)   /* DMA channel 1 complete intr ctrl reg */
-#define        DM2ICR                  GxICR(DM0IR2)   /* DMA channel 2 complete intr ctrl reg */
-#define        DM3ICR                  GxICR(DM0IR3)   /* DMA channel 3 complete intr ctrl reg */
-
-#ifndef __ASSEMBLY__
-
-struct mn10300_dmactl_regs {
-       u32             ctr;
-       const void      *src;
-       void            *dst;
-       u32             siz;
-       u32             cyc;
-} __attribute__((aligned(0x100)));
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
+#include <proc/dmactl-regs.h>
 
 #endif /* _ASM_DMACTL_REGS_H */
index e5fa97c..8157c92 100644 (file)
 #define R_MN10300_ALIGN        34      /* Alignment requirement. */
 
 /*
+ * AM33/AM34 HW Capabilities
+ */
+#define HWCAP_MN10300_ATOMIC_OP_UNIT   1       /* Has AM34 Atomic Operations */
+
+
+/*
  * ELF register definitions..
  */
 typedef unsigned long elf_greg_t;
@@ -47,8 +53,6 @@ typedef struct {
        u_int32_t       fpcr;
 } elf_fpregset_t;
 
-extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
-
 /*
  * This is used to ensure we don't load something for the wrong architecture
  */
@@ -130,7 +134,11 @@ do {                                               \
  * instruction set this CPU supports.  This could be done in user space,
  * but it's not easy, and we've already done it here.
  */
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+#define ELF_HWCAP      (HWCAP_MN10300_ATOMIC_OP_UNIT)
+#else
 #define ELF_HWCAP      (0)
+#endif
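For reference, a user-space sketch of testing the new capability bit via the ELF auxiliary vector; getauxval() is the usual glibc interface, and the constant 1 matches HWCAP_MN10300_ATOMIC_OP_UNIT above:

        #include <sys/auxv.h>
        #include <stdio.h>

        int main(void)
        {
                if (getauxval(AT_HWCAP) & 1)    /* HWCAP_MN10300_ATOMIC_OP_UNIT */
                        printf("AM34 Atomic Operations Unit available\n");
                return 0;
        }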
 
 /*
  * This yields a string that ld.so will use to load implementation
index fa16466..ca3e205 100644 (file)
@@ -15,8 +15,8 @@
 
 /*
  * define the breakpoint instruction opcode to use
- * - note that the JTAG unit steals 0xFF, so we want to avoid that if we can
- *   (can use 0xF7)
+ * - note that the JTAG unit steals 0xFF, so you can't use JTAG and GDBSTUB at
+ *   the same time.
  */
 #define GDBSTUB_BKPT           0xFF
 
@@ -90,7 +90,6 @@ enum exception_code {
 
 extern void __set_intr_stub(enum exception_code code, void *handler);
 extern void set_intr_stub(enum exception_code code, void *handler);
-extern void set_jtag_stub(enum exception_code code, void *handler);
 
 struct pt_regs;
 
@@ -102,7 +101,6 @@ extern asmlinkage void dtlb_aerror(void);
 extern asmlinkage void raw_bus_error(void);
 extern asmlinkage void double_fault(void);
 extern asmlinkage int  system_call(struct pt_regs *);
-extern asmlinkage void fpu_exception(struct pt_regs *, enum exception_code);
 extern asmlinkage void nmi(struct pt_regs *, enum exception_code);
 extern asmlinkage void uninitialised_exception(struct pt_regs *,
                                               enum exception_code);
@@ -116,6 +114,8 @@ extern void die(const char *, struct pt_regs *, enum exception_code)
 
 extern int die_if_no_fixup(const char *, struct pt_regs *, enum exception_code);
 
+#define NUM2EXCEP_IRQ_LEVEL(num)       (EXCEP_IRQ_LEVEL0 + (num) * 8)
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_EXCEPTIONS_H */
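NUM2EXCEP_IRQ_LEVEL() maps an interrupt priority level to its exception code, with consecutive levels differing by 8. A usage sketch (my_level1_handler is a hypothetical handler):

        /* install a stub for priority level 1; NUM2EXCEP_IRQ_LEVEL(1) expands
         * to EXCEP_IRQ_LEVEL0 + 8 */
        set_intr_stub(NUM2EXCEP_IRQ_LEVEL(1), my_level1_handler);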
index 64a2b83..b7625de 100644 (file)
 #ifndef _ASM_FPU_H
 #define _ASM_FPU_H
 
-#include <asm/processor.h>
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+#include <asm/exceptions.h>
 #include <asm/sigcontext.h>
-#include <asm/user.h>
 
 #ifdef __KERNEL__
 
-/* the task that owns the FPU state */
+extern asmlinkage void fpu_disabled(void);
+
+#ifdef CONFIG_FPU
+
+#ifdef CONFIG_LAZY_SAVE_FPU
+/* the task that currently owns the FPU state */
 extern struct task_struct *fpu_state_owner;
+#endif
 
-#define set_using_fpu(tsk)                             \
-do {                                                   \
-       (tsk)->thread.fpu_flags |= THREAD_USING_FPU;    \
-} while (0)
+#if (THREAD_USING_FPU & ~0xff)
+#error THREAD_USING_FPU must be smaller than 0x100.
+#endif
 
-#define clear_using_fpu(tsk)                           \
-do {                                                   \
-       (tsk)->thread.fpu_flags &= ~THREAD_USING_FPU;   \
-} while (0)
+static inline void set_using_fpu(struct task_struct *tsk)
+{
+       asm volatile(
+               "bset %0,(0,%1)"
+               :
+               : "i"(THREAD_USING_FPU), "a"(&tsk->thread.fpu_flags)
+               : "memory", "cc");
+}
 
-#define is_using_fpu(tsk) ((tsk)->thread.fpu_flags & THREAD_USING_FPU)
+static inline void clear_using_fpu(struct task_struct *tsk)
+{
+       asm volatile(
+               "bclr %0,(0,%1)"
+               :
+               : "i"(THREAD_USING_FPU), "a"(&tsk->thread.fpu_flags)
+               : "memory", "cc");
+}
 
-#define unlazy_fpu(tsk)                                        \
-do {                                                   \
-       preempt_disable();                              \
-       if (fpu_state_owner == (tsk))                   \
-               fpu_save(&tsk->thread.fpu_state);       \
-       preempt_enable();                               \
-} while (0)
-
-#define exit_fpu()                             \
-do {                                           \
-       struct task_struct *__tsk = current;    \
-       preempt_disable();                      \
-       if (fpu_state_owner == __tsk)           \
-               fpu_state_owner = NULL;         \
-       preempt_enable();                       \
-} while (0)
-
-#define flush_fpu()                                    \
-do {                                                   \
-       struct task_struct *__tsk = current;            \
-       preempt_disable();                              \
-       if (fpu_state_owner == __tsk) {                 \
-               fpu_state_owner = NULL;                 \
-               __tsk->thread.uregs->epsw &= ~EPSW_FE;  \
-       }                                               \
-       preempt_enable();                               \
-       clear_using_fpu(__tsk);                         \
-} while (0)
+#define is_using_fpu(tsk) ((tsk)->thread.fpu_flags & THREAD_USING_FPU)
 
-extern asmlinkage void fpu_init_state(void);
 extern asmlinkage void fpu_kill_state(struct task_struct *);
-extern asmlinkage void fpu_disabled(struct pt_regs *, enum exception_code);
 extern asmlinkage void fpu_exception(struct pt_regs *, enum exception_code);
-
-#ifdef CONFIG_FPU
+extern asmlinkage void fpu_invalid_op(struct pt_regs *, enum exception_code);
+extern asmlinkage void fpu_init_state(void);
 extern asmlinkage void fpu_save(struct fpu_state_struct *);
-extern asmlinkage void fpu_restore(struct fpu_state_struct *);
-#else
-#define fpu_save(a)
-#define fpu_restore(a)
-#endif /* CONFIG_FPU  */
-
-/*
- * signal frame handlers
- */
 extern int fpu_setup_sigcontext(struct fpucontext *buf);
 extern int fpu_restore_sigcontext(struct fpucontext *buf);
 
+static inline void unlazy_fpu(struct task_struct *tsk)
+{
+       preempt_disable();
+#ifndef CONFIG_LAZY_SAVE_FPU
+       if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+               fpu_save(&tsk->thread.fpu_state);
+               tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+               tsk->thread.uregs->epsw &= ~EPSW_FE;
+       }
+#else
+       if (fpu_state_owner == tsk)
+               fpu_save(&tsk->thread.fpu_state);
+#endif
+       preempt_enable();
+}
+
+static inline void exit_fpu(void)
+{
+#ifdef CONFIG_LAZY_SAVE_FPU
+       struct task_struct *tsk = current;
+
+       preempt_disable();
+       if (fpu_state_owner == tsk)
+               fpu_state_owner = NULL;
+       preempt_enable();
+#endif
+}
+
+static inline void flush_fpu(void)
+{
+       struct task_struct *tsk = current;
+
+       preempt_disable();
+#ifndef CONFIG_LAZY_SAVE_FPU
+       if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+               tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+               tsk->thread.uregs->epsw &= ~EPSW_FE;
+       }
+#else
+       if (fpu_state_owner == tsk) {
+               fpu_state_owner = NULL;
+               tsk->thread.uregs->epsw &= ~EPSW_FE;
+       }
+#endif
+       preempt_enable();
+       clear_using_fpu(tsk);
+}
+
+#else /* CONFIG_FPU */
+
+extern asmlinkage
+void unexpected_fpu_exception(struct pt_regs *, enum exception_code);
+#define fpu_invalid_op unexpected_fpu_exception
+#define fpu_exception unexpected_fpu_exception
+
+struct task_struct;
+struct fpu_state_struct;
+static inline bool is_using_fpu(struct task_struct *tsk) { return false; }
+static inline void set_using_fpu(struct task_struct *tsk) {}
+static inline void clear_using_fpu(struct task_struct *tsk) {}
+static inline void fpu_init_state(void) {}
+static inline void fpu_save(struct fpu_state_struct *s) {}
+static inline void fpu_kill_state(struct task_struct *tsk) {}
+static inline void unlazy_fpu(struct task_struct *tsk) {}
+static inline void exit_fpu(void) {}
+static inline void flush_fpu(void) {}
+static inline int fpu_setup_sigcontext(struct fpucontext *buf) { return 0; }
+static inline int fpu_restore_sigcontext(struct fpucontext *buf) { return 0; }
+#endif /* CONFIG_FPU  */
+
 #endif /* __KERNEL__ */
+#endif /* !__ASSEMBLY__ */
 #endif /* _ASM_FPU_H */
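A sketch of a typical caller of the new unlazy_fpu() inline (illustrative only; dest is a hypothetical buffer): it forces any lazily-held FPU state back into tsk->thread.fpu_state before that state is read.

        unlazy_fpu(tsk);
        memcpy(dest, &tsk->thread.fpu_state, sizeof(tsk->thread.fpu_state));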
index 5b1949b..2ee58e3 100644 (file)
@@ -18,6 +18,7 @@
 #ifndef __ASM_OFFSETS_H__
 #include <asm/asm-offsets.h>
 #endif
+#include <asm/thread_info.h>
 
 #define pi break
 
        movm    [d2,d3,a2,a3,exreg0,exreg1,exother],(sp)
        mov     sp,fp                           # FRAME pointer in A3
        add     -12,sp                          # allow for calls to be made
-       mov     (__frame),a1
-       mov     a1,(REG_NEXT,fp)
-       mov     fp,(__frame)
 
-       and     ~EPSW_FE,epsw                   # disable the FPU inside the kernel
+       # push the exception frame onto the front of the list
+       GET_THREAD_INFO a1
+       mov     (TI_frame,a1),a0
+       mov     a0,(REG_NEXT,fp)
+       mov     fp,(TI_frame,a1)
+
+       # disable the FPU inside the kernel
+       and     ~EPSW_FE,epsw
 
        # we may be holding current in E2
 #ifdef CONFIG_MN10300_CURRENT_IN_E2
 .macro RESTORE_ALL
        # peel back the stack to the calling frame
        # - this permits execve() to discard extra frames due to kernel syscalls
-       mov     (__frame),fp
+       GET_THREAD_INFO a0
+       mov     (TI_frame,a0),fp
        mov     fp,sp
-       mov     (REG_NEXT,fp),d0                # userspace has regs->next == 0
-       mov     d0,(__frame)
+       mov     (REG_NEXT,fp),d0
+       mov     d0,(TI_frame,a0)                # userspace has regs->next == 0
 
 #ifndef CONFIG_MN10300_USING_JTAG
        mov     (REG_EPSW,fp),d0
index 41ed267..f5495ad 100644 (file)
@@ -110,7 +110,7 @@ extern asmlinkage void gdbstub_exception(struct pt_regs *, enum exception_code);
 extern asmlinkage void __gdbstub_bug_trap(void);
 extern asmlinkage void __gdbstub_pause(void);
 
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
 extern asmlinkage void gdbstub_purge_cache(void);
 #else
 #define gdbstub_purge_cache()  do {} while (0)
index 54d9501..0000d65 100644 (file)
 /* assembly code in softirq.h is sensitive to the offsets of these fields */
 typedef struct {
        unsigned int    __softirq_pending;
-       unsigned long   idle_timestamp;
+#ifdef CONFIG_MN10300_WD_TIMER
        unsigned int    __nmi_count;    /* arch dependent */
        unsigned int    __irq_count;    /* arch dependent */
+#endif
 } ____cacheline_aligned irq_cpustat_t;
 
 #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
index e2155e6..bfe2d88 100644 (file)
@@ -87,7 +87,7 @@ static inline unsigned long __kmap_atomic(struct page *page)
                BUG();
 #endif
        set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
-       __flush_tlb_one(vaddr);
+       local_flush_tlb_one(vaddr);
 
        return vaddr;
 }
@@ -116,7 +116,7 @@ static inline void __kunmap_atomic(unsigned long vaddr)
                 * this pte without first remap it
                 */
                pte_clear(kmap_pte - idx);
-               __flush_tlb_one(vaddr);
+               local_flush_tlb_one(vaddr);
        }
 #endif
 
index ba544c7..585b708 100644 (file)
 
 #ifdef __KERNEL__
 
-/* interrupt controller registers */
-#define GxICR(X)               __SYSREG(0xd4000000 + (X) * 4, u16)     /* group irq ctrl regs */
-
-#define IAGR                   __SYSREG(0xd4000100, u16)       /* intr acceptance group reg */
-#define IAGR_GN                        0x00fc          /* group number register
-                                                * (documentation _has_ to be wrong)
-                                                */
+/*
+ * Interrupt controller registers
+ * - Registers 64-191 are at addresses offset from the main array
+ */
+#define GxICR(X)                                               \
+       __SYSREG(0xd4000000 + (X) * 4 +                         \
+                (((X) >= 64) && ((X) < 192)) * 0xf00, u16)
 
-#define EXTMD                  __SYSREG(0xd4000200, u16)       /* external pin intr spec reg */
-#define GET_XIRQ_TRIGGER(X) ((EXTMD >> ((X) * 2)) & 3)
+#define GxICR_u8(X)                                                    \
+       __SYSREG(0xd4000000 + (X) * 4 +                                 \
+                (((X) >= 64) && ((X) < 192)) * 0xf00, u8)
 
-#define SET_XIRQ_TRIGGER(X,Y)                  \
-do {                                           \
-       u16 x = EXTMD;                          \
-       x &= ~(3 << ((X) * 2));                 \
-       x |= ((Y) & 3) << ((X) * 2);            \
-       EXTMD = x;                              \
-} while (0)
+#include <proc/intctl-regs.h>
 
 #define XIRQ_TRIGGER_LOWLEVEL  0
 #define XIRQ_TRIGGER_HILEVEL   1
@@ -59,10 +54,18 @@ do {                                                \
 #define GxICR_LEVEL_5          0x5000          /* - level 5 */
 #define GxICR_LEVEL_6          0x6000          /* - level 6 */
 #define GxICR_LEVEL_SHIFT      12
+#define GxICR_NMI              0x8000          /* nmi request flag */
+
+#define NUM2GxICR_LEVEL(num)   ((num) << GxICR_LEVEL_SHIFT)
 
 #ifndef __ASSEMBLY__
 extern void set_intr_level(int irq, u16 level);
-extern void set_intr_postackable(int irq);
+extern void mn10300_intc_set_level(unsigned int irq, unsigned int level);
+extern void mn10300_intc_clear(unsigned int irq);
+extern void mn10300_intc_set(unsigned int irq);
+extern void mn10300_intc_enable(unsigned int irq);
+extern void mn10300_intc_disable(unsigned int irq);
+extern void mn10300_set_lateack_irq_type(int irq);
 #endif
 
 /* external interrupts */
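The address arithmetic in the new GxICR() macro can be checked by hand; IRQs 0-63 and 192 upwards live in the primary bank, while 64-191 are shifted by 0xf00:

        /* GxICR(10)  -> __SYSREG(0xd4000000 +  10 * 4,         u16) == 0xd4000028
         * GxICR(100) -> __SYSREG(0xd4000000 + 100 * 4 + 0xf00, u16) == 0xd4001090
         * GxICR(200) -> __SYSREG(0xd4000000 + 200 * 4,         u16) == 0xd4000320
         */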
index c1a4119..787255d 100644 (file)
@@ -206,6 +206,19 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
 #define iowrite32_rep(p, src, count) \
        outsl((unsigned long) (p), (src), (count))
 
+#define readsb(p, dst, count) \
+       insb((unsigned long) (p), (dst), (count))
+#define readsw(p, dst, count) \
+       insw((unsigned long) (p), (dst), (count))
+#define readsl(p, dst, count) \
+       insl((unsigned long) (p), (dst), (count))
+
+#define writesb(p, src, count) \
+       outsb((unsigned long) (p), (src), (count))
+#define writesw(p, src, count) \
+       outsw((unsigned long) (p), (src), (count))
+#define writesl(p, src, count) \
+       outsl((unsigned long) (p), (src), (count))
 
 #define IO_SPACE_LIMIT 0xffffffff
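A minimal sketch of the new string-I/O wrappers (fifo is a hypothetical ioremap()ed device register address):

        u16 buf[64];

        readsw(fifo, buf, 64);          /* drain 64 16-bit words from the device */
        writesw(fifo, buf, 64);         /* and push them back out */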
 
index 25c045d..1a73fb3 100644 (file)
 /* this number is used when no interrupt has been assigned */
 #define NO_IRQ         INT_MAX
 
-/* hardware irq numbers */
-#define NR_IRQS                GxICR_NUM_IRQS
+/*
+ * hardware irq numbers
+ * - the ASB2364 has an FPGA with an IRQ multiplexer on it
+ */
+#ifdef CONFIG_MN10300_UNIT_ASB2364
+#include <unit/irq.h>
+#else
+#define NR_CPU_IRQS    GxICR_NUM_IRQS
+#define NR_IRQS                NR_CPU_IRQS
+#endif
 
 /* external hardware irq numbers */
 #define NR_XIRQS       GxICR_NUM_XIRQS
index a848cd2..97d0cb5 100644 (file)
 #define ARCH_HAS_OWN_IRQ_REGS
 
 #ifndef __ASSEMBLY__
-#define get_irq_regs() (__frame)
+static inline __attribute__((const))
+struct pt_regs *get_irq_regs(void)
+{
+       return current_frame();
+}
 #endif
 
 #endif /* _ASM_IRQ_REGS_H */
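The call pattern is unchanged; only the source of the frame moves from the old __frame global to the thread_info frame list. A sketch (handle_user_sample() is a hypothetical consumer):

        /* e.g. in a timer or profiling interrupt handler */
        struct pt_regs *regs = get_irq_regs();

        if (regs && user_mode(regs))
                handle_user_sample(instruction_pointer(regs));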
index 5e529a1..7a7ae12 100644 (file)
@@ -13,6 +13,9 @@
 #define _ASM_IRQFLAGS_H
 
 #include <asm/cpu-regs.h>
+#ifndef __ASSEMBLY__
+#include <linux/smp.h>
+#endif
 
 /*
  * interrupt control
  *   - level 6 - timer interrupt
  * - "enabled":  run in IM7
  */
-#ifdef CONFIG_MN10300_TTYSM
-#define MN10300_CLI_LEVEL      EPSW_IM_2
-#else
-#define MN10300_CLI_LEVEL      EPSW_IM_1
-#endif
+#define MN10300_CLI_LEVEL      (CONFIG_LINUX_CLI_LEVEL << EPSW_IM_SHIFT)
 
 #ifndef __ASSEMBLY__
 
@@ -64,11 +63,12 @@ static inline unsigned long arch_local_irq_save(void)
 /*
  * we make sure arch_irq_enable() doesn't cause priority inversion
  */
-extern unsigned long __mn10300_irq_enabled_epsw;
+extern unsigned long __mn10300_irq_enabled_epsw[];
 
 static inline void arch_local_irq_enable(void)
 {
        unsigned long tmp;
+       int cpu = raw_smp_processor_id();
 
        asm volatile(
                "       mov     epsw,%0         \n"
@@ -76,8 +76,8 @@ static inline void arch_local_irq_enable(void)
                "       or      %2,%0           \n"
                "       mov     %0,epsw         \n"
                : "=&d"(tmp)
-               : "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw)
-               : "memory");
+               : "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw[cpu])
+               : "memory", "cc");
 }
 
 static inline void arch_local_irq_restore(unsigned long flags)
@@ -94,7 +94,7 @@ static inline void arch_local_irq_restore(unsigned long flags)
 
 static inline bool arch_irqs_disabled_flags(unsigned long flags)
 {
-       return (flags & EPSW_IM) <= MN10300_CLI_LEVEL;
+       return (flags & (EPSW_IE | EPSW_IM)) != (EPSW_IE | EPSW_IM_7);
 }
 
 static inline bool arch_irqs_disabled(void)
@@ -109,6 +109,9 @@ static inline bool arch_irqs_disabled(void)
  */
 static inline void arch_safe_halt(void)
 {
+#ifdef CONFIG_SMP
+       arch_local_irq_enable();
+#else
        asm volatile(
                "       or      %0,epsw \n"
                "       nop             \n"
@@ -117,7 +120,97 @@ static inline void arch_safe_halt(void)
                :
                : "i"(EPSW_IE|EPSW_IM), "n"(&CPUM), "i"(CPUM_SLEEP)
                : "cc");
+#endif
 }
 
+#define __sleep_cpu()                          \
+do {                                           \
+       asm volatile(                           \
+               "       bset    %1,(%0)\n"      \
+               "1:     btst    %1,(%0)\n"      \
+               "       bne     1b\n"           \
+               :                               \
+               : "i"(&CPUM), "i"(CPUM_SLEEP)   \
+               : "cc"                          \
+               );                              \
+} while (0)
+
+static inline void arch_local_cli(void)
+{
+       asm volatile(
+               "       and     %0,epsw         \n"
+               "       nop                     \n"
+               "       nop                     \n"
+               "       nop                     \n"
+               :
+               : "i"(~EPSW_IE)
+               : "memory"
+               );
+}
+
+static inline unsigned long arch_local_cli_save(void)
+{
+       unsigned long flags = arch_local_save_flags();
+       arch_local_cli();
+       return flags;
+}
+
+static inline void arch_local_sti(void)
+{
+       asm volatile(
+               "       or      %0,epsw         \n"
+               :
+               : "i"(EPSW_IE)
+               : "memory");
+}
+
+static inline void arch_local_change_intr_mask_level(unsigned long level)
+{
+       asm volatile(
+               "       and     %0,epsw         \n"
+               "       or      %1,epsw         \n"
+               :
+               : "i"(~EPSW_IM), "i"(EPSW_IE | level)
+               : "cc", "memory");
+}
+
+#else /* !__ASSEMBLY__ */
+
+#define LOCAL_SAVE_FLAGS(reg)                  \
+       mov     epsw,reg
+
+#define LOCAL_IRQ_DISABLE                              \
+       and     ~EPSW_IM,epsw;                          \
+       or      EPSW_IE|MN10300_CLI_LEVEL,epsw;         \
+       nop;                                            \
+       nop;                                            \
+       nop
+
+#define LOCAL_IRQ_ENABLE               \
+       or      EPSW_IE|EPSW_IM_7,epsw
+
+#define LOCAL_IRQ_RESTORE(reg) \
+       mov     reg,epsw
+
+#define LOCAL_CLI_SAVE(reg)    \
+       mov     epsw,reg;       \
+       and     ~EPSW_IE,epsw;  \
+       nop;                    \
+       nop;                    \
+       nop
+
+#define LOCAL_CLI              \
+       and     ~EPSW_IE,epsw;  \
+       nop;                    \
+       nop;                    \
+       nop
+
+#define LOCAL_STI              \
+       or      EPSW_IE,epsw
+
+#define LOCAL_CHANGE_INTR_MASK_LEVEL(level)    \
+       and     ~EPSW_IM,epsw;                  \
+       or      EPSW_IE|(level),epsw
+
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_IRQFLAGS_H */
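A usage sketch for the new cli/sti helpers (the critical section is a placeholder): arch_local_cli_save() records EPSW and clears the IE bit, and arch_local_irq_restore() puts the saved state back.

        unsigned long flags;

        flags = arch_local_cli_save();
        /* ... touch state that an interrupt handler might also touch ... */
        arch_local_irq_restore(flags);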
index cb294c2..c8f6c82 100644 (file)
 #include <asm/tlbflush.h>
 #include <asm-generic/mm_hooks.h>
 
+#define MMU_CONTEXT_TLBPID_NR          256
 #define MMU_CONTEXT_TLBPID_MASK                0x000000ffUL
 #define MMU_CONTEXT_VERSION_MASK       0xffffff00UL
 #define MMU_CONTEXT_FIRST_VERSION      0x00000100UL
 #define MMU_NO_CONTEXT                 0x00000000UL
-
-extern unsigned long mmu_context_cache[NR_CPUS];
-#define mm_context(mm) (mm->context.tlbpid[smp_processor_id()])
+#define MMU_CONTEXT_TLBPID_LOCK_NR     0
 
 #define enter_lazy_tlb(mm, tsk)        do {} while (0)
 
+static inline void cpu_ran_vm(int cpu, struct mm_struct *mm)
+{
 #ifdef CONFIG_SMP
-#define cpu_ran_vm(cpu, mm) \
-       cpumask_set_cpu((cpu), mm_cpumask(mm))
-#define cpu_maybe_ran_vm(cpu, mm) \
-       cpumask_test_and_set_cpu((cpu), mm_cpumask(mm))
+       cpumask_set_cpu(cpu, mm_cpumask(mm));
+#endif
+}
+
+static inline bool cpu_maybe_ran_vm(int cpu, struct mm_struct *mm)
+{
+#ifdef CONFIG_SMP
+       return cpumask_test_and_set_cpu(cpu, mm_cpumask(mm));
 #else
-#define cpu_ran_vm(cpu, mm)            do {} while (0)
-#define cpu_maybe_ran_vm(cpu, mm)      true
-#endif /* CONFIG_SMP */
+       return true;
+#endif
+}
 
-/*
- * allocate an MMU context
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+extern unsigned long mmu_context_cache[NR_CPUS];
+#define mm_context(mm) (mm->context.tlbpid[smp_processor_id()])
+
+/**
+ * allocate_mmu_context - Allocate storage for the arch-specific MMU data
+ * @mm: The userspace VM context being set up
  */
 static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
 {
@@ -58,7 +68,7 @@ static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
        if (!(mc & MMU_CONTEXT_TLBPID_MASK)) {
                /* we exhausted the TLB PIDs of this version on this CPU, so we
                 * flush this CPU's TLB in its entirety and start new cycle */
-               flush_tlb_all();
+               local_flush_tlb_all();
 
                /* fix the TLB version if needed (we avoid version #0 so as to
                 * distinguish MMU_NO_CONTEXT) */
@@ -101,22 +111,34 @@ static inline int init_new_context(struct task_struct *tsk,
 }
 
 /*
- * destroy context related info for an mm_struct that is about to be put to
- * rest
- */
-#define destroy_context(mm)    do { } while (0)
-
-/*
  * after we have set current->mm to a new value, this activates the context for
  * the new mm so we see the new mappings.
  */
-static inline void activate_context(struct mm_struct *mm, int cpu)
+static inline void activate_context(struct mm_struct *mm)
 {
        PIDR = get_mmu_context(mm) & MMU_CONTEXT_TLBPID_MASK;
 }
+#else  /* CONFIG_MN10300_TLB_USE_PIDR */
 
-/*
- * change between virtual memory sets
+#define init_new_context(tsk, mm)      (0)
+#define activate_context(mm)           local_flush_tlb()
+
+#endif /* CONFIG_MN10300_TLB_USE_PIDR */
+
+/**
+ * destroy_context - Destroy mm context information
+ * @mm: The MM being destroyed.
+ *
+ * Destroy context related info for an mm_struct that is about to be put to
+ * rest
+ */
+#define destroy_context(mm)    do {} while (0)
+
+/**
+ * switch_mm - Change between userspace virtual memory contexts
+ * @prev: The outgoing MM context.
+ * @next: The incoming MM context.
+ * @tsk: The incoming task.
  */
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
@@ -124,11 +146,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
        int cpu = smp_processor_id();
 
        if (prev != next) {
+#ifdef CONFIG_SMP
+               per_cpu(cpu_tlbstate, cpu).active_mm = next;
+#endif
                cpu_ran_vm(cpu, next);
-               activate_context(next, cpu);
                PTBR = (unsigned long) next->pgd;
-       } else if (!cpu_maybe_ran_vm(cpu, next)) {
-               activate_context(next, cpu);
+               activate_context(next);
        }
 }
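When PIDR-based TLB tagging is in use, the context word packs a per-CPU TLB PID into the low byte and a version number into the upper bits. A sketch of that split (illustrative helpers, not part of the patch):

        static inline unsigned long ctx_version(unsigned long ctx)
        {
                return ctx & MMU_CONTEXT_VERSION_MASK;  /* upper 24 bits */
        }

        static inline unsigned long ctx_tlbpid(unsigned long ctx)
        {
                return ctx & MMU_CONTEXT_TLBPID_MASK;   /* low 8 bits, loaded into PIDR */
        }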
 
index a19f113..146bacf 100644 (file)
@@ -11,7 +11,6 @@
 #ifndef _ASM_PGALLOC_H
 #define _ASM_PGALLOC_H
 
-#include <asm/processor.h>
 #include <asm/page.h>
 #include <linux/threads.h>
 #include <linux/mm.h>          /* for struct page */
index b049a8b..a1e894b 100644 (file)
@@ -90,46 +90,58 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
  * The vmalloc() routines also leaves a hole of 4kB between each vmalloced
  * area to catch addressing errors.
  */
+#ifndef __ASSEMBLY__
+#define VMALLOC_OFFSET (8UL * 1024 * 1024)
+#define VMALLOC_START  (0x70000000UL)
+#define VMALLOC_END    (0x7C000000UL)
+#else
 #define VMALLOC_OFFSET (8 * 1024 * 1024)
 #define VMALLOC_START  (0x70000000)
 #define VMALLOC_END    (0x7C000000)
+#endif
 
 #ifndef __ASSEMBLY__
 extern pte_t kernel_vmalloc_ptes[(VMALLOC_END - VMALLOC_START) / PAGE_SIZE];
 #endif
 
-/* IPTEL/DPTEL bit assignments */
-#define _PAGE_BIT_VALID                xPTEL_V_BIT
-#define _PAGE_BIT_ACCESSED     xPTEL_UNUSED1_BIT       /* mustn't be loaded into IPTEL/DPTEL */
-#define _PAGE_BIT_NX           xPTEL_UNUSED2_BIT       /* mustn't be loaded into IPTEL/DPTEL */
-#define _PAGE_BIT_CACHE                xPTEL_C_BIT
-#define _PAGE_BIT_PRESENT      xPTEL_PV_BIT
-#define _PAGE_BIT_DIRTY                xPTEL_D_BIT
-#define _PAGE_BIT_GLOBAL       xPTEL_G_BIT
-
-#define _PAGE_VALID            xPTEL_V
-#define _PAGE_ACCESSED         xPTEL_UNUSED1
-#define _PAGE_NX               xPTEL_UNUSED2           /* no-execute bit */
-#define _PAGE_CACHE            xPTEL_C
-#define _PAGE_PRESENT          xPTEL_PV
-#define _PAGE_DIRTY            xPTEL_D
-#define _PAGE_PROT             xPTEL_PR
-#define _PAGE_PROT_RKNU                xPTEL_PR_ROK
-#define _PAGE_PROT_WKNU                xPTEL_PR_RWK
-#define _PAGE_PROT_RKRU                xPTEL_PR_ROK_ROU
-#define _PAGE_PROT_WKRU                xPTEL_PR_RWK_ROU
-#define _PAGE_PROT_WKWU                xPTEL_PR_RWK_RWU
-#define _PAGE_GLOBAL           xPTEL_G
-#define _PAGE_PSE              xPTEL_PS_4Mb            /* 4MB page */
-
-#define _PAGE_FILE             xPTEL_UNUSED1_BIT       /* set:pagecache unset:swap */
-
-#define __PAGE_PROT_UWAUX      0x040
-#define __PAGE_PROT_USER       0x080
-#define __PAGE_PROT_WRITE      0x100
+/* IPTEL2/DPTEL2 bit assignments */
+#define _PAGE_BIT_VALID                xPTEL2_V_BIT
+#define _PAGE_BIT_CACHE                xPTEL2_C_BIT
+#define _PAGE_BIT_PRESENT      xPTEL2_PV_BIT
+#define _PAGE_BIT_DIRTY                xPTEL2_D_BIT
+#define _PAGE_BIT_GLOBAL       xPTEL2_G_BIT
+#define _PAGE_BIT_ACCESSED     xPTEL2_UNUSED1_BIT      /* mustn't be loaded into IPTEL2/DPTEL2 */
+
+#define _PAGE_VALID            xPTEL2_V
+#define _PAGE_CACHE            xPTEL2_C
+#define _PAGE_PRESENT          xPTEL2_PV
+#define _PAGE_DIRTY            xPTEL2_D
+#define _PAGE_PROT             xPTEL2_PR
+#define _PAGE_PROT_RKNU                xPTEL2_PR_ROK
+#define _PAGE_PROT_WKNU                xPTEL2_PR_RWK
+#define _PAGE_PROT_RKRU                xPTEL2_PR_ROK_ROU
+#define _PAGE_PROT_WKRU                xPTEL2_PR_RWK_ROU
+#define _PAGE_PROT_WKWU                xPTEL2_PR_RWK_RWU
+#define _PAGE_GLOBAL           xPTEL2_G
+#define _PAGE_PS_MASK          xPTEL2_PS
+#define _PAGE_PS_4Kb           xPTEL2_PS_4Kb
+#define _PAGE_PS_128Kb         xPTEL2_PS_128Kb
+#define _PAGE_PS_1Kb           xPTEL2_PS_1Kb
+#define _PAGE_PS_4Mb           xPTEL2_PS_4Mb
+#define _PAGE_PSE              xPTEL2_PS_4Mb           /* 4MB page */
+#define _PAGE_CACHE_WT         xPTEL2_CWT
+#define _PAGE_ACCESSED         xPTEL2_UNUSED1
+#define _PAGE_NX               0                       /* no-execute bit */
+
+/* If _PAGE_VALID is clear, we use these: */
+#define _PAGE_FILE             xPTEL2_C        /* set:pagecache unset:swap */
+#define _PAGE_PROTNONE         0x000           /* If not present */
+
+#define __PAGE_PROT_UWAUX      0x010
+#define __PAGE_PROT_USER       0x020
+#define __PAGE_PROT_WRITE      0x040
 
 #define _PAGE_PRESENTV         (_PAGE_PRESENT|_PAGE_VALID)
-#define _PAGE_PROTNONE         0x000   /* If not present */
 
 #ifndef __ASSEMBLY__
 
@@ -170,6 +182,9 @@ extern pte_t kernel_vmalloc_ptes[(VMALLOC_END - VMALLOC_START) / PAGE_SIZE];
 #define PAGE_KERNEL_LARGE      __pgprot(__PAGE_KERNEL_LARGE)
 #define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
 
+#define __PAGE_USERIO          (__PAGE_KERNEL_BASE | _PAGE_PROT_WKWU | _PAGE_NX)
+#define PAGE_USERIO            __pgprot(__PAGE_USERIO)
+
 /*
  * Whilst the MN10300 can do page protection for execute (given separate data
  * and insn TLBs), we are not supporting it at the moment. Write permission,
@@ -323,11 +338,7 @@ static inline int pte_exec_kernel(pte_t pte)
        return 1;
 }
 
-/*
- * Bits 0 and 1 are taken, split up the 29 bits of offset
- * into this range:
- */
-#define PTE_FILE_MAX_BITS      29
+#define PTE_FILE_MAX_BITS      30
 
 #define pte_to_pgoff(pte)      (pte_val(pte) >> 2)
 #define pgoff_to_pte(off)      __pte((off) << 2 | _PAGE_FILE)
@@ -373,8 +384,13 @@ static inline void ptep_mkdirty(pte_t *ptep)
  * Macro to mark a page protection value as "uncacheable".  On processors which
  * do not support it, this is a no-op.
  */
-#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_CACHE)
+#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHE)
 
+/*
+ * Macro to mark a page protection value as "Write-Through".
+ * On processors which do not support it, this is a no-op.
+ */
+#define pgprot_through(prot)   __pgprot(pgprot_val(prot) | _PAGE_CACHE_WT)
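A sketch of how a driver mapping might use the two cache-control helpers (using PAGE_KERNEL as the base protection is illustrative):

        pgprot_t uncached = pgprot_noncached(PAGE_KERNEL);      /* clears _PAGE_CACHE */
        pgprot_t wthrough = pgprot_through(PAGE_KERNEL);        /* sets _PAGE_CACHE_WT */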
 
 /*
  * Conversion functions: convert a page and protection to a page entry,
index f7d4b0d..4c1b5cc 100644 (file)
 #ifndef _ASM_PROCESSOR_H
 #define _ASM_PROCESSOR_H
 
+#include <linux/threads.h>
+#include <linux/thread_info.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>
 #include <asm/cpu-regs.h>
-#include <linux/threads.h>
+#include <asm/uaccess.h>
+#include <asm/current.h>
 
 /* Forward declaration, a strange C thing */
 struct task_struct;
@@ -33,6 +36,8 @@ struct mm_struct;
        __pc;                                   \
 })
 
+extern void get_mem_info(unsigned long *mem_base, unsigned long *mem_size);
+
 extern void show_registers(struct pt_regs *regs);
 
 /*
@@ -43,17 +48,22 @@ extern void show_registers(struct pt_regs *regs);
 
 struct mn10300_cpuinfo {
        int             type;
-       unsigned long   loops_per_sec;
+       unsigned long   loops_per_jiffy;
        char            hard_math;
-       unsigned long   *pgd_quick;
-       unsigned long   *pte_quick;
-       unsigned long   pgtable_cache_sz;
 };
 
 extern struct mn10300_cpuinfo boot_cpu_data;
 
+#ifdef CONFIG_SMP
+#if CONFIG_NR_CPUS < 2 || CONFIG_NR_CPUS > 8
+# error Sorry, NR_CPUS should be 2 to 8
+#endif
+extern struct mn10300_cpuinfo cpu_data[];
+#define current_cpu_data cpu_data[smp_processor_id()]
+#else  /* CONFIG_SMP */
 #define cpu_data &boot_cpu_data
 #define current_cpu_data boot_cpu_data
+#endif /* CONFIG_SMP */
 
 extern void identify_cpu(struct mn10300_cpuinfo *);
 extern void print_cpu_info(struct mn10300_cpuinfo *);
@@ -76,10 +86,6 @@ extern void dodgy_tsc(void);
  */
 #define TASK_UNMAPPED_BASE     0x30000000
 
-typedef struct {
-       unsigned long   seg;
-} mm_segment_t;
-
 struct fpu_state_struct {
        unsigned long   fs[32];         /* fpu registers */
        unsigned long   fpcr;           /* fpu control register */
@@ -92,20 +98,19 @@ struct thread_struct {
        unsigned long           a3;             /* kernel FP */
        unsigned long           wchan;
        unsigned long           usp;
-       struct pt_regs          *__frame;
        unsigned long           fpu_flags;
 #define THREAD_USING_FPU       0x00000001      /* T if this task is using the FPU */
+#define THREAD_HAS_FPU         0x00000002      /* T if this task owns the FPU right now */
        struct fpu_state_struct fpu_state;
 };
 
-#define INIT_THREAD                            \
-{                                              \
-       .uregs          = init_uregs,           \
-       .pc             = 0,                    \
-       .sp             = 0,                    \
-       .a3             = 0,                    \
-       .wchan          = 0,                    \
-       .__frame        = NULL,                 \
+#define INIT_THREAD            \
+{                              \
+       .uregs  = init_uregs,   \
+       .pc     = 0,            \
+       .sp     = 0,            \
+       .a3     = 0,            \
+       .wchan  = 0,            \
 }
 
 #define INIT_MMAP \
@@ -117,13 +122,20 @@ struct thread_struct {
  * - need to discard the frame stacked by the kernel thread invoking the execve
  *   syscall (see RESTORE_ALL macro)
  */
-#define start_thread(regs, new_pc, new_sp) do {                \
-       set_fs(USER_DS);                                \
-       __frame = current->thread.uregs;                \
-       __frame->epsw = EPSW_nSL | EPSW_IE | EPSW_IM;   \
-       __frame->pc = new_pc;                           \
-       __frame->sp = new_sp;                           \
-} while (0)
+static inline void start_thread(struct pt_regs *regs,
+                               unsigned long new_pc, unsigned long new_sp)
+{
+       struct thread_info *ti = current_thread_info();
+       struct pt_regs *frame0;
+       set_fs(USER_DS);
+
+       frame0 = thread_info_to_uregs(ti);
+       frame0->epsw = EPSW_nSL | EPSW_IE | EPSW_IM;
+       frame0->pc = new_pc;
+       frame0->sp = new_sp;
+       ti->frame = frame0;
+}
+
 
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
@@ -157,7 +169,7 @@ unsigned long get_wchan(struct task_struct *p);
 
 static inline void prefetch(const void *x)
 {
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
 #ifdef CONFIG_MN10300_PROC_MN103E010
        asm volatile ("nop; nop; dcpf (%0)" : : "r"(x));
 #else
@@ -168,7 +180,7 @@ static inline void prefetch(const void *x)
 
 static inline void prefetchw(const void *x)
 {
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
 #ifdef CONFIG_MN10300_PROC_MN103E010
        asm volatile ("nop; nop; dcpf (%0)" : : "r"(x));
 #else
index 7c2e911..b696181 100644 (file)
@@ -40,7 +40,6 @@
 #define        PT_PC           26
 #define NR_PTREGS      27
 
-#ifndef __ASSEMBLY__
 /*
  * This defines the way registers are stored in the event of an exception
  * - the strange order is due to the MOVM instruction
@@ -75,7 +74,6 @@ struct pt_regs {
        unsigned long           epsw;
        unsigned long           pc;
 };
-#endif
 
 /* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
 #define PTRACE_GETREGS            12
@@ -86,12 +84,7 @@ struct pt_regs {
 /* options set using PTRACE_SETOPTIONS */
 #define PTRACE_O_TRACESYSGOOD     0x00000001
 
-#if defined(__KERNEL__)
-
-extern struct pt_regs *__frame;                /* current frame pointer */
-
-#if !defined(__ASSEMBLY__)
-struct task_struct;
+#ifdef __KERNEL__
 
 #define user_mode(regs)                        (((regs)->epsw & EPSW_nSL) == EPSW_nSL)
 #define instruction_pointer(regs)      ((regs)->pc)
@@ -100,9 +93,7 @@ extern void show_regs(struct pt_regs *);
 
 #define arch_has_single_step() (1)
 
-#endif  /*  !__ASSEMBLY  */
-
 #define profile_pc(regs) ((regs)->pc)
 
-#endif  /*  __KERNEL__  */
+#endif /* __KERNEL__  */
 #endif /* _ASM_PTRACE_H */
index 174523d..10c7502 100644 (file)
@@ -50,7 +50,7 @@ static inline void mn10300_proc_hard_reset(void)
        RSTCTR |= RSTCTR_CHIPRST;
 }
 
-extern unsigned int watchdog_alert_counter;
+extern unsigned int watchdog_alert_counter[];
 
 extern void watchdog_go(void);
 extern asmlinkage void watchdog_handler(void);
index c295194..6c14bb1 100644 (file)
 
 #include <linux/init.h>
 
-extern void check_rtc_time(void);
 extern void __init calibrate_clock(void);
-extern unsigned long __init get_initial_rtc_time(void);
 
 #else /* !CONFIG_MN10300_RTC */
 
-static inline void check_rtc_time(void)
-{
-}
-
 static inline void calibrate_clock(void)
 {
 }
 
-static inline unsigned long get_initial_rtc_time(void)
-{
-       return 0;
-}
-
 #endif /* !CONFIG_MN10300_RTC */
 
 #include <asm-generic/rtc.h>
diff --git a/arch/mn10300/include/asm/rwlock.h b/arch/mn10300/include/asm/rwlock.h
new file mode 100644 (file)
index 0000000..6d594d4
--- /dev/null
@@ -0,0 +1,125 @@
+/*
+ * Helpers used by both rw spinlocks and rw semaphores.
+ *
+ * Based in part on code from semaphore.h and
+ * spinlock.h Copyright 1996 Linus Torvalds.
+ *
+ * Copyright 1999 Red Hat, Inc.
+ *
+ * Written by Benjamin LaHaise.
+ *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ * 13-Nov-2006 MEI Temporarily delete lock functions for SMP support.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+#ifndef _ASM_RWLOCK_H
+#define _ASM_RWLOCK_H
+
+#define RW_LOCK_BIAS            0x01000000
+
+#ifndef CONFIG_SMP
+
+typedef struct { unsigned long a[100]; } __dummy_lock_t;
+#define __dummy_lock(lock) (*(__dummy_lock_t *)(lock))
+
+#define RW_LOCK_BIAS_STR       "0x01000000"
+
+#define __build_read_lock_ptr(rw, helper)                              \
+       do {                                                            \
+               asm volatile(                                           \
+                       "       mov     (%0),d3                 \n"     \
+                       "       sub     1,d3                    \n"     \
+                       "       mov     d3,(%0)                 \n"     \
+                       "       blt     1f                      \n"     \
+                       "       bra     2f                      \n"     \
+                       "1:     jmp     3f                      \n"     \
+                       "2:                                     \n"     \
+                       "       .section .text.lock,\"ax\"      \n"     \
+                       "3:     call    "helper"[],0            \n"     \
+                       "       jmp     2b                      \n"     \
+                       "       .previous"                              \
+                       :                                               \
+                       : "d" (rw)                                      \
+                       : "memory", "d3", "cc");                        \
+       } while (0)
+
+#define __build_read_lock_const(rw, helper)                            \
+       do {                                                            \
+               asm volatile(                                           \
+                       "       mov     (%0),d3                 \n"     \
+                       "       sub     1,d3                    \n"     \
+                       "       mov     d3,(%0)                 \n"     \
+                       "       blt     1f                      \n"     \
+                       "       bra     2f                      \n"     \
+                       "1:     jmp     3f                      \n"     \
+                       "2:                                     \n"     \
+                       "       .section .text.lock,\"ax\"      \n"     \
+                       "3:     call    "helper"[],0            \n"     \
+                       "       jmp     2b                      \n"     \
+                       "       .previous"                              \
+                       :                                               \
+                       : "d" (rw)                                      \
+                       : "memory", "d3", "cc");                        \
+       } while (0)
+
+#define __build_read_lock(rw, helper) \
+       do {                                                            \
+               if (__builtin_constant_p(rw))                           \
+                       __build_read_lock_const(rw, helper);            \
+               else                                                    \
+                       __build_read_lock_ptr(rw, helper);              \
+       } while (0)
+
+#define __build_write_lock_ptr(rw, helper)                             \
+       do {                                                            \
+               asm volatile(                                           \
+                       "       mov     (%0),d3                 \n"     \
+                       "       sub     1,d3                    \n"     \
+                       "       mov     d3,(%0)                 \n"     \
+                       "       blt     1f                      \n"     \
+                       "       bra     2f                      \n"     \
+                       "1:     jmp     3f                      \n"     \
+                       "2:                                     \n"     \
+                       "       .section .text.lock,\"ax\"      \n"     \
+                       "3:     call    "helper"[],0            \n"     \
+                       "       jmp     2b                      \n"     \
+                       "       .previous"                              \
+                       :                                               \
+                       : "d" (rw)                                      \
+                       : "memory", "d3", "cc");                        \
+       } while (0)
+
+#define __build_write_lock_const(rw, helper)                           \
+       do {                                                            \
+               asm volatile(                                           \
+                       "       mov     (%0),d3                 \n"     \
+                       "       sub     1,d3                    \n"     \
+                       "       mov     d3,(%0)                 \n"     \
+                       "       blt     1f                      \n"     \
+                       "       bra     2f                      \n"     \
+                       "1:     jmp     3f                      \n"     \
+                       "2:                                     \n"     \
+                       "       .section .text.lock,\"ax\"      \n"     \
+                       "3:     call    "helper"[],0            \n"     \
+                       "       jmp     2b                      \n"     \
+                       "       .previous"                              \
+                       :                                               \
+                       : "d" (rw)                                      \
+                       : "memory", "d3", "cc");                        \
+       } while (0)
+
+#define __build_write_lock(rw, helper)                                 \
+       do {                                                            \
+               if (__builtin_constant_p(rw))                           \
+                       __build_write_lock_const(rw, helper);           \
+               else                                                    \
+                       __build_write_lock_ptr(rw, helper);             \
+       } while (0)
+
+#endif /* CONFIG_SMP */
+#endif /* _ASM_RWLOCK_H */
index 6498469..8320cda 100644 (file)
 /* serial port 0 */
 #define        SC0CTR                  __SYSREG(0xd4002000, u16)       /* control reg */
 #define        SC01CTR_CK              0x0007  /* clock source select */
-#define        SC0CTR_CK_TM8UFLOW_8    0x0000  /* - 1/8 timer 8 underflow (serial port 0 only) */
-#define        SC1CTR_CK_TM9UFLOW_8    0x0000  /* - 1/8 timer 9 underflow (serial port 1 only) */
 #define        SC01CTR_CK_IOCLK_8      0x0001  /* - 1/8 IOCLK */
 #define        SC01CTR_CK_IOCLK_32     0x0002  /* - 1/32 IOCLK */
+#define        SC01CTR_CK_EXTERN_8     0x0006  /* - 1/8 external clock */
+#define        SC01CTR_CK_EXTERN       0x0007  /* - external clock */
+#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
+#define        SC0CTR_CK_TM8UFLOW_8    0x0000  /* - 1/8 timer 8 underflow (serial port 0 only) */
 #define        SC0CTR_CK_TM2UFLOW_2    0x0003  /* - 1/2 timer 2 underflow (serial port 0 only) */
-#define        SC1CTR_CK_TM3UFLOW_2    0x0003  /* - 1/2 timer 3 underflow (serial port 1 only) */
-#define        SC0CTR_CK_TM0UFLOW_8    0x0004  /* - 1/8 timer 1 underflow (serial port 0 only) */
-#define        SC1CTR_CK_TM1UFLOW_8    0x0004  /* - 1/8 timer 2 underflow (serial port 1 only) */
+#define        SC0CTR_CK_TM0UFLOW_8    0x0004  /* - 1/8 timer 0 underflow (serial port 0 only) */
 #define        SC0CTR_CK_TM2UFLOW_8    0x0005  /* - 1/8 timer 2 underflow (serial port 0 only) */
+#define        SC1CTR_CK_TM9UFLOW_8    0x0000  /* - 1/8 timer 9 underflow (serial port 1 only) */
+#define        SC1CTR_CK_TM3UFLOW_2    0x0003  /* - 1/2 timer 3 underflow (serial port 1 only) */
+#define        SC1CTR_CK_TM1UFLOW_8    0x0004  /* - 1/8 timer 1 underflow (serial port 1 only) */
 #define        SC1CTR_CK_TM3UFLOW_8    0x0005  /* - 1/8 timer 3 underflow (serial port 1 only) */
-#define        SC01CTR_CK_EXTERN_8     0x0006  /* - 1/8 external closk */
-#define        SC01CTR_CK_EXTERN       0x0007  /* - external closk */
+#else  /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+#define        SC0CTR_CK_TM8UFLOW_8    0x0000  /* - 1/8 timer 8 underflow (serial port 0 only) */
+#define        SC0CTR_CK_TM0UFLOW_8    0x0004  /* - 1/8 timer 0 underflow (serial port 0 only) */
+#define        SC0CTR_CK_TM2UFLOW_8    0x0005  /* - 1/8 timer 2 underflow (serial port 0 only) */
+#define        SC1CTR_CK_TM12UFLOW_8   0x0000  /* - 1/8 timer 12 underflow (serial port 1 only) */
+#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
 #define        SC01CTR_STB             0x0008  /* stop bit select */
 #define        SC01CTR_STB_1BIT        0x0000  /* - 1 stop bit */
 #define        SC01CTR_STB_2BIT        0x0008  /* - 2 stop bits */
 
 /* serial port 2 */
 #define        SC2CTR                  __SYSREG(0xd4002020, u16)       /* control reg */
+#ifdef CONFIG_AM33_2
 #define        SC2CTR_CK               0x0003  /* clock source select */
 #define        SC2CTR_CK_TM10UFLOW     0x0000  /* - timer 10 underflow */
 #define        SC2CTR_CK_TM2UFLOW      0x0001  /* - timer 2 underflow */
 #define        SC2CTR_CK_EXTERN        0x0002  /* - external clock */
 #define        SC2CTR_CK_TM3UFLOW      0x0003  /* - timer 3 underflow */
+#else  /* CONFIG_AM33_2 */
+#define        SC2CTR_CK               0x0007  /* clock source select */
+#define        SC2CTR_CK_TM9UFLOW_8    0x0000  /* - 1/8 timer 9 underflow */
+#define        SC2CTR_CK_IOCLK_8       0x0001  /* - 1/8 IOCLK */
+#define        SC2CTR_CK_IOCLK_32      0x0002  /* - 1/32 IOCLK */
+#define        SC2CTR_CK_TM3UFLOW_2    0x0003  /* - 1/2 timer 3 underflow */
+#define        SC2CTR_CK_TM1UFLOW_8    0x0004  /* - 1/8 timer 1 underflow */
+#define        SC2CTR_CK_TM3UFLOW_8    0x0005  /* - 1/8 timer 3 underflow */
+#define        SC2CTR_CK_EXTERN_8      0x0006  /* - 1/8 external clock */
+#define        SC2CTR_CK_EXTERN        0x0007  /* - external clock */
+#endif /* CONFIG_AM33_2 */
 #define        SC2CTR_STB              0x0008  /* stop bit select */
 #define        SC2CTR_STB_1BIT         0x0000  /* - 1 stop bit */
 #define        SC2CTR_STB_2BIT         0x0008  /* - 2 stop bits */
 #define SC2ICR_RES             0x04    /* receive error select */
 #define SC2ICR_RI              0x01    /* receive interrupt cause */
 
-#define        SC2TXB                  __SYSREG(0xd4002018, u8)        /* transmit buffer reg */
-#define        SC2RXB                  __SYSREG(0xd4002019, u8)        /* receive buffer reg */
-#define        SC2STR                  __SYSREG(0xd400201c, u8)        /* status reg */
+#define        SC2TXB                  __SYSREG(0xd4002028, u8)        /* transmit buffer reg */
+#define        SC2RXB                  __SYSREG(0xd4002029, u8)        /* receive buffer reg */
+
+#ifdef CONFIG_AM33_2
+#define        SC2STR                  __SYSREG(0xd400202c, u8)        /* status reg */
+#else  /* CONFIG_AM33_2 */
+#define        SC2STR                  __SYSREG(0xd400202c, u16)       /* status reg */
+#endif /* CONFIG_AM33_2 */
 #define SC2STR_OEF             0x0001  /* overrun error found */
 #define SC2STR_PEF             0x0002  /* parity error found */
 #define SC2STR_FEF             0x0004  /* framing error found */
 #define SC2STR_RXF             0x0040  /* receive status */
 #define SC2STR_TXF             0x0080  /* transmit status */
 
+#ifdef CONFIG_AM33_2
 #define        SC2TIM                  __SYSREG(0xd400202d, u8)        /* status reg */
+#endif
 
+#ifdef CONFIG_AM33_2
 #define SC2RXIRQ               24      /* serial 2 Receive IRQ */
 #define SC2TXIRQ               25      /* serial 2 Transmit IRQ */
+#else  /* CONFIG_AM33_2 */
+#define SC2RXIRQ               68      /* serial 2 Receive IRQ */
+#define SC2TXIRQ               69      /* serial 2 Transmit IRQ */
+#endif /* CONFIG_AM33_2 */
 
 #define        SC2RXICR                GxICR(SC2RXIRQ) /* serial 2 receive intr ctrl reg */
 #define        SC2TXICR                GxICR(SC2TXIRQ) /* serial 2 transmit intr ctrl reg */
index a29445c..23a7992 100644 (file)
@@ -9,10 +9,8 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 
-/*
- * The ASB2305 has an 18.432 MHz clock the UART
- */
-#define BASE_BAUD      (18432000 / 16)
+#ifndef _ASM_SERIAL_H
+#define _ASM_SERIAL_H
 
 /* Standard COM flags (except for COM4, because of the 8514 problem) */
 #ifdef CONFIG_SERIAL_DETECT_IRQ
@@ -34,3 +32,5 @@
 #endif
 
 #include <unit/serial.h>
+
+#endif /* _ASM_SERIAL_H */
index 4eb8c61..a3930e4 100644 (file)
@@ -3,6 +3,16 @@
  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ *  13-Nov-2006 MEI Define IPI-IRQ number and add inline/macro function
+ *                  for SMP support.
+ *  22-Jan-2007 MEI Add the define related to SMP_BOOT_IRQ.
+ *  23-Feb-2007 MEI Add the define related to SMP icache invalidate.
+ *  23-Jun-2008 MEI Delete INTC_IPI.
+ *  22-Jul-2008 MEI Add smp_nmi_call_function and related defines.
+ *  04-Aug-2008 MEI Delete USE_DOIRQ_CACHE_IPI.
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public Licence
  * as published by the Free Software Foundation; either version
 #ifndef _ASM_SMP_H
 #define _ASM_SMP_H
 
-#ifdef CONFIG_SMP
-#error SMP not yet supported for MN10300
+#ifndef __ASSEMBLY__
+#include <linux/threads.h>
+#include <linux/cpumask.h>
 #endif
 
+#ifdef CONFIG_SMP
+#include <proc/smp-regs.h>
+
+#define RESCHEDULE_IPI         63
+#define CALL_FUNC_SINGLE_IPI   192
+#define LOCAL_TIMER_IPI                193
+#define FLUSH_CACHE_IPI                194
+#define CALL_FUNCTION_NMI_IPI  195
+#define GDB_NMI_IPI            196
+
+#define SMP_BOOT_IRQ           195
+
+#define RESCHEDULE_GxICR_LV    GxICR_LEVEL_6
+#define CALL_FUNCTION_GxICR_LV GxICR_LEVEL_4
+#define LOCAL_TIMER_GxICR_LV   GxICR_LEVEL_4
+#define FLUSH_CACHE_GxICR_LV   GxICR_LEVEL_0
+#define SMP_BOOT_GxICR_LV      GxICR_LEVEL_0
+
+#define TIME_OUT_COUNT_BOOT_IPI        100
+#define DELAY_TIME_BOOT_IPI    75000
+
+
+#ifndef __ASSEMBLY__
+
+/**
+ * raw_smp_processor_id - Determine the raw CPU ID of the CPU running it
+ *
+ * What we really want to do is to use the CPUID hardware CPU register to get
+ * this information, but accesses to that aren't cached, and run at system bus
+ * speed, not CPU speed.  A copy of this value is, however, stored in the
+ * thread_info struct, and that can be cached.
+ *
+ * An alternate way of dealing with this could be to use the EPSW.S bits to
+ * cache this information for systems with up to four CPUs.
+ */
+#if 0
+#define raw_smp_processor_id() (CPUID)
+#else
+#define raw_smp_processor_id() (current_thread_info()->cpu)
 #endif
+
+static inline int cpu_logical_map(int cpu)
+{
+       return cpu;
+}
+
+static inline int cpu_number_map(int cpu)
+{
+       return cpu;
+}
+
+
+extern cpumask_t cpu_boot_map;
+
+extern void smp_init_cpus(void);
+extern void smp_cache_interrupt(void);
+extern void send_IPI_allbutself(int irq);
+extern int smp_nmi_call_function(smp_call_func_t func, void *info, int wait);
+
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern int __cpu_disable(void);
+extern void __cpu_die(unsigned int cpu);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+#endif /* __ASSEMBLY__ */
+#else /* CONFIG_SMP */
+#ifndef __ASSEMBLY__
+
+static inline void smp_init_cpus(void) {}
+
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_SMP */
+
+#endif /* _ASM_SMP_H */
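
The raw_smp_processor_id() comment above captures the trade-off: the hardware CPUID register is authoritative but uncached and slow to read, so the patch reads a copy kept in thread_info instead. A minimal user-space sketch of that caching pattern follows; the names and the stub register read are illustrative stand-ins, not the real MN10300 definitions.

#include <stdio.h>

/* Stand-in for the uncached CPUID system register: every read would go out
 * on the system bus at bus speed. */
static int read_cpuid_register(void)
{
	return 0;
}

/* Simplified thread_info: the scheduler keeps ->cpu up to date, so the field
 * can be read with an ordinary cacheable load. */
struct thread_info {
	int cpu;
};

static struct thread_info current_ti;

static int raw_smp_processor_id(void)
{
	return current_ti.cpu;
}

int main(void)
{
	current_ti.cpu = read_cpuid_register();	/* filled in once, e.g. at CPU bring-up */
	printf("running on CPU %d\n", raw_smp_processor_id());
	return 0;
}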
diff --git a/arch/mn10300/include/asm/smsc911x.h b/arch/mn10300/include/asm/smsc911x.h
new file mode 100644 (file)
index 0000000..2fcd108
--- /dev/null
@@ -0,0 +1 @@
+#include <unit/smsc911x.h>
index 4bf9c8b..9342915 100644 (file)
 #ifndef _ASM_SPINLOCK_H
 #define _ASM_SPINLOCK_H
 
-#error SMP spinlocks not implemented for MN10300
+#include <asm/atomic.h>
+#include <asm/rwlock.h>
+#include <asm/page.h>
 
+/*
+ * Simple spin lock operations.  There are two variants: one clears IRQs
+ * on the local processor, one does not.
+ *
+ * We make no fairness assumptions; fairness would have a cost.
+ */
+
+#define arch_spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) != 0)
+#define arch_spin_unlock_wait(x) do { barrier(); } while (arch_spin_is_locked(x))
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+       asm volatile(
+               "       bclr    1,(0,%0)        \n"
+               :
+               : "a"(&lock->slock)
+               : "memory", "cc");
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+       int ret;
+
+       asm volatile(
+               "       mov     1,%0            \n"
+               "       bset    %0,(%1)         \n"
+               "       bne     1f              \n"
+               "       clr     %0              \n"
+               "1:     xor     1,%0            \n"
+               : "=d"(ret)
+               : "a"(&lock->slock)
+               : "memory", "cc");
+
+       return ret;
+}
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+       asm volatile(
+               "1:     bset    1,(0,%0)        \n"
+               "       bne     1b              \n"
+               :
+               : "a"(&lock->slock)
+               : "memory", "cc");
+}
+
+static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
+                                        unsigned long flags)
+{
+       int temp;
+
+       asm volatile(
+               "1:     bset    1,(0,%2)        \n"
+               "       beq     3f              \n"
+               "       mov     %1,epsw         \n"
+               "2:     mov     (0,%2),%0       \n"
+               "       or      %0,%0           \n"
+               "       bne     2b              \n"
+               "       mov     %3,%0           \n"
+               "       mov     %0,epsw         \n"
+               "       nop                     \n"
+               "       nop                     \n"
+               "       bra     1b\n"
+               "3:                             \n"
+               : "=&d" (temp)
+               : "d" (flags), "a"(&lock->slock), "i"(EPSW_IE | MN10300_CLI_LEVEL)
+               : "memory", "cc");
+}
+
+#ifdef __KERNEL__
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! It is quite common to have readers in interrupts
+ * but no interrupt writers.  For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get an
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+
+/**
+ * arch_read_can_lock - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define arch_read_can_lock(x) ((int)(x)->lock > 0)
+
+/**
+ * arch_write_can_lock - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+
+/*
+ * On mn10300, we implement read-write locks as a 32-bit counter
+ * with the high bit (sign) being the "contended" bit.
+ */
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+       __build_read_lock(rw, "__read_lock_failed");
+#else
+       {
+               atomic_t *count = (atomic_t *)rw;
+               while (atomic_dec_return(count) < 0)
+                       atomic_inc(count);
+       }
+#endif
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+       __build_write_lock(rw, "__write_lock_failed");
+#else
+       {
+               atomic_t *count = (atomic_t *)rw;
+               while (!atomic_sub_and_test(RW_LOCK_BIAS, count))
+                       atomic_add(RW_LOCK_BIAS, count);
+       }
+#endif
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+       __build_read_unlock(rw);
+#else
+       {
+               atomic_t *count = (atomic_t *)rw;
+               atomic_inc(count);
+       }
+#endif
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+       __build_write_unlock(rw);
+#else
+       {
+               atomic_t *count = (atomic_t *)rw;
+               atomic_add(RW_LOCK_BIAS, count);
+       }
+#endif
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *lock)
+{
+       atomic_t *count = (atomic_t *)lock;
+       atomic_dec(count);
+       if (atomic_read(count) >= 0)
+               return 1;
+       atomic_inc(count);
+       return 0;
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *lock)
+{
+       atomic_t *count = (atomic_t *)lock;
+       if (atomic_sub_and_test(RW_LOCK_BIAS, count))
+               return 1;
+       atomic_add(RW_LOCK_BIAS, count);
+       return 0;
+}
+
+#define arch_read_lock_flags(lock, flags)  arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define _raw_spin_relax(lock)  cpu_relax()
+#define _raw_read_relax(lock)  cpu_relax()
+#define _raw_write_relax(lock) cpu_relax()
+
+#endif /* __KERNEL__ */
 #endif /* _ASM_SPINLOCK_H */
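
The comment block above explains the rwlock layout: the lock word is a 32-bit counter initialised to RW_LOCK_BIAS, each reader subtracts one, and a writer subtracts the whole bias, so the counter reaches zero only when a writer owns the lock outright and goes negative when a reader collides with a writer. The following user-space sketch reproduces that arithmetic with C11 atomics purely as an illustration of the counting scheme; it is not the kernel's arch_rwlock_t, and the bias value is assumed.

#include <stdatomic.h>
#include <stdio.h>

#define RW_LOCK_BIAS 0x01000000	/* assumed: larger than any possible reader count */

static atomic_int lock = RW_LOCK_BIAS;	/* unlocked state holds the full bias */

static void read_lock(void)
{
	/* atomic_fetch_sub() returns the old value; old <= 0 means a writer
	 * holds the bias, so undo the decrement and retry. */
	while (atomic_fetch_sub(&lock, 1) <= 0)
		atomic_fetch_add(&lock, 1);
}

static void read_unlock(void)
{
	atomic_fetch_add(&lock, 1);
}

static void write_lock(void)
{
	/* A writer claims the whole bias; success only if nobody else held
	 * any part of it (old value was exactly RW_LOCK_BIAS). */
	while (atomic_fetch_sub(&lock, RW_LOCK_BIAS) != RW_LOCK_BIAS)
		atomic_fetch_add(&lock, RW_LOCK_BIAS);
}

static void write_unlock(void)
{
	atomic_fetch_add(&lock, RW_LOCK_BIAS);
}

int main(void)
{
	read_lock();
	printf("reader holds lock, counter = %d\n", atomic_load(&lock));
	read_unlock();
	write_lock();
	printf("writer holds lock, counter = %d\n", atomic_load(&lock));
	write_unlock();
	return 0;
}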
diff --git a/arch/mn10300/include/asm/spinlock_types.h b/arch/mn10300/include/asm/spinlock_types.h
new file mode 100644 (file)
index 0000000..653dc51
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef _ASM_SPINLOCK_TYPES_H
+#define _ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct arch_spinlock {
+       unsigned int slock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED      { 0 }
+
+typedef struct {
+       unsigned int lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED                { RW_LOCK_BIAS }
+
+#endif /* _ASM_SPINLOCK_TYPES_H */
index 9f7c7e1..8ff3e5a 100644 (file)
 #define _ASM_SYSTEM_H
 
 #include <asm/cpu-regs.h>
+#include <asm/intctl-regs.h>
 
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
 
 #include <linux/kernel.h>
 #include <linux/irqflags.h>
+#include <asm/atomic.h>
+
+#if !defined(CONFIG_LAZY_SAVE_FPU)
+struct fpu_state_struct;
+extern asmlinkage void fpu_save(struct fpu_state_struct *);
+#define switch_fpu(prev, next)                                         \
+       do {                                                            \
+               if ((prev)->thread.fpu_flags & THREAD_HAS_FPU) {        \
+                       (prev)->thread.fpu_flags &= ~THREAD_HAS_FPU;    \
+                       (prev)->thread.uregs->epsw &= ~EPSW_FE;         \
+                       fpu_save(&(prev)->thread.fpu_state);            \
+               }                                                       \
+       } while (0)
+#else
+#define switch_fpu(prev, next) do {} while (0)
+#endif
 
 struct task_struct;
 struct thread_struct;
@@ -30,6 +47,7 @@ struct task_struct *__switch_to(struct thread_struct *prev,
 /* context switching is now performed out-of-line in switch_to.S */
 #define switch_to(prev, next, last)                                    \
 do {                                                                   \
+       switch_fpu(prev, next);                                         \
        current->thread.wchan = (u_long) __builtin_return_address(0);   \
        (last) = __switch_to(&(prev)->thread, &(next)->thread, (prev)); \
        mb();                                                           \
@@ -40,8 +58,6 @@ do {                                                                  \
 
 #define nop() asm volatile ("nop")
 
-#endif /* !__ASSEMBLY__ */
-
 /*
  * Force strict CPU ordering.
  * And yes, this is required on UP too when we're talking
@@ -68,64 +84,19 @@ do {                                                                        \
 #define smp_mb()       mb()
 #define smp_rmb()      rmb()
 #define smp_wmb()      wmb()
-#else
+#define set_mb(var, value)  do { xchg(&var, value); } while (0)
+#else  /* CONFIG_SMP */
 #define smp_mb()       barrier()
 #define smp_rmb()      barrier()
 #define smp_wmb()      barrier()
-#endif
-
 #define set_mb(var, value)  do { var = value;  mb(); } while (0)
+#endif /* CONFIG_SMP */
+
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
 
 #define read_barrier_depends()         do {} while (0)
 #define smp_read_barrier_depends()     do {} while (0)
 
-/*****************************************************************************/
-/*
- * MN10300 doesn't actually have an exchange instruction
- */
-#ifndef __ASSEMBLY__
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((struct __xchg_dummy *)(x))
-
-static inline
-unsigned long __xchg(volatile unsigned long *m, unsigned long val)
-{
-       unsigned long retval;
-       unsigned long flags;
-
-       local_irq_save(flags);
-       retval = *m;
-       *m = val;
-       local_irq_restore(flags);
-       return retval;
-}
-
-#define xchg(ptr, v)                                           \
-       ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr),    \
-                                    (unsigned long)(v)))
-
-static inline unsigned long __cmpxchg(volatile unsigned long *m,
-                                     unsigned long old, unsigned long new)
-{
-       unsigned long retval;
-       unsigned long flags;
-
-       local_irq_save(flags);
-       retval = *m;
-       if (retval == old)
-               *m = new;
-       local_irq_restore(flags);
-       return retval;
-}
-
-#define cmpxchg(ptr, o, n)                                     \
-       ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
-                                       (unsigned long)(o),     \
-                                       (unsigned long)(n)))
-
 #endif /* !__ASSEMBLY__ */
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_SYSTEM_H */
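
When lazy FPU saving is configured out, the switch_fpu() hook that the patch adds to switch_to() eagerly saves the outgoing task's FPU state and clears both the ownership flag and the EPSW.FE enable bit, so that task's next FPU use faults and the state can be reloaded cleanly. Below is a condensed user-space sketch of that hand-off; the flag values are placeholders and fpu_save() is a stub, not the real register dump.

#include <stdio.h>

#define THREAD_HAS_FPU	0x01		/* placeholder flag value */
#define EPSW_FE		0x01000000	/* placeholder bit: FPU enable in EPSW */

struct thread {
	unsigned int fpu_flags;
	unsigned int epsw;
	double fpu_state[8];		/* stand-in for the saved register file */
};

static void fpu_save(double *state)
{
	(void)state;			/* the real code dumps the FPU registers here */
	printf("FPU state saved\n");
}

static void switch_fpu(struct thread *prev)
{
	if (prev->fpu_flags & THREAD_HAS_FPU) {
		prev->fpu_flags &= ~THREAD_HAS_FPU;	/* task no longer owns the FPU */
		prev->epsw &= ~EPSW_FE;			/* its next FPU insn will fault and reload */
		fpu_save(prev->fpu_state);
	}
}

int main(void)
{
	struct thread t = { .fpu_flags = THREAD_HAS_FPU, .epsw = EPSW_FE };
	switch_fpu(&t);			/* as invoked from switch_to() */
	return 0;
}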
index 2001cb6..aa07a4a 100644 (file)
 
 #include <asm/page.h>
 
-#ifndef __ASSEMBLY__
-#include <asm/processor.h>
-#endif
-
 #define PREEMPT_ACTIVE         0x10000000
 
 #ifdef CONFIG_4KSTACKS
  *   must also be changed
  */
 #ifndef __ASSEMBLY__
+typedef struct {
+       unsigned long   seg;
+} mm_segment_t;
 
 struct thread_info {
        struct task_struct      *task;          /* main task structure */
        struct exec_domain      *exec_domain;   /* execution domain */
+       struct pt_regs          *frame;         /* current exception frame */
        unsigned long           flags;          /* low level flags */
        __u32                   cpu;            /* current CPU */
        __s32                   preempt_count;  /* 0 => preemptable, <0 => BUG */
@@ -55,6 +55,10 @@ struct thread_info {
        __u8                    supervisor_stack[0];
 };
 
+#define thread_info_to_uregs(ti)                                       \
+       ((struct pt_regs *)                                             \
+        ((unsigned long)ti + THREAD_SIZE - sizeof(struct pt_regs)))
+
 #else /* !__ASSEMBLY__ */
 
 #ifndef __ASM_OFFSETS_H__
@@ -102,6 +106,12 @@ struct thread_info *current_thread_info(void)
        return ti;
 }
 
+static inline __attribute__((const))
+struct pt_regs *current_frame(void)
+{
+       return current_thread_info()->frame;
+}
+
 /* how to get the current stack pointer from C */
 static inline unsigned long current_stack_pointer(void)
 {
index 1d883b7..c634977 100644 (file)
 
 #ifdef __KERNEL__
 
-/* timer prescalar control */
+/*
+ * Timer prescaler control
+ */
 #define        TMPSCNT                 __SYSREG(0xd4003071, u8) /* timer prescaler control */
 #define        TMPSCNT_ENABLE          0x80    /* timer prescaler enable */
 #define        TMPSCNT_DISABLE         0x00    /* timer prescaler disable */
 
-/* 8 bit timers */
+/*
+ * 8-bit timers
+ */
 #define        TM0MD                   __SYSREG(0xd4003000, u8) /* timer 0 mode register */
 #define        TM0MD_SRC               0x07    /* timer source */
 #define        TM0MD_SRC_IOCLK         0x00    /* - IOCLK */
 #define        TM0MD_SRC_IOCLK_8       0x01    /* - 1/8 IOCLK */
 #define        TM0MD_SRC_IOCLK_32      0x02    /* - 1/32 IOCLK */
-#define        TM0MD_SRC_TM2IO         0x03    /* - TM2IO pin input */
 #define        TM0MD_SRC_TM1UFLOW      0x05    /* - timer 1 underflow */
 #define        TM0MD_SRC_TM2UFLOW      0x06    /* - timer 2 underflow */
+#if    defined(CONFIG_AM33_2)
+#define        TM0MD_SRC_TM2IO         0x03    /* - TM2IO pin input */
 #define        TM0MD_SRC_TM0IO         0x07    /* - TM0IO pin input */
+#endif /* CONFIG_AM33_2 */
 #define        TM0MD_INIT_COUNTER      0x40    /* initialize TMnBC = TMnBR */
 #define        TM0MD_COUNT_ENABLE      0x80    /* timer count enable */
 
@@ -43,7 +49,9 @@
 #define        TM1MD_SRC_TM0CASCADE    0x03    /* - cascade with timer 0 */
 #define        TM1MD_SRC_TM0UFLOW      0x04    /* - timer 0 underflow */
 #define        TM1MD_SRC_TM2UFLOW      0x06    /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define        TM1MD_SRC_TM1IO         0x07    /* - TM1IO pin input */
+#endif /* CONFIG_AM33_2 */
 #define        TM1MD_INIT_COUNTER      0x40    /* initialize TMnBC = TMnBR */
 #define        TM1MD_COUNT_ENABLE      0x80    /* timer count enable */
 
@@ -55,7 +63,9 @@
 #define        TM2MD_SRC_TM1CASCADE    0x03    /* - cascade with timer 1 */
 #define        TM2MD_SRC_TM0UFLOW      0x04    /* - timer 0 underflow */
 #define        TM2MD_SRC_TM1UFLOW      0x05    /* - timer 1 underflow */
+#if defined(CONFIG_AM33_2)
 #define        TM2MD_SRC_TM2IO         0x07    /* - TM2IO pin input */
+#endif /* CONFIG_AM33_2 */
 #define        TM2MD_INIT_COUNTER      0x40    /* initialize TMnBC = TMnBR */
 #define        TM2MD_COUNT_ENABLE      0x80    /* timer count enable */
 
 #define        TM3MD_SRC_IOCLK         0x00    /* - IOCLK */
 #define        TM3MD_SRC_IOCLK_8       0x01    /* - 1/8 IOCLK */
 #define        TM3MD_SRC_IOCLK_32      0x02    /* - 1/32 IOCLK */
-#define        TM3MD_SRC_TM1CASCADE    0x03    /* - cascade with timer 2 */
+#define        TM3MD_SRC_TM2CASCADE    0x03    /* - cascade with timer 2 */
 #define        TM3MD_SRC_TM0UFLOW      0x04    /* - timer 0 underflow */
 #define        TM3MD_SRC_TM1UFLOW      0x05    /* - timer 1 underflow */
 #define        TM3MD_SRC_TM2UFLOW      0x06    /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define        TM3MD_SRC_TM3IO         0x07    /* - TM3IO pin input */
+#endif /* CONFIG_AM33_2 */
 #define        TM3MD_INIT_COUNTER      0x40    /* initialize TMnBC = TMnBR */
 #define        TM3MD_COUNT_ENABLE      0x80    /* timer count enable */
 
 #define        TM2ICR                  GxICR(TM2IRQ)   /* timer 2 uflow intr ctrl reg */
 #define        TM3ICR                  GxICR(TM3IRQ)   /* timer 3 uflow intr ctrl reg */
 
-/* 16-bit timers 4,5 & 7-11 */
+/*
+ * 16-bit timers 4,5 & 7-15
+ */
 #define        TM4MD                   __SYSREG(0xd4003080, u8)   /* timer 4 mode register */
 #define        TM4MD_SRC               0x07    /* timer source */
 #define        TM4MD_SRC_IOCLK         0x00    /* - IOCLK */
 #define        TM4MD_SRC_TM0UFLOW      0x04    /* - timer 0 underflow */
 #define        TM4MD_SRC_TM1UFLOW      0x05    /* - timer 1 underflow */
 #define        TM4MD_SRC_TM2UFLOW      0x06    /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define        TM4MD_SRC_TM4IO         0x07    /* - TM4IO pin input */
+#endif /* CONFIG_AM33_2 */
 #define        TM4MD_INIT_COUNTER      0x40    /* initialize TMnBC = TMnBR */
 #define        TM4MD_COUNT_ENABLE      0x80    /* timer count enable */
 
 #define        TM5MD_SRC_TM0UFLOW      0x04    /* - timer 0 underflow */
 #define        TM5MD_SRC_TM1UFLOW      0x05    /* - timer 1 underflow */
 #define        TM5MD_SRC_TM2UFLOW      0x06    /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define        TM5MD_SRC_TM5IO         0x07    /* - TM5IO pin input */
+#else  /* !CONFIG_AM33_2 */
+#define        TM5MD_SRC_TM7UFLOW      0x07    /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
 #define        TM5MD_INIT_COUNTER      0x40    /* initialize TMnBC = TMnBR */
 #define        TM5MD_COUNT_ENABLE      0x80    /* timer count enable */
 
 #define        TM7MD_SRC_TM0UFLOW      0x04    /* - timer 0 underflow */
 #define        TM7MD_SRC_TM1UFLOW      0x05    /* - timer 1 underflow */
 #define        TM7MD_SRC_TM2UFLOW      0x06    /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define        TM7MD_SRC_TM7IO         0x07    /* - TM7IO pin input */
+#endif /* CONFIG_AM33_2 */
 #define        TM7MD_INIT_COUNTER      0x40    /* initialize TMnBC = TMnBR */
 #define        TM7MD_COUNT_ENABLE      0x80    /* timer count enable */
 
 #define        TM8MD_SRC_TM0UFLOW      0x04    /* - timer 0 underflow */
 #define        TM8MD_SRC_TM1UFLOW      0x05    /* - timer 1 underflow */
 #define        TM8MD_SRC_TM2UFLOW      0x06    /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define        TM8MD_SRC_TM8IO         0x07    /* - TM8IO pin input */
+#else  /* !CONFIG_AM33_2 */
+#define        TM8MD_SRC_TM7UFLOW      0x07    /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
 #define        TM8MD_INIT_COUNTER      0x40    /* initialize TMnBC = TMnBR */
 #define        TM8MD_COUNT_ENABLE      0x80    /* timer count enable */
 
 #define        TM9MD_SRC_TM0UFLOW      0x04    /* - timer 0 underflow */
 #define        TM9MD_SRC_TM1UFLOW      0x05    /* - timer 1 underflow */
 #define        TM9MD_SRC_TM2UFLOW      0x06    /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define        TM9MD_SRC_TM9IO         0x07    /* - TM9IO pin input */
+#else  /* !CONFIG_AM33_2 */
+#define        TM9MD_SRC_TM7UFLOW      0x07    /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
 #define        TM9MD_INIT_COUNTER      0x40    /* initialize TMnBC = TMnBR */
 #define        TM9MD_COUNT_ENABLE      0x80    /* timer count enable */
 
 #define        TM10MD_SRC_TM0UFLOW     0x04    /* - timer 0 underflow */
 #define        TM10MD_SRC_TM1UFLOW     0x05    /* - timer 1 underflow */
 #define        TM10MD_SRC_TM2UFLOW     0x06    /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define        TM10MD_SRC_TM10IO       0x07    /* - TM10IO pin input */
+#else  /* !CONFIG_AM33_2 */
+#define        TM10MD_SRC_TM7UFLOW     0x07    /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
 #define        TM10MD_INIT_COUNTER     0x40    /* initialize TMnBC = TMnBR */
 #define        TM10MD_COUNT_ENABLE     0x80    /* timer count enable */
 
 #define        TM11MD_SRC_IOCLK        0x00    /* - IOCLK */
 #define        TM11MD_SRC_IOCLK_8      0x01    /* - 1/8 IOCLK */
 #define        TM11MD_SRC_IOCLK_32     0x02    /* - 1/32 IOCLK */
-#define        TM11MD_SRC_TM7CASCADE   0x03    /* - cascade with timer 7 */
 #define        TM11MD_SRC_TM0UFLOW     0x04    /* - timer 0 underflow */
 #define        TM11MD_SRC_TM1UFLOW     0x05    /* - timer 1 underflow */
 #define        TM11MD_SRC_TM2UFLOW     0x06    /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define        TM11MD_SRC_TM11IO       0x07    /* - TM11IO pin input */
+#else  /* !CONFIG_AM33_2 */
+#define        TM11MD_SRC_TM7UFLOW     0x07    /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
 #define        TM11MD_INIT_COUNTER     0x40    /* initialize TMnBC = TMnBR */
 #define        TM11MD_COUNT_ENABLE     0x80    /* timer count enable */
 
+#if defined(CONFIG_AM34_2)
+#define        TM12MD                  __SYSREG(0xd4003180, u8)   /* timer 12 mode register */
+#define        TM12MD_SRC              0x07    /* timer source */
+#define        TM12MD_SRC_IOCLK        0x00    /* - IOCLK */
+#define        TM12MD_SRC_IOCLK_8      0x01    /* - 1/8 IOCLK */
+#define        TM12MD_SRC_IOCLK_32     0x02    /* - 1/32 IOCLK */
+#define        TM12MD_SRC_TM0UFLOW     0x04    /* - timer 0 underflow */
+#define        TM12MD_SRC_TM1UFLOW     0x05    /* - timer 1 underflow */
+#define        TM12MD_SRC_TM2UFLOW     0x06    /* - timer 2 underflow */
+#define        TM12MD_SRC_TM7UFLOW     0x07    /* - timer 7 underflow */
+#define        TM12MD_INIT_COUNTER     0x40    /* initialize TMnBC = TMnBR */
+#define        TM12MD_COUNT_ENABLE     0x80    /* timer count enable */
+
+#define        TM13MD                  __SYSREG(0xd4003182, u8)   /* timer 13 mode register */
+#define        TM13MD_SRC              0x07    /* timer source */
+#define        TM13MD_SRC_IOCLK        0x00    /* - IOCLK */
+#define        TM13MD_SRC_IOCLK_8      0x01    /* - 1/8 IOCLK */
+#define        TM13MD_SRC_IOCLK_32     0x02    /* - 1/32 IOCLK */
+#define        TM13MD_SRC_TM12CASCADE  0x03    /* - cascade with timer 12 */
+#define        TM13MD_SRC_TM0UFLOW     0x04    /* - timer 0 underflow */
+#define        TM13MD_SRC_TM1UFLOW     0x05    /* - timer 1 underflow */
+#define        TM13MD_SRC_TM2UFLOW     0x06    /* - timer 2 underflow */
+#define        TM13MD_SRC_TM7UFLOW     0x07    /* - timer 7 underflow */
+#define        TM13MD_INIT_COUNTER     0x40    /* initialize TMnBC = TMnBR */
+#define        TM13MD_COUNT_ENABLE     0x80    /* timer count enable */
+
+#define        TM14MD                  __SYSREG(0xd4003184, u8)   /* timer 14 mode register */
+#define        TM14MD_SRC              0x07    /* timer source */
+#define        TM14MD_SRC_IOCLK        0x00    /* - IOCLK */
+#define        TM14MD_SRC_IOCLK_8      0x01    /* - 1/8 IOCLK */
+#define        TM14MD_SRC_IOCLK_32     0x02    /* - 1/32 IOCLK */
+#define        TM14MD_SRC_TM13CASCADE  0x03    /* - cascade with timer 13 */
+#define        TM14MD_SRC_TM0UFLOW     0x04    /* - timer 0 underflow */
+#define        TM14MD_SRC_TM1UFLOW     0x05    /* - timer 1 underflow */
+#define        TM14MD_SRC_TM2UFLOW     0x06    /* - timer 2 underflow */
+#define        TM14MD_SRC_TM7UFLOW     0x07    /* - timer 7 underflow */
+#define        TM14MD_INIT_COUNTER     0x40    /* initialize TMnBC = TMnBR */
+#define        TM14MD_COUNT_ENABLE     0x80    /* timer count enable */
+
+#define        TM15MD                  __SYSREG(0xd4003186, u8)   /* timer 15 mode register */
+#define        TM15MD_SRC              0x07    /* timer source */
+#define        TM15MD_SRC_IOCLK        0x00    /* - IOCLK */
+#define        TM15MD_SRC_IOCLK_8      0x01    /* - 1/8 IOCLK */
+#define        TM15MD_SRC_IOCLK_32     0x02    /* - 1/32 IOCLK */
+#define        TM15MD_SRC_TM0UFLOW     0x04    /* - timer 0 underflow */
+#define        TM15MD_SRC_TM1UFLOW     0x05    /* - timer 1 underflow */
+#define        TM15MD_SRC_TM2UFLOW     0x06    /* - timer 2 underflow */
+#define        TM15MD_SRC_TM7UFLOW     0x07    /* - timer 7 underflow */
+#define        TM15MD_INIT_COUNTER     0x40    /* initialize TMnBC = TMnBR */
+#define        TM15MD_COUNT_ENABLE     0x80    /* timer count enable */
+#endif /* CONFIG_AM34_2 */
+
+
 #define        TM4BR                   __SYSREG(0xd4003090, u16)  /* timer 4 base register */
 #define        TM5BR                   __SYSREG(0xd4003092, u16)  /* timer 5 base register */
+#define        TM45BR                  __SYSREG(0xd4003090, u32)  /* timer 4:5 base register */
 #define        TM7BR                   __SYSREG(0xd4003096, u16)  /* timer 7 base register */
 #define        TM8BR                   __SYSREG(0xd4003098, u16)  /* timer 8 base register */
 #define        TM9BR                   __SYSREG(0xd400309a, u16)  /* timer 9 base register */
+#define        TM89BR                  __SYSREG(0xd4003098, u32)  /* timer 8:9 base register */
 #define        TM10BR                  __SYSREG(0xd400309c, u16)  /* timer 10 base register */
 #define        TM11BR                  __SYSREG(0xd400309e, u16)  /* timer 11 base register */
-#define        TM45BR                  __SYSREG(0xd4003090, u32)  /* timer 4:5 base register */
+#if defined(CONFIG_AM34_2)
+#define        TM12BR                  __SYSREG(0xd4003190, u16)  /* timer 12 base register */
+#define        TM13BR                  __SYSREG(0xd4003192, u16)  /* timer 13 base register */
+#define        TM14BR                  __SYSREG(0xd4003194, u16)  /* timer 14 base register */
+#define        TM15BR                  __SYSREG(0xd4003196, u16)  /* timer 15 base register */
+#endif /* CONFIG_AM34_2 */
 
 #define        TM4BC                   __SYSREG(0xd40030a0, u16)  /* timer 4 binary counter */
 #define        TM5BC                   __SYSREG(0xd40030a2, u16)  /* timer 5 binary counter */
 #define        TM45BC                  __SYSREG(0xd40030a0, u32)  /* timer 4:5 binary counter */
-
 #define        TM7BC                   __SYSREG(0xd40030a6, u16)  /* timer 7 binary counter */
 #define        TM8BC                   __SYSREG(0xd40030a8, u16)  /* timer 8 binary counter */
 #define        TM9BC                   __SYSREG(0xd40030aa, u16)  /* timer 9 binary counter */
+#define        TM89BC                  __SYSREG(0xd40030a8, u32)  /* timer 8:9 binary counter */
 #define        TM10BC                  __SYSREG(0xd40030ac, u16)  /* timer 10 binary counter */
 #define        TM11BC                  __SYSREG(0xd40030ae, u16)  /* timer 11 binary counter */
+#if defined(CONFIG_AM34_2)
+#define        TM12BC                  __SYSREG(0xd40031a0, u16)  /* timer 12 binary counter */
+#define        TM13BC                  __SYSREG(0xd40031a2, u16)  /* timer 13 binary counter */
+#define        TM14BC                  __SYSREG(0xd40031a4, u16)  /* timer 14 binary counter */
+#define        TM15BC                  __SYSREG(0xd40031a6, u16)  /* timer 15 binary counter */
+#endif /* CONFIG_AM34_2 */
 
 #define TM4IRQ                 6       /* timer 4 IRQ */
 #define TM5IRQ                 7       /* timer 5 IRQ */
 #define TM9IRQ                 13      /* timer 9 IRQ */
 #define TM10IRQ                        14      /* timer 10 IRQ */
 #define TM11IRQ                        15      /* timer 11 IRQ */
+#if defined(CONFIG_AM34_2)
+#define TM12IRQ                        64      /* timer 12 IRQ */
+#define TM13IRQ                        65      /* timer 13 IRQ */
+#define TM14IRQ                        66      /* timer 14 IRQ */
+#define TM15IRQ                        67      /* timer 15 IRQ */
+#endif /* CONFIG_AM34_2 */
 
 #define        TM4ICR                  GxICR(TM4IRQ)   /* timer 4 uflow intr ctrl reg */
 #define        TM5ICR                  GxICR(TM5IRQ)   /* timer 5 uflow intr ctrl reg */
 #define        TM9ICR                  GxICR(TM9IRQ)   /* timer 9 uflow intr ctrl reg */
 #define        TM10ICR                 GxICR(TM10IRQ)  /* timer 10 uflow intr ctrl reg */
 #define        TM11ICR                 GxICR(TM11IRQ)  /* timer 11 uflow intr ctrl reg */
-
-/* 16-bit timer 6 */
+#if defined(CONFIG_AM34_2)
+#define        TM12ICR                 GxICR(TM12IRQ)  /* timer 12 uflow intr ctrl reg */
+#define        TM13ICR                 GxICR(TM13IRQ)  /* timer 13 uflow intr ctrl reg */
+#define        TM14ICR                 GxICR(TM14IRQ)  /* timer 14 uflow intr ctrl reg */
+#define        TM15ICR                 GxICR(TM15IRQ)  /* timer 15 uflow intr ctrl reg */
+#endif /* CONFIG_AM34_2 */
+
+/*
+ * 16-bit timer 6
+ */
 #define        TM6MD                   __SYSREG(0xd4003084, u16)  /* timer6 mode register */
 #define        TM6MD_SRC               0x0007  /* timer source */
 #define        TM6MD_SRC_IOCLK         0x0000  /* - IOCLK */
 #define        TM6MD_SRC_IOCLK_32      0x0002  /* - 1/32 IOCLK */
 #define        TM6MD_SRC_TM0UFLOW      0x0004  /* - timer 0 underflow */
 #define        TM6MD_SRC_TM1UFLOW      0x0005  /* - timer 1 underflow */
-#define        TM6MD_SRC_TM6IOB_BOTH   0x0006  /* - TM6IOB pin input (both edges) */
+#define        TM6MD_SRC_TM2UFLOW      0x0006  /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
+/* #define     TM6MD_SRC_TM6IOB_BOTH   0x0006 */       /* - TM6IOB pin input (both edges) */
 #define        TM6MD_SRC_TM6IOB_SINGLE 0x0007  /* - TM6IOB pin input (single edge) */
-#define        TM6MD_CLR_ENABLE        0x0010  /* clear count enable */
+#endif /* CONFIG_AM33_2 */
 #define        TM6MD_ONESHOT_ENABLE    0x0040  /* oneshot count */
+#define        TM6MD_CLR_ENABLE        0x0010  /* clear count enable */
+#if    defined(CONFIG_AM33_2)
 #define        TM6MD_TRIG_ENABLE       0x0080  /* TM6IOB pin trigger enable */
 #define TM6MD_PWM              0x3800  /* PWM output mode */
 #define TM6MD_PWM_DIS          0x0000  /* - disabled */
 #define        TM6MD_PWM_11BIT         0x1800  /* - 11 bits mode */
 #define        TM6MD_PWM_12BIT         0x3000  /* - 12 bits mode */
 #define        TM6MD_PWM_14BIT         0x3800  /* - 14 bits mode */
+#endif /* CONFIG_AM33_2 */
+
 #define        TM6MD_INIT_COUNTER      0x4000  /* initialize TMnBC to zero */
 #define        TM6MD_COUNT_ENABLE      0x8000  /* timer count enable */
 
 #define        TM6MDA                  __SYSREG(0xd40030b4, u8)   /* timer6 cmp/cap A mode reg */
+#define        TM6MDA_MODE_CMP_SINGLE  0x00    /* - compare, single buffer mode */
+#define        TM6MDA_MODE_CMP_DOUBLE  0x40    /* - compare, double buffer mode */
+#if    defined(CONFIG_AM33_2)
 #define TM6MDA_OUT             0x07    /* output select */
 #define        TM6MDA_OUT_SETA_RESETB  0x00    /* - set at match A, reset at match B */
 #define        TM6MDA_OUT_SETA_RESETOV 0x01    /* - set at match A, reset at overflow */
 #define        TM6MDA_OUT_RESETA       0x03    /* - reset at match A */
 #define        TM6MDA_OUT_TOGGLE       0x04    /* - toggle on match A */
 #define TM6MDA_MODE            0xc0    /* compare A register mode */
-#define        TM6MDA_MODE_CMP_SINGLE  0x00    /* - compare, single buffer mode */
-#define        TM6MDA_MODE_CMP_DOUBLE  0x40    /* - compare, double buffer mode */
 #define        TM6MDA_MODE_CAP_S_EDGE  0x80    /* - capture, single edge mode */
 #define        TM6MDA_MODE_CAP_D_EDGE  0xc0    /* - capture, double edge mode */
 #define TM6MDA_EDGE            0x20    /* compare A edge select */
 #define        TM6MDA_EDGE_FALLING     0x00    /* capture on falling edge */
 #define        TM6MDA_EDGE_RISING      0x20    /* capture on rising edge */
 #define        TM6MDA_CAPTURE_ENABLE   0x10    /* capture enable */
+#else  /* !CONFIG_AM33_2 */
+#define        TM6MDA_MODE             0x40    /* compare A register mode */
+#endif /* CONFIG_AM33_2 */
 
 #define        TM6MDB                  __SYSREG(0xd40030b5, u8)   /* timer6 cmp/cap B mode reg */
+#define        TM6MDB_MODE_CMP_SINGLE  0x00    /* - compare, single buffer mode */
+#define        TM6MDB_MODE_CMP_DOUBLE  0x40    /* - compare, double buffer mode */
+#if defined(CONFIG_AM33_2)
 #define TM6MDB_OUT             0x07    /* output select */
 #define        TM6MDB_OUT_SETB_RESETA  0x00    /* - set at match B, reset at match A */
 #define        TM6MDB_OUT_SETB_RESETOV 0x01    /* - set at match B, reset at overflow */
 #define        TM6MDB_OUT_RESETB       0x03    /* - reset at match B */
 #define        TM6MDB_OUT_TOGGLE       0x04    /* - toggle on match B */
 #define TM6MDB_MODE            0xc0    /* compare B register mode */
-#define        TM6MDB_MODE_CMP_SINGLE  0x00    /* - compare, single buffer mode */
-#define        TM6MDB_MODE_CMP_DOUBLE  0x40    /* - compare, double buffer mode */
 #define        TM6MDB_MODE_CAP_S_EDGE  0x80    /* - capture, single edge mode */
 #define        TM6MDB_MODE_CAP_D_EDGE  0xc0    /* - capture, double edge mode */
 #define TM6MDB_EDGE            0x20    /* compare B edge select */
 #define        TM6MDB_EDGE_FALLING     0x00    /* capture on falling edge */
 #define        TM6MDB_EDGE_RISING      0x20    /* capture on rising edge */
 #define        TM6MDB_CAPTURE_ENABLE   0x10    /* capture enable */
+#else  /* !CONFIG_AM33_2 */
+#define        TM6MDB_MODE             0x40    /* compare B register mode */
+#endif /* CONFIG_AM33_2 */
 
 #define        TM6CA                   __SYSREG(0xd40030c4, u16)   /* timer6 cmp/capture reg A */
 #define        TM6CB                   __SYSREG(0xd40030d4, u16)   /* timer6 cmp/capture reg B */
 #define        TM6AICR                 GxICR(TM6AIRQ)  /* timer 6A intr control reg */
 #define        TM6BICR                 GxICR(TM6BIRQ)  /* timer 6B intr control reg */
 
+#if defined(CONFIG_AM34_2)
+/*
+ * MTM: OS Tick-Timer
+ */
+#define        TMTMD                   __SYSREG(0xd4004100, u8)        /* Tick Timer mode register */
+#define        TMTMD_TMTLDE            0x40    /* initialize TMTBC = TMTBR */
+#define        TMTMD_TMTCNE            0x80    /* timer count enable       */
+
+#define        TMTBR                   __SYSREG(0xd4004110, u32)       /* Tick Timer base register */
+#define        TMTBC                   __SYSREG(0xd4004120, u32)       /* Tick Timer binary counter */
+
+/*
+ * MTM: OS Timestamp-Timer
+ */
+#define        TMSMD                   __SYSREG(0xd4004140, u8)        /* Timestamp Timer mode register */
+#define        TMSMD_TMSLDE            0x40            /* initialize TMSBC = TMSBR */
+#define        TMSMD_TMSCNE            0x80            /* timer count enable       */
+
+#define        TMSBR                   __SYSREG(0xd4004150, u32)       /* Timestamp Timer base register */
+#define        TMSBC                   __SYSREG(0xd4004160, u32)       /* Timestamp Timer binary counter */
+
+#define TMTIRQ                 119             /* OS Tick timer   IRQ */
+#define TMSIRQ                 120             /* Timestamp timer IRQ */
+
+#define        TMTICR                  GxICR(TMTIRQ)   /* OS Tick timer   uflow intr ctrl reg */
+#define        TMSICR                  GxICR(TMSIRQ)   /* Timestamp timer uflow intr ctrl reg */
+#endif /* CONFIG_AM34_2 */
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_TIMER_REGS_H */
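
As a usage illustration of the new AM34 timer definitions above, here is a hedged sketch of bringing up 16-bit timer 12: select a clock source, load the base (reload) register, latch it into the binary counter, then enable counting. The helper name and the exact write sequence are assumptions made for illustration, not code from this patch.

static void start_timer12(u16 period)
{
	TM12BR = period;					/* reload value */
	TM12MD = TM12MD_SRC_IOCLK_8 | TM12MD_INIT_COUNTER;	/* copy TM12BR into TM12BC */
	TM12MD = TM12MD_SRC_IOCLK_8 | TM12MD_COUNT_ENABLE;	/* start the countdown */
}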
index 8d031f9..bd4e90d 100644 (file)
 
 #define TICK_SIZE (tick_nsec / 1000)
 
-#define CLOCK_TICK_RATE 1193180 /* Underlying HZ - this should probably be set
-                                * to something appropriate, but what? */
-
-extern cycles_t cacheflush_time;
+#define CLOCK_TICK_RATE MN10300_JCCLK /* Underlying HZ */
 
 #ifdef __KERNEL__
 
+extern cycles_t cacheflush_time;
+
 static inline cycles_t get_cycles(void)
 {
        return read_timestamp_counter();
 }
 
+extern int init_clockevents(void);
+extern int init_clocksource(void);
+
+static inline void setup_jiffies_interrupt(int irq,
+                                          struct irqaction *action)
+{
+       u16 tmp;
+       setup_irq(irq, action);
+       set_intr_level(irq, NUM2GxICR_LEVEL(CONFIG_TIMER_IRQ_LEVEL));
+       GxICR(irq) |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
+       tmp = GxICR(irq);
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_TIMEX_H */
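
setup_jiffies_interrupt() above bundles the three steps a tick source needs: register the irqaction, raise the line to CONFIG_TIMER_IRQ_LEVEL, and enable it in the interrupt controller. The clockevent driver added later in this patch (cevt-mn10300.c) uses it exactly this way; here is a condensed sketch of that call site, with the wrapper function name invented for illustration.

static irqreturn_t timer_interrupt(int irq, void *dev_id);	/* tick handler */

static struct irqaction jiffies_action = {
	.handler	= timer_interrupt,
	.flags		= IRQF_DISABLED | IRQF_SHARED | IRQF_TIMER,
	.name		= "CPU0 Timer",
};

static void __init start_jiffies_tick(void)
{
	reload_jiffies_counter(MN10300_JC_PER_HZ - 1);	/* program one tick period */
	setup_jiffies_interrupt(TMJCIRQ, &jiffies_action);
}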
index 1a7e292..efddd6e 100644 (file)
 #ifndef _ASM_TLBFLUSH_H
 #define _ASM_TLBFLUSH_H
 
+#include <linux/mm.h>
 #include <asm/processor.h>
 
-#define __flush_tlb()                                          \
-do {                                                           \
-       int w;                                                  \
-       __asm__ __volatile__                                    \
-               ("      mov %1,%0               \n"             \
-                "      or %2,%0                \n"             \
-                "      mov %0,%1               \n"             \
-                : "=d"(w)                                      \
-                : "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV)      \
-                : "cc", "memory"                               \
-                );                                             \
-} while (0)
+struct tlb_state {
+       struct mm_struct        *active_mm;
+       int                     state;
+};
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 
-#define __flush_tlb_all() __flush_tlb()
-#define __flush_tlb_one(addr) __flush_tlb()
+/**
+ * local_flush_tlb - Flush the current MM's entries from the local CPU's TLBs
+ */
+static inline void local_flush_tlb(void)
+{
+       int w;
+       asm volatile(
+               "       mov     %1,%0           \n"
+               "       or      %2,%0           \n"
+               "       mov     %0,%1           \n"
+               : "=d"(w)
+               : "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV)
+               : "cc", "memory");
+}
+
+/**
+ * local_flush_tlb_all - Flush all entries from the local CPU's TLBs
+ */
+static inline void local_flush_tlb_all(void)
+{
+       local_flush_tlb();
+}
 
+/**
+ * local_flush_tlb_one - Flush one entry from the local CPU's TLBs
+ */
+static inline void local_flush_tlb_one(unsigned long addr)
+{
+       local_flush_tlb();
+}
+
+/**
+ * local_flush_tlb_page - Flush a page's entry from the local CPU's TLBs
+ * @mm: The MM to flush for
+ * @addr: The address of the target page in RAM (not its page struct)
+ */
+static inline
+void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr)
+{
+       unsigned long pteu, flags, cnx;
+
+       addr &= PAGE_MASK;
+
+       local_irq_save(flags);
+
+       cnx = 1;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+       cnx = mm->context.tlbpid[smp_processor_id()];
+#endif
+       if (cnx) {
+               pteu = addr;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+               pteu |= cnx & xPTEU_PID;
+#endif
+               IPTEU = pteu;
+               DPTEU = pteu;
+               if (IPTEL & xPTEL_V)
+                       IPTEL = 0;
+               if (DPTEL & xPTEL_V)
+                       DPTEL = 0;
+       }
+       local_irq_restore(flags);
+}
 
 /*
  * TLB flushing:
@@ -40,41 +94,61 @@ do {                                                                \
  *  - flush_tlb_range(mm, start, end) flushes a range of pages
  *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
  */
-#define flush_tlb_all()                                \
-do {                                           \
-       preempt_disable();                      \
-       __flush_tlb_all();                      \
-       preempt_enable();                       \
-} while (0)
-
-#define flush_tlb_mm(mm)                       \
-do {                                           \
-       preempt_disable();                      \
-       __flush_tlb_all();                      \
-       preempt_enable();                       \
-} while (0)
-
-#define flush_tlb_range(vma, start, end)                       \
-do {                                                           \
-       unsigned long __s __attribute__((unused)) = (start);    \
-       unsigned long __e __attribute__((unused)) = (end);      \
-       preempt_disable();                                      \
-       __flush_tlb_all();                                      \
-       preempt_enable();                                       \
-} while (0)
-
-
-#define __flush_tlb_global()                   flush_tlb_all()
-#define flush_tlb()                            flush_tlb_all()
-#define flush_tlb_kernel_range(start, end)                     \
-do {                                                           \
-       unsigned long __s __attribute__((unused)) = (start);    \
-       unsigned long __e __attribute__((unused)) = (end);      \
-       flush_tlb_all();                                        \
-} while (0)
-
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
-
-#define flush_tlb_pgtables(mm, start, end)     do {} while (0)
+#ifdef CONFIG_SMP
+
+#include <asm/smp.h>
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_current_task(void);
+extern void flush_tlb_mm(struct mm_struct *);
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+
+#define flush_tlb()            flush_tlb_current_task()
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+                                  unsigned long start, unsigned long end)
+{
+       flush_tlb_mm(vma->vm_mm);
+}
+
+#else   /* CONFIG_SMP */
+
+static inline void flush_tlb_all(void)
+{
+       preempt_disable();
+       local_flush_tlb_all();
+       preempt_enable();
+}
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+       preempt_disable();
+       local_flush_tlb_all();
+       preempt_enable();
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+                                  unsigned long start, unsigned long end)
+{
+       preempt_disable();
+       local_flush_tlb_all();
+       preempt_enable();
+}
+
+#define flush_tlb_page(vma, addr)      local_flush_tlb_page((vma)->vm_mm, addr)
+#define flush_tlb()                    flush_tlb_all()
+
+#endif /* CONFIG_SMP */
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+                                         unsigned long end)
+{
+       flush_tlb_all();
+}
+
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
+                                     unsigned long start, unsigned long end)
+{
+}
 
 #endif /* _ASM_TLBFLUSH_H */
index 197a7af..679dee0 100644 (file)
@@ -14,9 +14,8 @@
 /*
  * User space memory access functions
  */
-#include <linux/sched.h>
+#include <linux/thread_info.h>
 #include <asm/page.h>
-#include <asm/pgtable.h>
 #include <asm/errno.h>
 
 #define VERIFY_READ 0
@@ -29,7 +28,6 @@
  *
  * For historical reasons, these macros are grossly misnamed.
  */
-
 #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
 
 #define KERNEL_XDS     MAKE_MM_SEG(0xBFFFFFFF)
@@ -377,7 +375,7 @@ unsigned long __generic_copy_to_user_nocheck(void *to, const void *from,
 
 
 #if 0
-#error don't use - these macros don't increment to & from pointers
+#error "don't use - these macros don't increment to & from pointers"
 /* Optimize just a little bit when we know the size of the move. */
 #define __constant_copy_user(to, from, size)   \
 do {                                           \
index 23f2ab6..8f5f1e8 100644 (file)
@@ -3,13 +3,16 @@
 #
 extra-y := head.o init_task.o vmlinux.lds
 
-obj-y   := process.o signal.o entry.o fpu.o traps.o irq.o \
+fpu-obj-y := fpu-nofpu.o fpu-nofpu-low.o
+fpu-obj-$(CONFIG_FPU) := fpu.o fpu-low.o
+
+obj-y   := process.o signal.o entry.o traps.o irq.o \
           ptrace.o setup.o time.o sys_mn10300.o io.o kthread.o \
-          switch_to.o mn10300_ksyms.o kernel_execve.o
+          switch_to.o mn10300_ksyms.o kernel_execve.o $(fpu-obj-y)
 
-obj-$(CONFIG_MN10300_WD_TIMER) += mn10300-watchdog.o mn10300-watchdog-low.o
+obj-$(CONFIG_SMP) += smp.o smp-low.o
 
-obj-$(CONFIG_FPU) += fpu-low.o
+obj-$(CONFIG_MN10300_WD_TIMER) += mn10300-watchdog.o mn10300-watchdog-low.o
 
 obj-$(CONFIG_MN10300_TTYSM) += mn10300-serial.o mn10300-serial-low.o \
                               mn10300-debug.o
@@ -17,7 +20,7 @@ obj-$(CONFIG_GDBSTUB) += gdb-stub.o gdb-low.o
 obj-$(CONFIG_GDBSTUB_ON_TTYSx) += gdb-io-serial.o gdb-io-serial-low.o
 obj-$(CONFIG_GDBSTUB_ON_TTYSMx) += gdb-io-ttysm.o gdb-io-ttysm-low.o
 
-ifneq ($(CONFIG_MN10300_CACHE_DISABLED),y)
+ifeq ($(CONFIG_MN10300_CACHE_ENABLED),y)
 obj-$(CONFIG_GDBSTUB) += gdb-cache.o
 endif
 
@@ -25,3 +28,5 @@ obj-$(CONFIG_MN10300_RTC) += rtc.o
 obj-$(CONFIG_PROFILE) += profile.o profile-low.o
 obj-$(CONFIG_MODULES) += module.o
 obj-$(CONFIG_KPROBES) += kprobes.o
+obj-$(CONFIG_CSRC_MN10300) += csrc-mn10300.o
+obj-$(CONFIG_CEVT_MN10300) += cevt-mn10300.o
index 02dc7e4..96f24fa 100644 (file)
@@ -23,6 +23,7 @@ void foo(void)
 
        OFFSET(TI_task,                 thread_info, task);
        OFFSET(TI_exec_domain,          thread_info, exec_domain);
+       OFFSET(TI_frame,                thread_info, frame);
        OFFSET(TI_flags,                thread_info, flags);
        OFFSET(TI_cpu,                  thread_info, cpu);
        OFFSET(TI_preempt_count,        thread_info, preempt_count);
@@ -66,7 +67,15 @@ void foo(void)
        OFFSET(THREAD_SP,               thread_struct, sp);
        OFFSET(THREAD_A3,               thread_struct, a3);
        OFFSET(THREAD_USP,              thread_struct, usp);
-       OFFSET(THREAD_FRAME,            thread_struct, __frame);
+#ifdef CONFIG_FPU
+       OFFSET(THREAD_FPU_FLAGS,        thread_struct, fpu_flags);
+       OFFSET(THREAD_FPU_STATE,        thread_struct, fpu_state);
+       DEFINE(__THREAD_USING_FPU,      THREAD_USING_FPU);
+       DEFINE(__THREAD_HAS_FPU,        THREAD_HAS_FPU);
+#endif /* CONFIG_FPU */
+       BLANK();
+
+       OFFSET(TASK_THREAD,             task_struct, thread);
        BLANK();
 
        DEFINE(CLONE_VM_asm,            CLONE_VM);
diff --git a/arch/mn10300/kernel/cevt-mn10300.c b/arch/mn10300/kernel/cevt-mn10300.c
new file mode 100644 (file)
index 0000000..d4cb535
--- /dev/null
@@ -0,0 +1,131 @@
+/* MN10300 clockevents
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by Mark Salter (msalter@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <asm/timex.h>
+#include "internal.h"
+
+#ifdef CONFIG_SMP
+#if (CONFIG_NR_CPUS > 2) && !defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+#error "This doesn't scale well! Need per-core local timers."
+#endif
+#else /* CONFIG_SMP */
+#define stop_jiffies_counter1()
+#define reload_jiffies_counter1(x)
+#define TMJC1IRQ TMJCIRQ
+#endif
+
+
+static int next_event(unsigned long delta,
+                     struct clock_event_device *evt)
+{
+       unsigned int cpu = smp_processor_id();
+
+       if (cpu == 0) {
+               stop_jiffies_counter();
+               reload_jiffies_counter(delta - 1);
+       } else {
+               stop_jiffies_counter1();
+               reload_jiffies_counter1(delta - 1);
+       }
+       return 0;
+}
+
+static void set_clock_mode(enum clock_event_mode mode,
+                          struct clock_event_device *evt)
+{
+       /* Nothing to do ...  */
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, mn10300_clockevent_device);
+static DEFINE_PER_CPU(struct irqaction, timer_irq);
+
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+       struct clock_event_device *cd;
+       unsigned int cpu = smp_processor_id();
+
+       if (cpu == 0)
+               stop_jiffies_counter();
+       else
+               stop_jiffies_counter1();
+
+       cd = &per_cpu(mn10300_clockevent_device, cpu);
+       cd->event_handler(cd);
+
+       return IRQ_HANDLED;
+}
+
+static void event_handler(struct clock_event_device *dev)
+{
+}
+
+int __init init_clockevents(void)
+{
+       struct clock_event_device *cd;
+       struct irqaction *iact;
+       unsigned int cpu = smp_processor_id();
+
+       cd = &per_cpu(mn10300_clockevent_device, cpu);
+
+       if (cpu == 0) {
+               stop_jiffies_counter();
+               cd->irq = TMJCIRQ;
+       } else {
+               stop_jiffies_counter1();
+               cd->irq = TMJC1IRQ;
+       }
+
+       cd->name                = "Timestamp";
+       cd->features            = CLOCK_EVT_FEAT_ONESHOT;
+
+       /* Calculate the min / max delta */
+       clockevent_set_clock(cd, MN10300_JCCLK);
+
+       cd->max_delta_ns        = clockevent_delta2ns(TMJCBR_MAX, cd);
+       cd->min_delta_ns        = clockevent_delta2ns(100, cd);
+
+       cd->rating              = 200;
+       cd->cpumask             = cpumask_of(smp_processor_id());
+       cd->set_mode            = set_clock_mode;
+       cd->event_handler       = event_handler;
+       cd->set_next_event      = next_event;
+
+       iact = &per_cpu(timer_irq, cpu);
+       iact->flags = IRQF_DISABLED | IRQF_SHARED | IRQF_TIMER;
+       iact->handler = timer_interrupt;
+
+       clockevents_register_device(cd);
+
+#if defined(CONFIG_SMP) && !defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+       /* setup timer irq affinity so it only runs on this cpu */
+       {
+               struct irq_desc *desc;
+               desc = irq_to_desc(cd->irq);
+               cpumask_copy(desc->affinity, cpumask_of(cpu));
+               iact->flags |= IRQF_NOBALANCING;
+       }
+#endif
+
+       if (cpu == 0) {
+               reload_jiffies_counter(MN10300_JC_PER_HZ - 1);
+               iact->name = "CPU0 Timer";
+       } else {
+               reload_jiffies_counter1(MN10300_JC_PER_HZ - 1);
+               iact->name = "CPU1 Timer";
+       }
+
+       setup_jiffies_interrupt(cd->irq, iact);
+
+       return 0;
+}
diff --git a/arch/mn10300/kernel/csrc-mn10300.c b/arch/mn10300/kernel/csrc-mn10300.c
new file mode 100644 (file)
index 0000000..ba2f0c4
--- /dev/null
@@ -0,0 +1,35 @@
+/* MN10300 clocksource
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by Mark Salter (msalter@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <asm/timex.h>
+#include "internal.h"
+
+static cycle_t mn10300_read(struct clocksource *cs)
+{
+       return read_timestamp_counter();
+}
+
+static struct clocksource clocksource_mn10300 = {
+       .name   = "TSC",
+       .rating = 200,
+       .read   = mn10300_read,
+       .mask   = CLOCKSOURCE_MASK(32),
+       .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+int __init init_clocksource(void)
+{
+       startup_timestamp_counter();
+       clocksource_set_clock(&clocksource_mn10300, MN10300_TSCCLK);
+       clocksource_register(&clocksource_mn10300);
+       return 0;
+}
index 3d394b4..f00b9ba 100644 (file)
 #include <asm/asm-offsets.h>
 #include <asm/frame.inc>
 
+#if defined(CONFIG_SMP) && defined(CONFIG_GDBSTUB)
+#include <asm/gdb-stub.h>
+#endif /* CONFIG_SMP && CONFIG_GDBSTUB */
+
 #ifdef CONFIG_PREEMPT
-#define preempt_stop           __cli
+#define preempt_stop           LOCAL_IRQ_DISABLE
 #else
 #define preempt_stop
 #define resume_kernel          restore_all
 #endif
 
-       .macro __cli
-       and     ~EPSW_IM,epsw
-       or      EPSW_IE|MN10300_CLI_LEVEL,epsw
-       nop
-       nop
-       nop
-       .endm
-       .macro __sti
-       or      EPSW_IE|EPSW_IM_7,epsw
-       .endm
-
-
        .am33_2
 
 ###############################################################################
@@ -88,7 +80,7 @@ syscall_call:
 syscall_exit:
        # make sure we don't miss an interrupt setting need_resched or
        # sigpending between sampling and the rti
-       __cli
+       LOCAL_IRQ_DISABLE
        mov     (TI_flags,a2),d2
        btst    _TIF_ALLWORK_MASK,d2
        bne     syscall_exit_work
@@ -105,7 +97,7 @@ restore_all:
 syscall_exit_work:
        btst    _TIF_SYSCALL_TRACE,d2
        beq     work_pending
-       __sti                           # could let syscall_trace_exit() call
+       LOCAL_IRQ_ENABLE                # could let syscall_trace_exit() call
                                        # schedule() instead
        mov     fp,d0
        call    syscall_trace_exit[],0  # do_syscall_trace(regs)
@@ -121,7 +113,7 @@ work_resched:
 
        # make sure we don't miss an interrupt setting need_resched or
        # sigpending between sampling and the rti
-       __cli
+       LOCAL_IRQ_DISABLE
 
        # is there any work to be done other than syscall tracing?
        mov     (TI_flags,a2),d2
@@ -168,7 +160,7 @@ ret_from_intr:
 ENTRY(resume_userspace)
        # make sure we don't miss an interrupt setting need_resched or
        # sigpending between sampling and the rti
-       __cli
+       LOCAL_IRQ_DISABLE
 
        # is there any work to be done on int/exception return?
        mov     (TI_flags,a2),d2
@@ -178,7 +170,7 @@ ENTRY(resume_userspace)
 
 #ifdef CONFIG_PREEMPT
 ENTRY(resume_kernel)
-       __cli
+       LOCAL_IRQ_DISABLE
        mov     (TI_preempt_count,a2),d0        # non-zero preempt_count ?
        cmp     0,d0
        bne     restore_all
@@ -216,31 +208,6 @@ ENTRY(irq_handler)
 
 ###############################################################################
 #
-# Monitor Signal handler entry point
-#
-###############################################################################
-ENTRY(monitor_signal)
-       movbu   (0xae000001),d1
-       cmp     1,d1
-       beq     monsignal
-       ret     [],0
-
-monsignal:
-       or      EPSW_NMID,epsw
-       mov     d0,a0
-       mov     a0,sp
-       mov     (REG_EPSW,fp),d1
-       and     ~EPSW_nSL,d1
-       mov     d1,(REG_EPSW,fp)
-       movm    (sp),[d2,d3,a2,a3,exreg0,exreg1,exother]
-       mov     (sp),a1
-       mov     a1,usp
-       movm    (sp),[other]
-       add     4,sp
-here:  jmp     0x8e000008-here+0x8e000008
-
-###############################################################################
-#
 # Double Fault handler entry point
 # - note that there will not be a stack, D0/A0 will hold EPSW/PC as were
 #
@@ -276,6 +243,10 @@ double_fault_loop:
 ENTRY(raw_bus_error)
        add     -4,sp
        mov     d0,(sp)
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+       mov     (MMUCTR),d0
+       mov     d0,(MMUCTR)
+#endif
        mov     (BCBERR),d0             # what
        btst    BCBERR_BEMR_DMA,d0      # see if it was an external bus error
        beq     __common_exception_aux  # it wasn't
@@ -302,11 +273,88 @@ ENTRY(nmi_handler)
        add     -4,sp
        mov     d0,(sp)
        mov     (TBR),d0
+
+#ifdef CONFIG_SMP
+       add     -4,sp
+       mov     d0,(sp)                 # save d0(TBR)
+       movhu   (NMIAGR),d0
+       and     NMIAGR_GN,d0
+       lsr     0x2,d0
+       cmp     CALL_FUNCTION_NMI_IPI,d0
+       bne     5f                      # if not call function, jump
+
+       # function call nmi ipi
+       add     4,sp                    # no need to store TBR
+       mov     GxICR_DETECT,d0         # clear NMI request
+       movbu   d0,(GxICR(CALL_FUNCTION_NMI_IPI))
+       movhu   (GxICR(CALL_FUNCTION_NMI_IPI)),d0
+       and     ~EPSW_NMID,epsw         # enable NMI
+
+       mov     (sp),d0                 # restore d0
+       SAVE_ALL
+       call    smp_nmi_call_function_interrupt[],0
+       RESTORE_ALL
+
+5:
+#ifdef CONFIG_GDBSTUB
+       cmp     GDB_NMI_IPI,d0
+       bne     3f                      # if not gdb nmi ipi, jump
+
+       # gdb nmi ipi
+       add     4,sp                    # no need to store TBR
+       mov     GxICR_DETECT,d0         # clear NMI
+       movbu   d0,(GxICR(GDB_NMI_IPI))
+       movhu   (GxICR(GDB_NMI_IPI)),d0
+       and     ~EPSW_NMID,epsw         # enable NMI
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+       mov     (gdbstub_nmi_opr_type),d0
+       cmp     GDBSTUB_NMI_CACHE_PURGE,d0
+       bne     4f                      # if not gdb cache purge, jump
+
+       # gdb cache purge nmi ipi
+       add     -20,sp
+       mov     d1,(4,sp)
+       mov     a0,(8,sp)
+       mov     a1,(12,sp)
+       mov     mdr,d0
+       mov     d0,(16,sp)
+       call    gdbstub_local_purge_cache[],0
+       mov     0x1,d0
+       mov     (CPUID),d1
+       asl     d1,d0
+       mov     gdbstub_nmi_cpumask,a0
+       bclr    d0,(a0)
+       mov     (4,sp),d1
+       mov     (8,sp),a0
+       mov     (12,sp),a1
+       mov     (16,sp),d0
+       mov     d0,mdr
+       add     20,sp
+       mov     (sp),d0
+       add     4,sp
+       rti
+4:
+#endif /* CONFIG_MN10300_CACHE_ENABLED */
+       # gdb wait nmi ipi
+       mov     (sp),d0
+       SAVE_ALL
+       call    gdbstub_nmi_wait[],0
+       RESTORE_ALL
+3:
+#endif /* CONFIG_GDBSTUB */
+       mov     (sp),d0                 # restore TBR to d0
+       add     4,sp
+#endif /* CONFIG_SMP */
+
        bra     __common_exception_nonmi
 
 ENTRY(__common_exception)
        add     -4,sp
        mov     d0,(sp)
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+       mov     (MMUCTR),d0
+       mov     d0,(MMUCTR)
+#endif
 
 __common_exception_aux:
        mov     (TBR),d0
@@ -331,15 +379,21 @@ __common_exception_nonmi:
        mov     d0,(REG_ORIG_D0,fp)
 
 #ifdef CONFIG_GDBSTUB
+#ifdef CONFIG_SMP
+       call    gdbstub_busy_check[],0
+       and     d0,d0                   # check return value
+       beq     2f
+#else  /* CONFIG_SMP */
        btst    0x01,(gdbstub_busy)
        beq     2f
+#endif /* CONFIG_SMP */
        and     ~EPSW_IE,epsw
        mov     fp,d0
        mov     a2,d1
        call    gdbstub_exception[],0   # gdbstub itself caused an exception
        bra     restore_all
 2:
-#endif
+#endif /* CONFIG_GDBSTUB */
 
        mov     fp,d0                   # arg 0: stacked register file
        mov     a2,d1                   # arg 1: exception number
@@ -374,11 +428,7 @@ ENTRY(set_excp_vector)
        add     exception_table,d0
        mov     d1,(d0)
        mov     4,d1
-#if defined(CONFIG_MN10300_CACHE_WBACK)
-       jmp     mn10300_dcache_flush_inv_range2
-#else
        ret     [],0
-#endif
 
 ###############################################################################
 #
index 96cfd47..78df25c 100644 (file)
@@ -8,25 +8,14 @@
  * as published by the Free Software Foundation; either version
  * 2 of the Licence, or (at your option) any later version.
  */
+#include <linux/linkage.h>
 #include <asm/cpu-regs.h>
+#include <asm/smp.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm/frame.inc>
 
-###############################################################################
-#
-# void fpu_init_state(void)
-# - initialise the FPU
-#
-###############################################################################
-       .globl  fpu_init_state
-       .type   fpu_init_state,@function
-fpu_init_state:
-       mov     epsw,d0
-       or      EPSW_FE,epsw
-
-#ifdef CONFIG_MN10300_PROC_MN103E010
-       nop
-       nop
-       nop
-#endif
+.macro FPU_INIT_STATE_ALL
        fmov    0,fs0
        fmov    fs0,fs1
        fmov    fs0,fs2
@@ -60,7 +49,100 @@ fpu_init_state:
        fmov    fs0,fs30
        fmov    fs0,fs31
        fmov    FPCR_INIT,fpcr
+.endm
+
+.macro FPU_SAVE_ALL areg,dreg
+       fmov    fs0,(\areg+)
+       fmov    fs1,(\areg+)
+       fmov    fs2,(\areg+)
+       fmov    fs3,(\areg+)
+       fmov    fs4,(\areg+)
+       fmov    fs5,(\areg+)
+       fmov    fs6,(\areg+)
+       fmov    fs7,(\areg+)
+       fmov    fs8,(\areg+)
+       fmov    fs9,(\areg+)
+       fmov    fs10,(\areg+)
+       fmov    fs11,(\areg+)
+       fmov    fs12,(\areg+)
+       fmov    fs13,(\areg+)
+       fmov    fs14,(\areg+)
+       fmov    fs15,(\areg+)
+       fmov    fs16,(\areg+)
+       fmov    fs17,(\areg+)
+       fmov    fs18,(\areg+)
+       fmov    fs19,(\areg+)
+       fmov    fs20,(\areg+)
+       fmov    fs21,(\areg+)
+       fmov    fs22,(\areg+)
+       fmov    fs23,(\areg+)
+       fmov    fs24,(\areg+)
+       fmov    fs25,(\areg+)
+       fmov    fs26,(\areg+)
+       fmov    fs27,(\areg+)
+       fmov    fs28,(\areg+)
+       fmov    fs29,(\areg+)
+       fmov    fs30,(\areg+)
+       fmov    fs31,(\areg+)
+       fmov    fpcr,\dreg
+       mov     \dreg,(\areg)
+.endm
+
+.macro FPU_RESTORE_ALL areg,dreg
+       fmov    (\areg+),fs0
+       fmov    (\areg+),fs1
+       fmov    (\areg+),fs2
+       fmov    (\areg+),fs3
+       fmov    (\areg+),fs4
+       fmov    (\areg+),fs5
+       fmov    (\areg+),fs6
+       fmov    (\areg+),fs7
+       fmov    (\areg+),fs8
+       fmov    (\areg+),fs9
+       fmov    (\areg+),fs10
+       fmov    (\areg+),fs11
+       fmov    (\areg+),fs12
+       fmov    (\areg+),fs13
+       fmov    (\areg+),fs14
+       fmov    (\areg+),fs15
+       fmov    (\areg+),fs16
+       fmov    (\areg+),fs17
+       fmov    (\areg+),fs18
+       fmov    (\areg+),fs19
+       fmov    (\areg+),fs20
+       fmov    (\areg+),fs21
+       fmov    (\areg+),fs22
+       fmov    (\areg+),fs23
+       fmov    (\areg+),fs24
+       fmov    (\areg+),fs25
+       fmov    (\areg+),fs26
+       fmov    (\areg+),fs27
+       fmov    (\areg+),fs28
+       fmov    (\areg+),fs29
+       fmov    (\areg+),fs30
+       fmov    (\areg+),fs31
+       mov     (\areg),\dreg
+       fmov    \dreg,fpcr
+.endm
 
+###############################################################################
+#
+# void fpu_init_state(void)
+# - initialise the FPU
+#
+###############################################################################
+       .globl  fpu_init_state
+       .type   fpu_init_state,@function
+fpu_init_state:
+       mov     epsw,d0
+       or      EPSW_FE,epsw
+
+#ifdef CONFIG_MN10300_PROC_MN103E010
+       nop
+       nop
+       nop
+#endif
+       FPU_INIT_STATE_ALL
 #ifdef CONFIG_MN10300_PROC_MN103E010
        nop
        nop
@@ -89,40 +171,7 @@ fpu_save:
        nop
 #endif
        mov     d0,a0
-       fmov    fs0,(a0+)
-       fmov    fs1,(a0+)
-       fmov    fs2,(a0+)
-       fmov    fs3,(a0+)
-       fmov    fs4,(a0+)
-       fmov    fs5,(a0+)
-       fmov    fs6,(a0+)
-       fmov    fs7,(a0+)
-       fmov    fs8,(a0+)
-       fmov    fs9,(a0+)
-       fmov    fs10,(a0+)
-       fmov    fs11,(a0+)
-       fmov    fs12,(a0+)
-       fmov    fs13,(a0+)
-       fmov    fs14,(a0+)
-       fmov    fs15,(a0+)
-       fmov    fs16,(a0+)
-       fmov    fs17,(a0+)
-       fmov    fs18,(a0+)
-       fmov    fs19,(a0+)
-       fmov    fs20,(a0+)
-       fmov    fs21,(a0+)
-       fmov    fs22,(a0+)
-       fmov    fs23,(a0+)
-       fmov    fs24,(a0+)
-       fmov    fs25,(a0+)
-       fmov    fs26,(a0+)
-       fmov    fs27,(a0+)
-       fmov    fs28,(a0+)
-       fmov    fs29,(a0+)
-       fmov    fs30,(a0+)
-       fmov    fs31,(a0+)
-       fmov    fpcr,d0
-       mov     d0,(a0)
+       FPU_SAVE_ALL    a0,d0
 #ifdef CONFIG_MN10300_PROC_MN103E010
        nop
        nop
@@ -135,63 +184,75 @@ fpu_save:
 
 ###############################################################################
 #
-# void fpu_restore(struct fpu_state_struct *)
-# - restore the fpu state
-# - note that an FPU Operational exception might occur during this process
+# void fpu_disabled(void)
+# - handle an exception due to the FPU being disabled
+#   when CONFIG_FPU is enabled
 #
 ###############################################################################
-       .globl  fpu_restore
-       .type   fpu_restore,@function
-fpu_restore:
-       mov     epsw,d1
-       or      EPSW_FE,epsw            /* enable the FPU so we can access it */
-
-#ifdef CONFIG_MN10300_PROC_MN103E010
+       .type   fpu_disabled,@function
+       .globl  fpu_disabled
+fpu_disabled:
+       or      EPSW_nAR|EPSW_FE,epsw
        nop
        nop
-#endif
-       mov     d0,a0
-       fmov    (a0+),fs0
-       fmov    (a0+),fs1
-       fmov    (a0+),fs2
-       fmov    (a0+),fs3
-       fmov    (a0+),fs4
-       fmov    (a0+),fs5
-       fmov    (a0+),fs6
-       fmov    (a0+),fs7
-       fmov    (a0+),fs8
-       fmov    (a0+),fs9
-       fmov    (a0+),fs10
-       fmov    (a0+),fs11
-       fmov    (a0+),fs12
-       fmov    (a0+),fs13
-       fmov    (a0+),fs14
-       fmov    (a0+),fs15
-       fmov    (a0+),fs16
-       fmov    (a0+),fs17
-       fmov    (a0+),fs18
-       fmov    (a0+),fs19
-       fmov    (a0+),fs20
-       fmov    (a0+),fs21
-       fmov    (a0+),fs22
-       fmov    (a0+),fs23
-       fmov    (a0+),fs24
-       fmov    (a0+),fs25
-       fmov    (a0+),fs26
-       fmov    (a0+),fs27
-       fmov    (a0+),fs28
-       fmov    (a0+),fs29
-       fmov    (a0+),fs30
-       fmov    (a0+),fs31
-       mov     (a0),d0
-       fmov    d0,fpcr
-#ifdef CONFIG_MN10300_PROC_MN103E010
        nop
+
+       mov     sp,a1
+       mov     (a1),d1                 /* get epsw of user context */
+       and     ~(THREAD_SIZE-1),a1     /* a1: (thread_info *ti) */
+       mov     (TI_task,a1),a2         /* a2: (task_struct *tsk) */
+       btst    EPSW_nSL,d1
+       beq     fpu_used_in_kernel
+
+       or      EPSW_FE,d1
+       mov     d1,(sp)
+       mov     (TASK_THREAD+THREAD_FPU_FLAGS,a2),d1
+#ifndef CONFIG_LAZY_SAVE_FPU
+       or      __THREAD_HAS_FPU,d1
+       mov     d1,(TASK_THREAD+THREAD_FPU_FLAGS,a2)
+#else  /* !CONFIG_LAZY_SAVE_FPU */
+       mov     (fpu_state_owner),a0
+       cmp     0,a0
+       beq     fpu_regs_save_end
+
+       mov     (TASK_THREAD+THREAD_UREGS,a0),a1
+       add     TASK_THREAD+THREAD_FPU_STATE,a0
+       FPU_SAVE_ALL a0,d0
+
+       mov     (REG_EPSW,a1),d0
+       and     ~EPSW_FE,d0
+       mov     d0,(REG_EPSW,a1)
+
+fpu_regs_save_end:
+       mov     a2,(fpu_state_owner)
+#endif /* !CONFIG_LAZY_SAVE_FPU */
+
+       btst    __THREAD_USING_FPU,d1
+       beq     fpu_regs_init
+       add     TASK_THREAD+THREAD_FPU_STATE,a2
+       FPU_RESTORE_ALL a2,d0
+       rti
+
+fpu_regs_init:
+       FPU_INIT_STATE_ALL
+       add     TASK_THREAD+THREAD_FPU_FLAGS,a2
+       bset    __THREAD_USING_FPU,(0,a2)
+       rti
+
+fpu_used_in_kernel:
+       and     ~(EPSW_nAR|EPSW_FE),epsw
        nop
        nop
-#endif
 
-       mov     d1,epsw
-       ret     [],0
+       add     -4,sp
+       SAVE_ALL
+       mov     -1,d0
+       mov     d0,(REG_ORIG_D0,fp)
+
+       and     ~EPSW_NMID,epsw
+
+       mov     fp,d0
+       call    fpu_disabled_in_kernel[],0
+       jmp     ret_from_exception
 
-       .size   fpu_restore,.-fpu_restore
+       .size   fpu_disabled,.-fpu_disabled
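
The rewritten fpu-low.S above factors the register save/restore sequences into the FPU_SAVE_ALL/FPU_RESTORE_ALL/FPU_INIT_STATE_ALL macros and moves the FPU-disabled exception handling into assembly: under CONFIG_LAZY_SAVE_FPU the previous owner's registers are flushed to its thread struct, ownership passes to the faulting user task, and that task's saved state is either reloaded or freshly initialised before the faulting instruction is retried with EPSW.FE set. A rough C-level model of that hand-off (hypothetical names only, not the kernel's code):

    /*
     * Hypothetical, self-contained C model of the lazy FPU hand-off that
     * fpu_disabled performs in assembly above.  None of these names are the
     * kernel's; the three *_fpu_registers() helpers stand in for the
     * FPU_SAVE_ALL / FPU_RESTORE_ALL / FPU_INIT_STATE_ALL macros.
     */
    #include <string.h>

    struct task {
            int using_fpu;                  /* __THREAD_USING_FPU analogue */
            unsigned int fpu_regs[33];      /* fs0..fs31 plus fpcr */
    };

    static struct task *fpu_owner;          /* fpu_state_owner analogue */
    static unsigned int hw_fpu[33];         /* stand-in for the register file */

    static void save_fpu_registers(unsigned int *to)
    {
            memcpy(to, hw_fpu, sizeof(hw_fpu));
    }

    static void load_fpu_registers(const unsigned int *from)
    {
            memcpy(hw_fpu, from, sizeof(hw_fpu));
    }

    static void init_fpu_registers(void)
    {
            memset(hw_fpu, 0, sizeof(hw_fpu));
    }

    void fpu_disabled_trap(struct task *current_task)
    {
            /* flush the previous owner's live registers to memory first */
            if (fpu_owner && fpu_owner != current_task)
                    save_fpu_registers(fpu_owner->fpu_regs);

            fpu_owner = current_task;

            if (current_task->using_fpu) {
                    load_fpu_registers(current_task->fpu_regs);
            } else {
                    init_fpu_registers();
                    current_task->using_fpu = 1;
            }
            /* the real handler then returns with EPSW.FE set so that the
             * faulting FPU instruction is retried with the unit enabled */
    }

Kernel-mode FPU use takes the fpu_used_in_kernel path instead, which hands off to fpu_disabled_in_kernel(); the fpu.c hunk below reduces that case to die_if_no_fixup().
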
diff --git a/arch/mn10300/kernel/fpu-nofpu-low.S b/arch/mn10300/kernel/fpu-nofpu-low.S
new file mode 100644 (file)
index 0000000..7ea087a
--- /dev/null
@@ -0,0 +1,39 @@
+/* MN10300 Low level FPU management operations
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/linkage.h>
+#include <asm/cpu-regs.h>
+#include <asm/smp.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm/frame.inc>
+
+###############################################################################
+#
+# void fpu_disabled(void)
+# - handle an exception due to the FPU being disabled
+#   when CONFIG_FPU is disabled
+#
+###############################################################################
+       .type   fpu_disabled,@function
+       .globl  fpu_disabled
+fpu_disabled:
+       add     -4,sp
+       SAVE_ALL
+       mov     -1,d0
+       mov     d0,(REG_ORIG_D0,fp)
+
+       and     ~EPSW_NMID,epsw
+
+       mov     fp,d0
+       call    unexpected_fpu_exception[],0
+       jmp     ret_from_exception
+
+       .size   fpu_disabled,.-fpu_disabled
diff --git a/arch/mn10300/kernel/fpu-nofpu.c b/arch/mn10300/kernel/fpu-nofpu.c
new file mode 100644 (file)
index 0000000..31c765b
--- /dev/null
@@ -0,0 +1,30 @@
+/* MN10300 FPU management
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <asm/fpu.h>
+
+/*
+ * handle an FPU operational exception
+ * - there's a possibility that if the FPU is asynchronous, the signal might
+ *   be meant for a process other than the current one
+ */
+asmlinkage
+void unexpected_fpu_exception(struct pt_regs *regs, enum exception_code code)
+{
+       panic("An FPU exception was received, but there's no FPU enabled.");
+}
+
+/*
+ * fill in the FPU structure for a core dump
+ */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpreg)
+{
+       return 0; /* not valid */
+}
index e705f25..5f9c3fa 100644 (file)
 #include <asm/fpu.h>
 #include <asm/elf.h>
 #include <asm/exceptions.h>
+#include <asm/system.h>
 
+#ifdef CONFIG_LAZY_SAVE_FPU
 struct task_struct *fpu_state_owner;
+#endif
 
 /*
- * handle an exception due to the FPU being disabled
+ * error functions in FPU disabled exception
  */
-asmlinkage void fpu_disabled(struct pt_regs *regs, enum exception_code code)
+asmlinkage void fpu_disabled_in_kernel(struct pt_regs *regs)
 {
-       struct task_struct *tsk = current;
-
-       if (!user_mode(regs))
-               die_if_no_fixup("An FPU Disabled exception happened in"
-                               " kernel space\n",
-                               regs, code);
-
-#ifdef CONFIG_FPU
-       preempt_disable();
-
-       /* transfer the last process's FPU state to memory */
-       if (fpu_state_owner) {
-               fpu_save(&fpu_state_owner->thread.fpu_state);
-               fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE;
-       }
-
-       /* the current process now owns the FPU state */
-       fpu_state_owner = tsk;
-       regs->epsw |= EPSW_FE;
-
-       /* load the FPU with the current process's FPU state or invent a new
-        * clean one if the process doesn't have one */
-       if (is_using_fpu(tsk)) {
-               fpu_restore(&tsk->thread.fpu_state);
-       } else {
-               fpu_init_state();
-               set_using_fpu(tsk);
-       }
-
-       preempt_enable();
-#else
-       {
-               siginfo_t info;
-
-               info.si_signo = SIGFPE;
-               info.si_errno = 0;
-               info.si_addr = (void *) tsk->thread.uregs->pc;
-               info.si_code = FPE_FLTINV;
-
-               force_sig_info(SIGFPE, &info, tsk);
-       }
-#endif  /* CONFIG_FPU */
+       die_if_no_fixup("An FPU Disabled exception happened in kernel space\n",
+                       regs, EXCEP_FPU_DISABLED);
 }
 
 /*
@@ -71,15 +34,16 @@ asmlinkage void fpu_disabled(struct pt_regs *regs, enum exception_code code)
  */
 asmlinkage void fpu_exception(struct pt_regs *regs, enum exception_code code)
 {
-       struct task_struct *tsk = fpu_state_owner;
+       struct task_struct *tsk = current;
        siginfo_t info;
+       u32 fpcr;
 
        if (!user_mode(regs))
                die_if_no_fixup("An FPU Operation exception happened in"
                                " kernel space\n",
                                regs, code);
 
-       if (!tsk)
+       if (!is_using_fpu(tsk))
                die_if_no_fixup("An FPU Operation exception happened,"
                                " but the FPU is not in use",
                                regs, code);
@@ -89,48 +53,45 @@ asmlinkage void fpu_exception(struct pt_regs *regs, enum exception_code code)
        info.si_addr = (void *) tsk->thread.uregs->pc;
        info.si_code = FPE_FLTINV;
 
-#ifdef CONFIG_FPU
-       {
-               u32 fpcr;
+       unlazy_fpu(tsk);
 
-               /* get FPCR (we need to enable the FPU whilst we do this) */
-               asm volatile("  or      %1,epsw         \n"
-#ifdef CONFIG_MN10300_PROC_MN103E010
-                            "  nop                     \n"
-                            "  nop                     \n"
-                            "  nop                     \n"
-#endif
-                            "  fmov    fpcr,%0         \n"
-#ifdef CONFIG_MN10300_PROC_MN103E010
-                            "  nop                     \n"
-                            "  nop                     \n"
-                            "  nop                     \n"
-#endif
-                            "  and     %2,epsw         \n"
-                            : "=&d"(fpcr)
-                            : "i"(EPSW_FE), "i"(~EPSW_FE)
-                            );
-
-               if (fpcr & FPCR_EC_Z)
-                       info.si_code = FPE_FLTDIV;
-               else if (fpcr & FPCR_EC_O)
-                       info.si_code = FPE_FLTOVF;
-               else if (fpcr & FPCR_EC_U)
-                       info.si_code = FPE_FLTUND;
-               else if (fpcr & FPCR_EC_I)
-                       info.si_code = FPE_FLTRES;
-       }
-#endif
+       fpcr = tsk->thread.fpu_state.fpcr;
+
+       if (fpcr & FPCR_EC_Z)
+               info.si_code = FPE_FLTDIV;
+       else if (fpcr & FPCR_EC_O)
+               info.si_code = FPE_FLTOVF;
+       else if (fpcr & FPCR_EC_U)
+               info.si_code = FPE_FLTUND;
+       else if (fpcr & FPCR_EC_I)
+               info.si_code = FPE_FLTRES;
 
        force_sig_info(SIGFPE, &info, tsk);
 }
 
 /*
+ * handle an FPU invalid_op exception
+ * - Derived from DO_EINFO() macro in arch/mn10300/kernel/traps.c
+ */
+asmlinkage void fpu_invalid_op(struct pt_regs *regs, enum exception_code code)
+{
+       siginfo_t info;
+
+       if (!user_mode(regs))
+               die_if_no_fixup("FPU invalid opcode", regs, code);
+
+       info.si_signo = SIGILL;
+       info.si_errno = 0;
+       info.si_code = ILL_COPROC;
+       info.si_addr = (void *) regs->pc;
+       force_sig_info(info.si_signo, &info, current);
+}
+
+/*
  * save the FPU state to a signal context
  */
 int fpu_setup_sigcontext(struct fpucontext *fpucontext)
 {
-#ifdef CONFIG_FPU
        struct task_struct *tsk = current;
 
        if (!is_using_fpu(tsk))
@@ -142,11 +103,19 @@ int fpu_setup_sigcontext(struct fpucontext *fpucontext)
         */
        preempt_disable();
 
+#ifndef CONFIG_LAZY_SAVE_FPU
+       if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+               fpu_save(&tsk->thread.fpu_state);
+               tsk->thread.uregs->epsw &= ~EPSW_FE;
+               tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+       }
+#else /* !CONFIG_LAZY_SAVE_FPU */
        if (fpu_state_owner == tsk) {
                fpu_save(&tsk->thread.fpu_state);
                fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE;
                fpu_state_owner = NULL;
        }
+#endif /* !CONFIG_LAZY_SAVE_FPU */
 
        preempt_enable();
 
@@ -161,9 +130,6 @@ int fpu_setup_sigcontext(struct fpucontext *fpucontext)
                return -1;
 
        return 1;
-#else
-       return 0;
-#endif
 }
 
 /*
@@ -171,17 +137,23 @@ int fpu_setup_sigcontext(struct fpucontext *fpucontext)
  */
 void fpu_kill_state(struct task_struct *tsk)
 {
-#ifdef CONFIG_FPU
        /* disown anything left in the FPU */
        preempt_disable();
 
+#ifndef CONFIG_LAZY_SAVE_FPU
+       if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+               tsk->thread.uregs->epsw &= ~EPSW_FE;
+               tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+       }
+#else /* !CONFIG_LAZY_SAVE_FPU */
        if (fpu_state_owner == tsk) {
                fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE;
                fpu_state_owner = NULL;
        }
+#endif /* !CONFIG_LAZY_SAVE_FPU */
 
        preempt_enable();
-#endif
+
        /* we no longer have a valid current FPU state */
        clear_using_fpu(tsk);
 }
@@ -195,8 +167,7 @@ int fpu_restore_sigcontext(struct fpucontext *fpucontext)
        int ret;
 
        /* load up the old FPU state */
-       ret = copy_from_user(&tsk->thread.fpu_state,
-                            fpucontext,
+       ret = copy_from_user(&tsk->thread.fpu_state, fpucontext,
                             min(sizeof(struct fpu_state_struct),
                                 sizeof(struct fpucontext)));
        if (!ret)
index 4998b24..b1d0152 100644 (file)
@@ -18,6 +18,7 @@
 #include <asm/thread_info.h>
 #include <asm/frame.inc>
 #include <asm/intctl-regs.h>
+#include <asm/irqflags.h>
 #include <unit/serial.h>
 
        .text
@@ -69,7 +70,7 @@ gdbstub_io_rx_overflow:
        bra     gdbstub_io_rx_done
 
 gdbstub_io_rx_enter:
-       or      EPSW_IE|EPSW_IM_1,epsw
+       LOCAL_CHANGE_INTR_MASK_LEVEL(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL+1))
        add     -4,sp
        SAVE_ALL
 
@@ -80,7 +81,7 @@ gdbstub_io_rx_enter:
        mov     fp,d0
        call    gdbstub_rx_irq[],0      # gdbstub_rx_irq(regs,excep)
 
-       and     ~EPSW_IE,epsw
+       LOCAL_CLI
        bclr    0x01,(gdbstub_busy)
 
        .globl gdbstub_return
index ae663dc..0d5d63c 100644 (file)
@@ -23,6 +23,7 @@
 #include <asm/exceptions.h>
 #include <asm/serial-regs.h>
 #include <unit/serial.h>
+#include <asm/smp.h>
 
 /*
  * initialise the GDB stub
@@ -45,22 +46,34 @@ void gdbstub_io_init(void)
        XIRQxICR(GDBPORT_SERIAL_IRQ) = 0;
        tmp = XIRQxICR(GDBPORT_SERIAL_IRQ);
 
+#if   CONFIG_GDBSTUB_IRQ_LEVEL == 0
        IVAR0 = EXCEP_IRQ_LEVEL0;
-       set_intr_stub(EXCEP_IRQ_LEVEL0, gdbstub_io_rx_handler);
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 1
+       IVAR1 = EXCEP_IRQ_LEVEL1;
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 2
+       IVAR2 = EXCEP_IRQ_LEVEL2;
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 3
+       IVAR3 = EXCEP_IRQ_LEVEL3;
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 4
+       IVAR4 = EXCEP_IRQ_LEVEL4;
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 5
+       IVAR5 = EXCEP_IRQ_LEVEL5;
+#else
+#error "Unknown irq level for gdbstub."
+#endif
+
+       set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL),
+               gdbstub_io_rx_handler);
 
        XIRQxICR(GDBPORT_SERIAL_IRQ) &= ~GxICR_REQUEST;
-       XIRQxICR(GDBPORT_SERIAL_IRQ) = GxICR_ENABLE | GxICR_LEVEL_0;
+       XIRQxICR(GDBPORT_SERIAL_IRQ) =
+               GxICR_ENABLE | NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL);
        tmp = XIRQxICR(GDBPORT_SERIAL_IRQ);
 
        GDBPORT_SERIAL_IER = UART_IER_RDI | UART_IER_RLSI;
 
        /* permit level 0 IRQs to take place */
-       asm volatile(
-               "       and %0,epsw     \n"
-               "       or %1,epsw      \n"
-               :
-               : "i"(~EPSW_IM), "i"(EPSW_IE | EPSW_IM_1)
-               );
+       local_change_intr_mask_level(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
 }
 
 /*
@@ -87,6 +100,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
 {
        unsigned ix;
        u8 ch, st;
+#if defined(CONFIG_MN10300_WD_TIMER)
+       int cpu;
+#endif
 
        *_ch = 0xff;
 
@@ -104,8 +120,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
                if (nonblock)
                        return -EAGAIN;
 #ifdef CONFIG_MN10300_WD_TIMER
-               watchdog_alert_counter = 0;
-#endif /* CONFIG_MN10300_WD_TIMER */
+       for (cpu = 0; cpu < NR_CPUS; cpu++)
+               watchdog_alert_counter[cpu] = 0;
+#endif
                goto try_again;
        }
 
index a560bbc..97dfda2 100644 (file)
@@ -58,9 +58,12 @@ void __init gdbstub_io_init(void)
        gdbstub_io_set_baud(115200);
 
        /* we want to get serial receive interrupts */
-       set_intr_level(gdbstub_port->rx_irq, GxICR_LEVEL_0);
-       set_intr_level(gdbstub_port->tx_irq, GxICR_LEVEL_0);
-       set_intr_stub(EXCEP_IRQ_LEVEL0, gdbstub_io_rx_handler);
+       set_intr_level(gdbstub_port->rx_irq,
+               NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL));
+       set_intr_level(gdbstub_port->tx_irq,
+               NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL));
+       set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL),
+               gdbstub_io_rx_handler);
 
        *gdbstub_port->rx_icr |= GxICR_ENABLE;
        tmp = *gdbstub_port->rx_icr;
@@ -84,12 +87,7 @@ void __init gdbstub_io_init(void)
        tmp = *gdbstub_port->_control;
 
        /* permit level 0 IRQs only */
-       asm volatile(
-               "       and %0,epsw     \n"
-               "       or %1,epsw      \n"
-               :
-               : "i"(~EPSW_IM), "i"(EPSW_IE|EPSW_IM_1)
-               );
+       local_change_intr_mask_level(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
 }
 
 /*
@@ -184,6 +182,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
 {
        unsigned ix;
        u8 ch, st;
+#if defined(CONFIG_MN10300_WD_TIMER)
+       int cpu;
+#endif
 
        *_ch = 0xff;
 
@@ -201,8 +202,9 @@ try_again:
                if (nonblock)
                        return -EAGAIN;
 #ifdef CONFIG_MN10300_WD_TIMER
-               watchdog_alert_counter = 0;
-#endif /* CONFIG_MN10300_WD_TIMER */
+       for (cpu = 0; cpu < NR_CPUS; cpu++)
+               watchdog_alert_counter[cpu] = 0;
+#endif
                goto try_again;
        }
 
index 41b1170..a5fc3f0 100644 (file)
@@ -440,15 +440,11 @@ static const unsigned char gdbstub_insn_sizes[256] =
 
 static int __gdbstub_mark_bp(u8 *addr, int ix)
 {
-       if (addr < (u8 *) 0x70000000UL)
-               return 0;
-       /* 70000000-7fffffff: vmalloc area */
-       if (addr < (u8 *) 0x80000000UL)
+       /* vmalloc area */
+       if (((u8 *) VMALLOC_START <= addr) && (addr < (u8 *) VMALLOC_END))
                goto okay;
-       if (addr < (u8 *) 0x8c000000UL)
-               return 0;
-       /* 8c000000-93ffffff: SRAM, SDRAM */
-       if (addr < (u8 *) 0x94000000UL)
+       /* SRAM, SDRAM */
+       if (((u8 *) 0x80000000UL <= addr) && (addr < (u8 *) 0xa0000000UL))
                goto okay;
        return 0;
 
@@ -1197,9 +1193,8 @@ static int gdbstub(struct pt_regs *regs, enum exception_code excep)
        mn10300_set_gdbleds(1);
 
        asm volatile("mov mdr,%0" : "=d"(mdr));
-       asm volatile("mov epsw,%0" : "=d"(epsw));
-       asm volatile("mov %0,epsw"
-                    :: "d"((epsw & ~EPSW_IM) | EPSW_IE | EPSW_IM_1));
+       local_save_flags(epsw);
+       local_change_intr_mask_level(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
 
        gdbstub_store_fpu();
 
index 14f27f3..73e00fc 100644 (file)
 #include <asm/frame.inc>
 #include <asm/param.h>
 #include <unit/serial.h>
+#ifdef CONFIG_SMP
+#include <asm/smp.h>
+#include <asm/intctl-regs.h>
+#include <asm/cpu-regs.h>
+#include <proc/smp-regs.h>
+#endif /* CONFIG_SMP */
 
        __HEAD
 
        .globl  _start
        .type   _start,@function
 _start:
+#ifdef CONFIG_SMP
+       #
+       # If this is a secondary CPU (AP), then deal with that elsewhere
+       #
+       mov     (CPUID),d3
+       and     CPUID_MASK,d3
+       bne     startup_secondary
+
+       #
+       # We're dealing with the primary CPU (BP) here, then.
+       # Keep BP's D0,D1,D2 register for boot check.
+       #
+
+       # Set up the Boot IPI for each secondary CPU
+       mov     0x1,a0
+loop_set_secondary_icr:
+       mov     a0,a1
+       asl     CROSS_ICR_CPU_SHIFT,a1
+       add     CROSS_GxICR(SMP_BOOT_IRQ,0),a1
+       movhu   (a1),d3
+       or      GxICR_ENABLE|GxICR_LEVEL_0,d3
+       movhu   d3,(a1)
+       movhu   (a1),d3                         # flush
+       inc     a0
+       cmp     NR_CPUS,a0
+       bne     loop_set_secondary_icr
+#endif /* CONFIG_SMP */
+
        # save commandline pointer
        mov     d0,a3
 
        # preload the PGD pointer register
        mov     swapper_pg_dir,d0
        mov     d0,(PTBR)
+       clr     d0
+       movbu   d0,(PIDR)
 
        # turn on the TLBs
        mov     MMUCTR_IIV|MMUCTR_DIV,d0
        mov     d0,(MMUCTR)
+#ifdef CONFIG_AM34_2
+       mov     MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE|MMUCTR_WTE,d0
+#else
        mov     MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE,d0
+#endif
        mov     d0,(MMUCTR)
 
        # turn on AM33v2 exception handling mode and set the trap table base
@@ -51,6 +91,11 @@ _start:
        mov     d0,(TBR)
 
        # invalidate and enable both of the caches
+#ifdef CONFIG_SMP
+       mov     ECHCTR,a0
+       clr     d0
+       mov     d0,(a0)
+#endif
        mov     CHCTR,a0
        clr     d0
        movhu   d0,(a0)                                 # turn off first
@@ -61,18 +106,18 @@ _start:
        btst    CHCTR_ICBUSY|CHCTR_DCBUSY,d0            # wait till not busy
        lne
 
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
 #ifdef CONFIG_MN10300_CACHE_WBACK
 #ifndef CONFIG_MN10300_CACHE_WBACK_NOWRALLOC
        mov     CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK,d0
 #else
        mov     CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK|CHCTR_DCALMD,d0
-#endif /* CACHE_DISABLED */
+#endif /* NOWRALLOC */
 #else
        mov     CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRTHROUGH,d0
 #endif /* WBACK */
        movhu   d0,(a0)                                 # enable
-#endif /* NOWRALLOC */
+#endif /* ENABLED */
 
        # turn on RTS on the debug serial port if applicable
 #ifdef CONFIG_MN10300_UNIT_ASB2305
@@ -206,6 +251,44 @@ __no_parameters:
        call    processor_init[],0
        call    unit_init[],0
 
+#ifdef CONFIG_SMP
+       # mark the primary CPU in cpu_boot_map
+       mov     cpu_boot_map,a0
+       mov     0x1,d0
+       mov     d0,(a0)
+
+       # signal each secondary CPU to begin booting
+       mov     0x1,d2                          # CPU ID
+
+loop_request_boot_secondary:
+       mov     d2,a0
+       # send SMP_BOOT_IPI to secondary CPU
+       asl     CROSS_ICR_CPU_SHIFT,a0
+       add     CROSS_GxICR(SMP_BOOT_IRQ,0),a0
+       movhu   (a0),d0
+       or      GxICR_REQUEST|GxICR_DETECT,d0
+       movhu   d0,(a0)
+       movhu   (a0),d0                         # flush
+
+       # wait up to 100ms for AP's IPI to be received
+       clr     d3
+wait_on_secondary_boot:
+       mov     DELAY_TIME_BOOT_IPI,d0
+       call    __delay[],0
+       inc     d3
+       mov     cpu_boot_map,a0
+       mov     (a0),d0
+       lsr     d2,d0
+       btst    0x1,d0
+       bne     1f
+       cmp     TIME_OUT_COUNT_BOOT_IPI,d3
+       bne     wait_on_secondary_boot
+1:
+       inc     d2
+       cmp     NR_CPUS,d2
+       bne     loop_request_boot_secondary
+#endif /* CONFIG_SMP */
+
 #ifdef CONFIG_GDBSTUB
        call    gdbstub_init[],0
 
@@ -217,7 +300,118 @@ __gdbstub_pause:
 #endif
 
        jmp     start_kernel
-       .size   _start, _start-.
+       .size   _start,.-_start
+
+###############################################################################
+#
+# Secondary CPU boot point
+#
+###############################################################################
+#ifdef CONFIG_SMP
+startup_secondary:
+       # preload the PGD pointer register
+       mov     swapper_pg_dir,d0
+       mov     d0,(PTBR)
+       clr     d0
+       movbu   d0,(PIDR)
+
+       # turn on the TLBs
+       mov     MMUCTR_IIV|MMUCTR_DIV,d0
+       mov     d0,(MMUCTR)
+#ifdef CONFIG_AM34_2
+       mov     MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE|MMUCTR_WTE,d0
+#else
+       mov     MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE,d0
+#endif
+       mov     d0,(MMUCTR)
+
+       # turn on AM33v2 exception handling mode and set the trap table base
+       movhu   (CPUP),d0
+       or      CPUP_EXM_AM33V2,d0
+       movhu   d0,(CPUP)
+
+       # set the interrupt vector table
+       mov     CONFIG_INTERRUPT_VECTOR_BASE,d0
+       mov     d0,(TBR)
+
+       # invalidate and enable both of the caches
+       mov     ECHCTR,a0
+       clr     d0
+       mov     d0,(a0)
+       mov     CHCTR,a0
+       clr     d0
+       movhu   d0,(a0)                                 # turn off first
+       mov     CHCTR_ICINV|CHCTR_DCINV,d0
+       movhu   d0,(a0)
+       setlb
+       mov     (a0),d0
+       btst    CHCTR_ICBUSY|CHCTR_DCBUSY,d0            # wait till not busy (use CPU loop buffer)
+       lne
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+#ifdef  CONFIG_MN10300_CACHE_WBACK
+#ifndef CONFIG_MN10300_CACHE_WBACK_NOWRALLOC
+       mov     CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK,d0
+#else
+       mov     CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK|CHCTR_DCALMD,d0
+#endif  /* !NOWRALLOC */
+#else
+       mov     CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRTHROUGH,d0
+#endif  /* WBACK */
+       movhu   d0,(a0)                                 # enable
+#endif  /* ENABLED */
+
+       # Clear the boot IPI interrupt for this CPU
+       movhu   (GxICR(SMP_BOOT_IRQ)),d0
+       and     ~GxICR_REQUEST,d0
+       movhu   d0,(GxICR(SMP_BOOT_IRQ))
+       movhu   (GxICR(SMP_BOOT_IRQ)),d0                # flush
+
+       /* get stack */
+       mov     CONFIG_INTERRUPT_VECTOR_BASE + CONFIG_BOOT_STACK_OFFSET,a0
+       mov     (CPUID),d0
+       and     CPUID_MASK,d0
+       mulu    CONFIG_BOOT_STACK_SIZE,d0
+       sub     d0,a0
+       mov     a0,sp
+
+       # init interrupt for AP
+       call    smp_prepare_cpu_init[],0
+
+       # mark this secondary CPU in cpu_boot_map
+       mov     (CPUID),d0
+       mov     0x1,d1
+       asl     d0,d1
+       mov     cpu_boot_map,a0
+       bset    d1,(a0)
+
+       or      EPSW_IE|EPSW_IM_1,epsw  # permit level 0 interrupts
+       nop
+       nop
+#ifdef  CONFIG_MN10300_CACHE_WBACK
+       # flush the local cache if it's in writeback mode
+       call    mn10300_local_dcache_flush_inv[],0
+       setlb
+       mov     (CHCTR),d0
+       btst    CHCTR_DCBUSY,d0         # wait till not busy (use CPU loop buffer)
+       lne
+#endif
+
+       # now sleep waiting for further instructions
+secondary_sleep:
+       mov     CPUM_SLEEP,d0
+       movhu   d0,(CPUM)
+       nop
+       nop
+       bra     secondary_sleep
+       .size   startup_secondary,.-startup_secondary
+#endif /* CONFIG_SMP */
+
+###############################################################################
+#
+#
+#
+###############################################################################
 ENTRY(__head_end)
 
 /*
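
The head.S changes above add the SMP bring-up path: the boot CPU enables each AP's boot-IPI ICR, sends SMP_BOOT_IRQ through the cross-interrupt registers, and polls cpu_boot_map (with __delay(DELAY_TIME_BOOT_IPI) up to TIME_OUT_COUNT_BOOT_IPI times) while startup_secondary sets the AP's bit and then parks the AP in a CPUM_SLEEP loop. A rough, self-contained C model of that handshake (all names and values here are illustrative stand-ins):

    /*
     * Hypothetical C model of the BP->AP boot handshake in head.S above.
     * send_boot_ipi(), short_delay() and the constants stand in for the
     * cross-ICR write, __delay(DELAY_TIME_BOOT_IPI) and
     * TIME_OUT_COUNT_BOOT_IPI; here the "AP" responds instantly.
     */
    #include <stdio.h>

    #define NR_CPUS_MODEL   4
    #define BOOT_IPI_TRIES  100

    static volatile unsigned long cpu_boot_map_model = 1UL << 0; /* BP marks itself */

    static void send_boot_ipi(int cpu)
    {
            /* simulates startup_secondary's bset into cpu_boot_map */
            cpu_boot_map_model |= 1UL << cpu;
    }

    static void short_delay(void)
    {
            /* __delay(DELAY_TIME_BOOT_IPI) analogue */
    }

    static void kick_secondaries(void)
    {
            int cpu, tries;

            for (cpu = 1; cpu < NR_CPUS_MODEL; cpu++) {
                    send_boot_ipi(cpu);
                    /* poll the boot map for a bounded time, then move on
                     * regardless, exactly as the assembly loop does */
                    for (tries = 0; tries < BOOT_IPI_TRIES; tries++) {
                            if (cpu_boot_map_model & (1UL << cpu))
                                    break;
                            short_delay();
                    }
            }
    }

    int main(void)
    {
            kick_secondaries();
            printf("cpu_boot_map = %#lx\n", cpu_boot_map_model);
            return 0;
    }
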
index eee2eee..6a064ab 100644 (file)
@@ -9,6 +9,9 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 
+struct clocksource;
+struct clock_event_device;
+
 /*
  * kthread.S
  */
@@ -18,3 +21,25 @@ extern int kernel_thread_helper(int);
  * entry.S
  */
 extern void ret_from_fork(struct task_struct *) __attribute__((noreturn));
+
+/*
+ * smp-low.S
+ */
+#ifdef CONFIG_SMP
+extern void mn10300_low_ipi_handler(void);
+#endif
+
+/*
+ * time.c
+ */
+extern irqreturn_t local_timer_interrupt(void);
+
+/*
+ * time.c
+ */
+#ifdef CONFIG_CEVT_MN10300
+extern void clockevent_set_clock(struct clock_event_device *, unsigned int);
+#endif
+#ifdef CONFIG_CSRC_MN10300
+extern void clocksource_set_clock(struct clocksource *, unsigned int);
+#endif
index e2d5ed8..c2e4459 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
+#include <linux/cpumask.h>
 #include <asm/setup.h>
+#include <asm/serial-regs.h>
 
-unsigned long __mn10300_irq_enabled_epsw = EPSW_IE | EPSW_IM_7;
+unsigned long __mn10300_irq_enabled_epsw[NR_CPUS] __cacheline_aligned_in_smp = {
+       [0 ... NR_CPUS - 1] = EPSW_IE | EPSW_IM_7
+};
 EXPORT_SYMBOL(__mn10300_irq_enabled_epsw);
 
+#ifdef CONFIG_SMP
+static char irq_affinity_online[NR_IRQS] = {
+       [0 ... NR_IRQS - 1] = 0
+};
+
+#define NR_IRQ_WORDS   ((NR_IRQS + 31) / 32)
+static unsigned long irq_affinity_request[NR_IRQ_WORDS] = {
+       [0 ... NR_IRQ_WORDS - 1] = 0
+};
+#endif  /* CONFIG_SMP */
+
 atomic_t irq_err_count;
 
 /*
@@ -24,30 +39,67 @@ atomic_t irq_err_count;
  */
 static void mn10300_cpupic_ack(unsigned int irq)
 {
+       unsigned long flags;
        u16 tmp;
-       *(volatile u8 *) &GxICR(irq) = GxICR_DETECT;
+
+       flags = arch_local_cli_save();
+       GxICR_u8(irq) = GxICR_DETECT;
        tmp = GxICR(irq);
+       arch_local_irq_restore(flags);
 }
 
-static void mn10300_cpupic_mask(unsigned int irq)
+static void __mask_and_set_icr(unsigned int irq,
+                              unsigned int mask, unsigned int set)
 {
-       u16 tmp = GxICR(irq);
-       GxICR(irq) = (tmp & GxICR_LEVEL);
+       unsigned long flags;
+       u16 tmp;
+
+       flags = arch_local_cli_save();
+       tmp = GxICR(irq);
+       GxICR(irq) = (tmp & mask) | set;
        tmp = GxICR(irq);
+       arch_local_irq_restore(flags);
+}
+
+static void mn10300_cpupic_mask(unsigned int irq)
+{
+       __mask_and_set_icr(irq, GxICR_LEVEL, 0);
 }
 
 static void mn10300_cpupic_mask_ack(unsigned int irq)
 {
-       u16 tmp = GxICR(irq);
-       GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
-       tmp = GxICR(irq);
+#ifdef CONFIG_SMP
+       unsigned long flags;
+       u16 tmp;
+
+       flags = arch_local_cli_save();
+
+       if (!test_and_clear_bit(irq, irq_affinity_request)) {
+               tmp = GxICR(irq);
+               GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
+               tmp = GxICR(irq);
+       } else {
+               u16 tmp2;
+               tmp = GxICR(irq);
+               GxICR(irq) = (tmp & GxICR_LEVEL);
+               tmp2 = GxICR(irq);
+
+               irq_affinity_online[irq] =
+                       any_online_cpu(*irq_desc[irq].affinity);
+               CROSS_GxICR(irq, irq_affinity_online[irq]) =
+                       (tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
+               tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
+       }
+
+       arch_local_irq_restore(flags);
+#else  /* CONFIG_SMP */
+       __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_DETECT);
+#endif /* CONFIG_SMP */
 }
 
 static void mn10300_cpupic_unmask(unsigned int irq)
 {
-       u16 tmp = GxICR(irq);
-       GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
-       tmp = GxICR(irq);
+       __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE);
 }
 
 static void mn10300_cpupic_unmask_clear(unsigned int irq)
@@ -56,11 +108,89 @@ static void mn10300_cpupic_unmask_clear(unsigned int irq)
         * device has ceased to assert its interrupt line and the interrupt
         * channel has been disabled in the PIC, so for level-triggered
         * interrupts we need to clear the request bit when we re-enable */
-       u16 tmp = GxICR(irq);
-       GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
-       tmp = GxICR(irq);
+#ifdef CONFIG_SMP
+       unsigned long flags;
+       u16 tmp;
+
+       flags = arch_local_cli_save();
+
+       if (!test_and_clear_bit(irq, irq_affinity_request)) {
+               tmp = GxICR(irq);
+               GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
+               tmp = GxICR(irq);
+       } else {
+               tmp = GxICR(irq);
+
+               irq_affinity_online[irq] = any_online_cpu(*irq_desc[irq].affinity);
+               CROSS_GxICR(irq, irq_affinity_online[irq]) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
+               tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
+       }
+
+       arch_local_irq_restore(flags);
+#else  /* CONFIG_SMP */
+       __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE | GxICR_DETECT);
+#endif /* CONFIG_SMP */
 }
 
+#ifdef CONFIG_SMP
+static int
+mn10300_cpupic_setaffinity(unsigned int irq, const struct cpumask *mask)
+{
+       unsigned long flags;
+       int err;
+
+       flags = arch_local_cli_save();
+
+       /* check irq no */
+       switch (irq) {
+       case TMJCIRQ:
+       case RESCHEDULE_IPI:
+       case CALL_FUNC_SINGLE_IPI:
+       case LOCAL_TIMER_IPI:
+       case FLUSH_CACHE_IPI:
+       case CALL_FUNCTION_NMI_IPI:
+       case GDB_NMI_IPI:
+#ifdef CONFIG_MN10300_TTYSM0
+       case SC0RXIRQ:
+       case SC0TXIRQ:
+#ifdef CONFIG_MN10300_TTYSM0_TIMER8
+       case TM8IRQ:
+#elif CONFIG_MN10300_TTYSM0_TIMER2
+       case TM2IRQ:
+#endif /* CONFIG_MN10300_TTYSM0_TIMER8 */
+#endif /* CONFIG_MN10300_TTYSM0 */
+
+#ifdef CONFIG_MN10300_TTYSM1
+       case SC1RXIRQ:
+       case SC1TXIRQ:
+#ifdef CONFIG_MN10300_TTYSM1_TIMER12
+       case TM12IRQ:
+#elif CONFIG_MN10300_TTYSM1_TIMER9
+       case TM9IRQ:
+#elif CONFIG_MN10300_TTYSM1_TIMER3
+       case TM3IRQ:
+#endif /* CONFIG_MN10300_TTYSM1_TIMER12 */
+#endif /* CONFIG_MN10300_TTYSM1 */
+
+#ifdef CONFIG_MN10300_TTYSM2
+       case SC2RXIRQ:
+       case SC2TXIRQ:
+       case TM10IRQ:
+#endif /* CONFIG_MN10300_TTYSM2 */
+               err = -1;
+               break;
+
+       default:
+               set_bit(irq, irq_affinity_request);
+               err = 0;
+               break;
+       }
+
+       arch_local_irq_restore(flags);
+       return err;
+}
+#endif /* CONFIG_SMP */
+
 /*
  * MN10300 PIC level-triggered IRQ handling.
  *
@@ -79,6 +209,9 @@ static struct irq_chip mn10300_cpu_pic_level = {
        .mask           = mn10300_cpupic_mask,
        .mask_ack       = mn10300_cpupic_mask,
        .unmask         = mn10300_cpupic_unmask_clear,
+#ifdef CONFIG_SMP
+       .set_affinity   = mn10300_cpupic_setaffinity,
+#endif
 };
 
 /*
@@ -94,6 +227,9 @@ static struct irq_chip mn10300_cpu_pic_edge = {
        .mask           = mn10300_cpupic_mask,
        .mask_ack       = mn10300_cpupic_mask_ack,
        .unmask         = mn10300_cpupic_unmask,
+#ifdef CONFIG_SMP
+       .set_affinity   = mn10300_cpupic_setaffinity,
+#endif
 };
 
 /*
@@ -111,14 +247,34 @@ void ack_bad_irq(int irq)
  */
 void set_intr_level(int irq, u16 level)
 {
-       u16 tmp;
+       BUG_ON(in_interrupt());
 
-       if (in_interrupt())
-               BUG();
+       __mask_and_set_icr(irq, GxICR_ENABLE, level);
+}
 
-       tmp = GxICR(irq);
-       GxICR(irq) = (tmp & GxICR_ENABLE) | level;
-       tmp = GxICR(irq);
+void mn10300_intc_set_level(unsigned int irq, unsigned int level)
+{
+       set_intr_level(irq, NUM2GxICR_LEVEL(level) & GxICR_LEVEL);
+}
+
+void mn10300_intc_clear(unsigned int irq)
+{
+       __mask_and_set_icr(irq, GxICR_LEVEL | GxICR_ENABLE, GxICR_DETECT);
+}
+
+void mn10300_intc_set(unsigned int irq)
+{
+       __mask_and_set_icr(irq, 0, GxICR_REQUEST | GxICR_DETECT);
+}
+
+void mn10300_intc_enable(unsigned int irq)
+{
+       mn10300_cpupic_unmask(irq);
+}
+
+void mn10300_intc_disable(unsigned int irq)
+{
+       mn10300_cpupic_mask(irq);
 }
 
 /*
@@ -126,7 +282,7 @@ void set_intr_level(int irq, u16 level)
  * than before
  * - see Documentation/mn10300/features.txt
  */
-void set_intr_postackable(int irq)
+void mn10300_set_lateack_irq_type(int irq)
 {
        set_irq_chip_and_handler(irq, &mn10300_cpu_pic_level,
                                 handle_level_irq);
@@ -147,6 +303,7 @@ void __init init_IRQ(void)
                         * interrupts */
                        set_irq_chip_and_handler(irq, &mn10300_cpu_pic_edge,
                                                 handle_level_irq);
+
        unit_init_IRQ();
 }
 
@@ -156,20 +313,22 @@ void __init init_IRQ(void)
 asmlinkage void do_IRQ(void)
 {
        unsigned long sp, epsw, irq_disabled_epsw, old_irq_enabled_epsw;
+       unsigned int cpu_id = smp_processor_id();
        int irq;
 
        sp = current_stack_pointer();
-       if (sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN)
-               BUG();
+       BUG_ON(sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN);
 
        /* make sure local_irq_enable() doesn't muck up the interrupt priority
         * setting in EPSW */
-       old_irq_enabled_epsw = __mn10300_irq_enabled_epsw;
+       old_irq_enabled_epsw = __mn10300_irq_enabled_epsw[cpu_id];
        local_save_flags(epsw);
-       __mn10300_irq_enabled_epsw = EPSW_IE | (EPSW_IM & epsw);
+       __mn10300_irq_enabled_epsw[cpu_id] = EPSW_IE | (EPSW_IM & epsw);
        irq_disabled_epsw = EPSW_IE | MN10300_CLI_LEVEL;
 
-       __IRQ_STAT(smp_processor_id(), __irq_count)++;
+#ifdef CONFIG_MN10300_WD_TIMER
+       __IRQ_STAT(cpu_id, __irq_count)++;
+#endif
 
        irq_enter();
 
@@ -189,7 +348,7 @@ asmlinkage void do_IRQ(void)
                local_irq_restore(epsw);
        }
 
-       __mn10300_irq_enabled_epsw = old_irq_enabled_epsw;
+       __mn10300_irq_enabled_epsw[cpu_id] = old_irq_enabled_epsw;
 
        irq_exit();
 }
@@ -222,9 +381,16 @@ int show_interrupts(struct seq_file *p, void *v)
                        seq_printf(p, "%3d: ", i);
                        for_each_present_cpu(cpu)
                                seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
-                       seq_printf(p, " %14s.%u", irq_desc[i].chip->name,
-                                  (GxICR(i) & GxICR_LEVEL) >>
-                                  GxICR_LEVEL_SHIFT);
+
+                       if (i < NR_CPU_IRQS)
+                               seq_printf(p, " %14s.%u",
+                                          irq_desc[i].chip->name,
+                                          (GxICR(i) & GxICR_LEVEL) >>
+                                          GxICR_LEVEL_SHIFT);
+                       else
+                               seq_printf(p, " %14s",
+                                          irq_desc[i].chip->name);
+
                        seq_printf(p, "  %s", action->name);
 
                        for (action = action->next;
@@ -240,11 +406,13 @@ int show_interrupts(struct seq_file *p, void *v)
 
                /* polish off with NMI and error counters */
        case NR_IRQS:
+#ifdef CONFIG_MN10300_WD_TIMER
                seq_printf(p, "NMI: ");
                for (j = 0; j < NR_CPUS; j++)
                        if (cpu_online(j))
                                seq_printf(p, "%10u ", nmi_count(j));
                seq_putc(p, '\n');
+#endif
 
                seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
                break;
@@ -252,3 +420,51 @@ int show_interrupts(struct seq_file *p, void *v)
 
        return 0;
 }
+
+#ifdef CONFIG_HOTPLUG_CPU
+void migrate_irqs(void)
+{
+       irq_desc_t *desc;
+       int irq;
+       unsigned int self, new;
+       unsigned long flags;
+
+       self = smp_processor_id();
+       for (irq = 0; irq < NR_IRQS; irq++) {
+               desc = irq_desc + irq;
+
+               if (desc->status == IRQ_PER_CPU)
+                       continue;
+
+               if (cpu_isset(self, irq_desc[irq].affinity) &&
+                   !cpus_intersects(irq_affinity[irq], cpu_online_map)) {
+                       int cpu_id;
+                       cpu_id = first_cpu(cpu_online_map);
+                       cpu_set(cpu_id, irq_desc[irq].affinity);
+               }
+               /* We need to operate irq_affinity_online atomically. */
+               arch_local_cli_save(flags);
+               if (irq_affinity_online[irq] == self) {
+                       u16 x, tmp;
+
+                       x = GxICR(irq);
+                       GxICR(irq) = x & GxICR_LEVEL;
+                       tmp = GxICR(irq);
+
+                       new = any_online_cpu(irq_desc[irq].affinity);
+                       irq_affinity_online[irq] = new;
+
+                       CROSS_GxICR(irq, new) =
+                               (x & GxICR_LEVEL) | GxICR_DETECT;
+                       tmp = CROSS_GxICR(irq, new);
+
+                       x &= GxICR_LEVEL | GxICR_ENABLE;
+                       if (GxICR(irq) & GxICR_REQUEST) {
+                               x |= GxICR_REQUEST | GxICR_DETECT;
+                               CROSS_GxICR(irq, new) = x;
+                               tmp = CROSS_GxICR(irq, new);
+                       }
+               }
+               arch_local_irq_restore(flags);
+       }
+}
+#endif /* CONFIG_HOTPLUG_CPU */
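
In the irq.c changes above, mn10300_cpupic_setaffinity() does not move an interrupt immediately: for retargetable IRQs it only sets the IRQ's bit in irq_affinity_request, and the actual migration to the newly chosen CPU's ICR happens later, in mask_ack/unmask_clear (and in migrate_irqs() on hot-unplug), via CROSS_GxICR once the interrupt is being serviced anyway. A compact, self-contained model of that defer-then-move pattern (hypothetical names, not the kernel's code):

    /*
     * Hypothetical model of the deferred-retargeting pattern used by the
     * MN10300 PIC code above: set_affinity only records a request, and the
     * move to the newly chosen CPU happens the next time the IRQ is acked.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define NR_IRQS_MODEL   8

    static bool affinity_requested[NR_IRQS_MODEL];  /* irq_affinity_request analogue */
    static int  irq_home_cpu[NR_IRQS_MODEL];        /* irq_affinity_online analogue */

    static int pick_target_cpu(void)
    {
            return 1;                               /* any_online_cpu() analogue */
    }

    static int set_affinity(int irq)
    {
            /* the real code refuses IPIs and the ttySM timer IRQs here */
            affinity_requested[irq] = true;
            return 0;
    }

    static void mask_ack(int irq)
    {
            if (affinity_requested[irq]) {
                    affinity_requested[irq] = false;
                    irq_home_cpu[irq] = pick_target_cpu();
                    /* the real code rewrites CROSS_GxICR(irq, new_cpu) here */
            }
            /* ...then masks/acknowledges the interrupt as usual */
    }

    int main(void)
    {
            set_affinity(3);
            mask_ack(3);
            printf("irq 3 now handled by cpu %d\n", irq_home_cpu[3]);
            return 0;
    }

Deferring the move to ack time presumably avoids rewriting a level-triggered channel's ICR while it may still be asserting a request on the old CPU.
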
index 67e6389..0311a7f 100644 (file)
@@ -377,8 +377,10 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
 
 void __kprobes arch_disarm_kprobe(struct kprobe *p)
 {
+#ifndef CONFIG_MN10300_CACHE_SNOOP
        mn10300_dcache_flush();
        mn10300_icache_inv();
+#endif
 }
 
 void arch_remove_kprobe(struct kprobe *p)
@@ -390,8 +392,10 @@ void __kprobes disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
 {
        *p->addr = p->opcode;
        regs->pc = (unsigned long) p->addr;
+#ifndef CONFIG_MN10300_CACHE_SNOOP
        mn10300_dcache_flush();
        mn10300_icache_inv();
+#endif
 }
 
 static inline
index 66702d2..dfc1b6f 100644 (file)
@@ -39,7 +39,7 @@
 ###############################################################################
        .balign L1_CACHE_BYTES
 ENTRY(mn10300_serial_vdma_interrupt)
-       or      EPSW_IE,psw                     # permit overriding by
+#      or      EPSW_IE,psw                     # permit overriding by
                                                # debugging interrupts
        movm    [d2,d3,a2,a3,exreg0],(sp)
 
@@ -164,7 +164,7 @@ mnsc_vdma_tx_noint:
        rti
 
 mnsc_vdma_tx_empty:
-       mov     +(GxICR_LEVEL_1|GxICR_DETECT),d2
+       mov     +(NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)|GxICR_DETECT),d2
        movhu   d2,(e3)                 # disable the interrupt
        movhu   (e3),d2                 # flush
 
@@ -175,7 +175,7 @@ mnsc_vdma_tx_break:
        movhu   (SCxCTR,e2),d2          # turn on break mode
        or      SC01CTR_BKE,d2
        movhu   d2,(SCxCTR,e2)
-       mov     +(GxICR_LEVEL_1|GxICR_DETECT),d2
+       mov     +(NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)|GxICR_DETECT),d2
        movhu   d2,(e3)                 # disable transmit interrupts on this
                                        # channel
        movhu   (e3),d2                 # flush
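
The serial hunks above replace the hard-coded GxICR_LEVEL_1 priority with NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL), making the on-chip serial interrupt priority a Kconfig choice. The macro itself is defined in the interrupt-controller headers rather than in this diff; since the level field is read back elsewhere in this merge with ">> GxICR_LEVEL_SHIFT", it presumably just shifts the configured number into that field, roughly:

    /* Sketch of the level-conversion macro, inferred from how the level field
     * is read back elsewhere in this merge (">> GxICR_LEVEL_SHIFT"); the real
     * definition lives in the intctl headers and may differ in detail. */
    #define NUM2GxICR_LEVEL_SKETCH(num)     ((num) << GxICR_LEVEL_SHIFT)

NUM2EPSW_IM(), used in the gdbstub and entry.S hunks, presumably does the analogous shift into the EPSW.IM field.
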
index db509dd..996384d 100644 (file)
@@ -44,6 +44,11 @@ static const char serial_revdate[] = "2007-11-06";
 #include <unit/timex.h>
 #include "mn10300-serial.h"
 
+#ifdef CONFIG_SMP
+#undef  GxICR
+#define GxICR(X) CROSS_GxICR(X, 0)
+#endif /* CONFIG_SMP */
+
 #define kenter(FMT, ...) \
        printk(KERN_DEBUG "-->%s(" FMT ")\n", __func__, ##__VA_ARGS__)
 #define _enter(FMT, ...) \
@@ -57,6 +62,11 @@ static const char serial_revdate[] = "2007-11-06";
 #define _proto(FMT, ...) \
        no_printk(KERN_DEBUG "### MNSERIAL " FMT " ###\n", ##__VA_ARGS__)
 
+#ifndef CODMSB
+/* c_cflag bit meaning */
+#define CODMSB 004000000000    /* change Transfer bit-order */
+#endif
+
 #define NR_UARTS 3
 
 #ifdef CONFIG_MN10300_TTYSM_CONSOLE
@@ -152,26 +162,35 @@ struct mn10300_serial_port mn10300_serial_port_sif0 = {
        .name           = "ttySM0",
        ._iobase        = &SC0CTR,
        ._control       = &SC0CTR,
-       ._status        = (volatile u8 *) &SC0STR,
+       ._status        = (volatile u8 *)&SC0STR,
        ._intr          = &SC0ICR,
        ._rxb           = &SC0RXB,
        ._txb           = &SC0TXB,
        .rx_name        = "ttySM0:Rx",
        .tx_name        = "ttySM0:Tx",
-#ifdef CONFIG_MN10300_TTYSM0_TIMER8
+#if defined(CONFIG_MN10300_TTYSM0_TIMER8)
        .tm_name        = "ttySM0:Timer8",
        ._tmxmd         = &TM8MD,
        ._tmxbr         = &TM8BR,
        ._tmicr         = &TM8ICR,
        .tm_irq         = TM8IRQ,
        .div_timer      = MNSCx_DIV_TIMER_16BIT,
-#else /* CONFIG_MN10300_TTYSM0_TIMER2 */
+#elif defined(CONFIG_MN10300_TTYSM0_TIMER0)
+       .tm_name        = "ttySM0:Timer0",
+       ._tmxmd         = &TM0MD,
+       ._tmxbr         = (volatile u16 *)&TM0BR,
+       ._tmicr         = &TM0ICR,
+       .tm_irq         = TM0IRQ,
+       .div_timer      = MNSCx_DIV_TIMER_8BIT,
+#elif defined(CONFIG_MN10300_TTYSM0_TIMER2)
        .tm_name        = "ttySM0:Timer2",
        ._tmxmd         = &TM2MD,
-       ._tmxbr         = (volatile u16 *) &TM2BR,
+       ._tmxbr         = (volatile u16 *)&TM2BR,
        ._tmicr         = &TM2ICR,
        .tm_irq         = TM2IRQ,
        .div_timer      = MNSCx_DIV_TIMER_8BIT,
+#else
+#error "Unknown config for ttySM0"
 #endif
        .rx_irq         = SC0RXIRQ,
        .tx_irq         = SC0TXIRQ,
@@ -205,26 +224,35 @@ struct mn10300_serial_port mn10300_serial_port_sif1 = {
        .name           = "ttySM1",
        ._iobase        = &SC1CTR,
        ._control       = &SC1CTR,
-       ._status        = (volatile u8 *) &SC1STR,
+       ._status        = (volatile u8 *)&SC1STR,
        ._intr          = &SC1ICR,
        ._rxb           = &SC1RXB,
        ._txb           = &SC1TXB,
        .rx_name        = "ttySM1:Rx",
        .tx_name        = "ttySM1:Tx",
-#ifdef CONFIG_MN10300_TTYSM1_TIMER9
+#if defined(CONFIG_MN10300_TTYSM1_TIMER9)
        .tm_name        = "ttySM1:Timer9",
        ._tmxmd         = &TM9MD,
        ._tmxbr         = &TM9BR,
        ._tmicr         = &TM9ICR,
        .tm_irq         = TM9IRQ,
        .div_timer      = MNSCx_DIV_TIMER_16BIT,
-#else /* CONFIG_MN10300_TTYSM1_TIMER3 */
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
        .tm_name        = "ttySM1:Timer3",
        ._tmxmd         = &TM3MD,
-       ._tmxbr         = (volatile u16 *) &TM3BR,
+       ._tmxbr         = (volatile u16 *)&TM3BR,
        ._tmicr         = &TM3ICR,
        .tm_irq         = TM3IRQ,
        .div_timer      = MNSCx_DIV_TIMER_8BIT,
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER12)
+       .tm_name        = "ttySM1/Timer12",
+       ._tmxmd         = &TM12MD,
+       ._tmxbr         = &TM12BR,
+       ._tmicr         = &TM12ICR,
+       .tm_irq         = TM12IRQ,
+       .div_timer      = MNSCx_DIV_TIMER_16BIT,
+#else
+#error "Unknown config for ttySM1"
 #endif
        .rx_irq         = SC1RXIRQ,
        .tx_irq         = SC1TXIRQ,
@@ -260,20 +288,45 @@ struct mn10300_serial_port mn10300_serial_port_sif2 = {
        .uart.lock      =
        __SPIN_LOCK_UNLOCKED(mn10300_serial_port_sif2.uart.lock),
        .name           = "ttySM2",
-       .rx_name        = "ttySM2:Rx",
-       .tx_name        = "ttySM2:Tx",
-       .tm_name        = "ttySM2:Timer10",
        ._iobase        = &SC2CTR,
        ._control       = &SC2CTR,
-       ._status        = &SC2STR,
+       ._status        = (volatile u8 *)&SC2STR,
        ._intr          = &SC2ICR,
        ._rxb           = &SC2RXB,
        ._txb           = &SC2TXB,
+       .rx_name        = "ttySM2:Rx",
+       .tx_name        = "ttySM2:Tx",
+#if defined(CONFIG_MN10300_TTYSM2_TIMER10)
+       .tm_name        = "ttySM2/Timer10",
        ._tmxmd         = &TM10MD,
        ._tmxbr         = &TM10BR,
        ._tmicr         = &TM10ICR,
        .tm_irq         = TM10IRQ,
        .div_timer      = MNSCx_DIV_TIMER_16BIT,
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER9)
+       .tm_name        = "ttySM2/Timer9",
+       ._tmxmd         = &TM9MD,
+       ._tmxbr         = &TM9BR,
+       ._tmicr         = &TM9ICR,
+       .tm_irq         = TM9IRQ,
+       .div_timer      = MNSCx_DIV_TIMER_16BIT,
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER1)
+       .tm_name        = "ttySM2/Timer1",
+       ._tmxmd         = &TM1MD,
+       ._tmxbr         = (volatile u16 *)&TM1BR,
+       ._tmicr         = &TM1ICR,
+       .tm_irq         = TM1IRQ,
+       .div_timer      = MNSCx_DIV_TIMER_8BIT,
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER3)
+       .tm_name        = "ttySM2/Timer3",
+       ._tmxmd         = &TM3MD,
+       ._tmxbr         = (volatile u16 *)&TM3BR,
+       ._tmicr         = &TM3ICR,
+       .tm_irq         = TM3IRQ,
+       .div_timer      = MNSCx_DIV_TIMER_8BIT,
+#else
+#error "Unknown config for ttySM2"
+#endif
        .rx_irq         = SC2RXIRQ,
        .tx_irq         = SC2TXIRQ,
        .rx_icr         = &GxICR(SC2RXIRQ),
@@ -322,9 +375,13 @@ struct mn10300_serial_port *mn10300_serial_ports[NR_UARTS + 1] = {
  */
 static void mn10300_serial_mask_ack(unsigned int irq)
 {
+       unsigned long flags;
        u16 tmp;
+
+       flags = arch_local_cli_save();
        GxICR(irq) = GxICR_LEVEL_6;
        tmp = GxICR(irq); /* flush write buffer */
+       arch_local_irq_restore(flags);
 }
 
 static void mn10300_serial_nop(unsigned int irq)
@@ -348,23 +405,36 @@ struct mn10300_serial_int mn10300_serial_int_tbl[NR_IRQS];
 
 static void mn10300_serial_dis_tx_intr(struct mn10300_serial_port *port)
 {
+       unsigned long flags;
        u16 x;
-       *port->tx_icr = GxICR_LEVEL_1 | GxICR_DETECT;
+
+       flags = arch_local_cli_save();
+       *port->tx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
        x = *port->tx_icr;
+       arch_local_irq_restore(flags);
 }
 
 static void mn10300_serial_en_tx_intr(struct mn10300_serial_port *port)
 {
+       unsigned long flags;
        u16 x;
-       *port->tx_icr = GxICR_LEVEL_1 | GxICR_ENABLE;
+
+       flags = arch_local_cli_save();
+       *port->tx_icr =
+               NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL) | GxICR_ENABLE;
        x = *port->tx_icr;
+       arch_local_irq_restore(flags);
 }
 
 static void mn10300_serial_dis_rx_intr(struct mn10300_serial_port *port)
 {
+       unsigned long flags;
        u16 x;
-       *port->rx_icr = GxICR_LEVEL_1 | GxICR_DETECT;
+
+       flags = arch_local_cli_save();
+       *port->rx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
        x = *port->rx_icr;
+       arch_local_irq_restore(flags);
 }
 
 /*
@@ -650,7 +720,7 @@ static unsigned int mn10300_serial_tx_empty(struct uart_port *_port)
 static void mn10300_serial_set_mctrl(struct uart_port *_port,
                                     unsigned int mctrl)
 {
-       struct mn10300_serial_port *port =
+       struct mn10300_serial_port *port __attribute__ ((unused)) =
                container_of(_port, struct mn10300_serial_port, uart);
 
        _enter("%s,%x", port->name, mctrl);
@@ -706,6 +776,7 @@ static void mn10300_serial_start_tx(struct uart_port *_port)
                        UART_XMIT_SIZE));
 
        /* kick the virtual DMA controller */
+       arch_local_cli();
        x = *port->tx_icr;
        x |= GxICR_ENABLE;
 
@@ -716,10 +787,14 @@ static void mn10300_serial_start_tx(struct uart_port *_port)
 
        _debug("CTR=%04hx ICR=%02hx STR=%04x TMD=%02hx TBR=%04hx ICR=%04hx",
               *port->_control, *port->_intr, *port->_status,
-              *port->_tmxmd, *port->_tmxbr, *port->tx_icr);
+              *port->_tmxmd,
+              (port->div_timer == MNSCx_DIV_TIMER_8BIT) ?
+                  *(volatile u8 *)port->_tmxbr : *port->_tmxbr,
+              *port->tx_icr);
 
        *port->tx_icr = x;
        x = *port->tx_icr;
+       arch_local_sti();
 }
 
 /*
@@ -842,8 +917,10 @@ static int mn10300_serial_startup(struct uart_port *_port)
        pint->port = port;
        pint->vdma = mn10300_serial_vdma_tx_handler;
 
-       set_intr_level(port->rx_irq, GxICR_LEVEL_1);
-       set_intr_level(port->tx_irq, GxICR_LEVEL_1);
+       set_intr_level(port->rx_irq,
+               NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL));
+       set_intr_level(port->tx_irq,
+               NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL));
        set_irq_chip(port->tm_irq, &mn10300_serial_pic);
 
        if (request_irq(port->rx_irq, mn10300_serial_interrupt,
@@ -876,6 +953,7 @@ error:
  */
 static void mn10300_serial_shutdown(struct uart_port *_port)
 {
+       u16 x;
        struct mn10300_serial_port *port =
                container_of(_port, struct mn10300_serial_port, uart);
 
@@ -897,8 +975,12 @@ static void mn10300_serial_shutdown(struct uart_port *_port)
        free_irq(port->rx_irq, port);
        free_irq(port->tx_irq, port);
 
-       *port->rx_icr = GxICR_LEVEL_1;
-       *port->tx_icr = GxICR_LEVEL_1;
+       arch_local_cli();
+       *port->rx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
+       x = *port->rx_icr;
+       *port->tx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
+       x = *port->tx_icr;
+       arch_local_sti();
 }
 
 /*
@@ -947,11 +1029,66 @@ static void mn10300_serial_change_speed(struct mn10300_serial_port *port,
        /* Determine divisor based on baud rate */
        battempt = 0;
 
-       if (div_timer == MNSCx_DIV_TIMER_16BIT)
-               scxctr |= SC0CTR_CK_TM8UFLOW_8; /* ( == SC1CTR_CK_TM9UFLOW_8
-                                                *   == SC2CTR_CK_TM10UFLOW) */
-       else if (div_timer == MNSCx_DIV_TIMER_8BIT)
+       switch (port->uart.line) {
+#ifdef CONFIG_MN10300_TTYSM0
+       case 0: /* ttySM0 */
+#if   defined(CONFIG_MN10300_TTYSM0_TIMER8)
+               scxctr |= SC0CTR_CK_TM8UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM0_TIMER0)
+               scxctr |= SC0CTR_CK_TM0UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM0_TIMER2)
                scxctr |= SC0CTR_CK_TM2UFLOW_8;
+#else
+#error "Unknown config for ttySM0"
+#endif
+               break;
+#endif /* CONFIG_MN10300_TTYSM0 */
+
+#ifdef CONFIG_MN10300_TTYSM1
+       case 1: /* ttySM1 */
+#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
+#if   defined(CONFIG_MN10300_TTYSM1_TIMER9)
+               scxctr |= SC1CTR_CK_TM9UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
+               scxctr |= SC1CTR_CK_TM3UFLOW_8;
+#else
+#error "Unknown config for ttySM1"
+#endif
+#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+#if defined(CONFIG_MN10300_TTYSM1_TIMER12)
+               scxctr |= SC1CTR_CK_TM12UFLOW_8;
+#else
+#error "Unknown config for ttySM1"
+#endif
+#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+               break;
+#endif /* CONFIG_MN10300_TTYSM1 */
+
+#ifdef CONFIG_MN10300_TTYSM2
+       case 2: /* ttySM2 */
+#if defined(CONFIG_AM33_2)
+#if   defined(CONFIG_MN10300_TTYSM2_TIMER10)
+               scxctr |= SC2CTR_CK_TM10UFLOW;
+#else
+#error "Unknown config for ttySM2"
+#endif
+#else /* CONFIG_AM33_2 */
+#if   defined(CONFIG_MN10300_TTYSM2_TIMER9)
+               scxctr |= SC2CTR_CK_TM9UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER1)
+               scxctr |= SC2CTR_CK_TM1UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER3)
+               scxctr |= SC2CTR_CK_TM3UFLOW_8;
+#else
+#error "Unknown config for ttySM2"
+#endif
+#endif /* CONFIG_AM33_2 */
+               break;
+#endif /* CONFIG_MN10300_TTYSM2 */
+
+       default:
+               break;
+       }
 
 try_alternative:
        baud = uart_get_baud_rate(&port->uart, new, old, 0,
@@ -1195,6 +1332,12 @@ static void mn10300_serial_set_termios(struct uart_port *_port,
                ctr &= ~SC2CTR_TWE;
                *port->_control = ctr;
        }
+
+       /* change Transfer bit-order (LSB/MSB) */
+       if (new->c_cflag & CODMSB)
+               *port->_control |= SC01CTR_OD_MSBFIRST; /* MSB MODE */
+       else
+               *port->_control &= ~SC01CTR_OD_MSBFIRST; /* LSB MODE */
 }
 
 /*
@@ -1302,11 +1445,16 @@ static int __init mn10300_serial_init(void)
        printk(KERN_INFO "%s version %s (%s)\n",
               serial_name, serial_version, serial_revdate);
 
-#ifdef CONFIG_MN10300_TTYSM2
-       SC2TIM = 8; /* make the baud base of timer 2 IOCLK/8 */
+#if defined(CONFIG_MN10300_TTYSM2) && defined(CONFIG_AM33_2)
+       {
+               int tmp;
+               SC2TIM = 8; /* make the baud base of timer 2 IOCLK/8 */
+               tmp = SC2TIM;
+       }
 #endif
 
-       set_intr_stub(EXCEP_IRQ_LEVEL1, mn10300_serial_vdma_interrupt);
+       set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL),
+               mn10300_serial_vdma_interrupt);
 
        ret = uart_register_driver(&mn10300_serial_driver);
        if (!ret) {
@@ -1366,9 +1514,11 @@ static void mn10300_serial_console_write(struct console *co,
        port = mn10300_serial_ports[co->index];
 
        /* firstly hijack the serial port from the "virtual DMA" controller */
+       arch_local_cli();
        txicr = *port->tx_icr;
-       *port->tx_icr = GxICR_LEVEL_1;
+       *port->tx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
        tmp = *port->tx_icr;
+       arch_local_sti();
 
        /* the transmitter may be disabled */
        scxctr = *port->_control;
@@ -1422,8 +1572,10 @@ static void mn10300_serial_console_write(struct console *co,
        if (!(scxctr & SC01CTR_TXE))
                *port->_control = scxctr;
 
+       arch_local_cli();
        *port->tx_icr = txicr;
        tmp = *port->tx_icr;
+       arch_local_sti();
 }
 
 /*
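
For reference on the pattern repeated throughout the serial hunks above: every write to a GxICR interrupt-control register is now bracketed by arch_local_cli_save()/arch_local_irq_restore(), and the register is read back afterwards to flush the CPU's write buffer. A minimal stand-alone sketch of that idiom follows; set_icr_level() is a hypothetical name used only for illustration, and the includes are assumed to be where these symbols live on mn10300.

#include <linux/types.h>	/* u16 */
#include <linux/irqflags.h>	/* pulls in the mn10300 arch_local_*() helpers */
#include <asm/intctl-regs.h>	/* GxICR interrupt-control registers */

/* hypothetical helper showing the write / read-back / restore idiom */
static void set_icr_level(volatile u16 *icr, u16 level)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();	/* mask local interrupts */
	*icr = level;			/* program the new interrupt level */
	tmp = *icr;			/* read back to flush the write buffer */
	arch_local_irq_restore(flags);	/* restore the saved IRQ state */
}
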
index 9962447..f2f5c9c 100644
@@ -16,6 +16,7 @@
 #include <asm/intctl-regs.h>
 #include <asm/timer-regs.h>
 #include <asm/frame.inc>
+#include <linux/threads.h>
 
        .text
 
@@ -53,7 +54,13 @@ watchdog_handler:
        .type   touch_nmi_watchdog,@function
 touch_nmi_watchdog:
        clr     d0
-       mov     d0,(watchdog_alert_counter)
+       clr     d1
+       mov     watchdog_alert_counter, a0
+       setlb
+       mov     d0, (a0+)
+       inc     d1
+       cmp     NR_CPUS, d1
+       lne
        ret     [],0
 
        .size   touch_nmi_watchdog,.-touch_nmi_watchdog
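
The rewritten touch_nmi_watchdog above replaces the single store to the old global counter with a setlb/lne loop: d0 holds zero, a0 walks the array, and "mov d0, (a0+)" clears one 32-bit slot per iteration until d1 reaches NR_CPUS. A rough C equivalent is sketched below, assuming the watchdog_alert_counter[NR_CPUS] array introduced in the next file; it is illustrative only, not code taken from the patch.

#include <linux/threads.h>	/* NR_CPUS */

extern unsigned int watchdog_alert_counter[NR_CPUS];

/* sketch: clear every per-CPU alert counter, as the assembly loop does */
void touch_nmi_watchdog(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		watchdog_alert_counter[i] = 0;
}
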
index f362d9d..c5e12bf 100644
@@ -30,7 +30,7 @@
 static DEFINE_SPINLOCK(watchdog_print_lock);
 static unsigned int watchdog;
 static unsigned int watchdog_hz = 1;
-unsigned int watchdog_alert_counter;
+unsigned int watchdog_alert_counter[NR_CPUS];
 
 EXPORT_SYMBOL(touch_nmi_watchdog);
 
@@ -39,9 +39,6 @@ EXPORT_SYMBOL(touch_nmi_watchdog);
  * is to check its timer makes IRQ counts. If they are not
  * changing then that CPU has some problem.
  *
- * as these watchdog NMI IRQs are generated on every CPU, we only
- * have to check the current processor.
- *
  * since NMIs dont listen to _any_ locks, we have to be extremely
  * careful not to rely on unsafe variables. The printk might lock
  * up though, so we have to break up any console locks first ...
@@ -69,8 +66,8 @@ int __init check_watchdog(void)
 
        printk(KERN_INFO "OK.\n");
 
-       /* now that we know it works we can reduce NMI frequency to
-        * something more reasonable; makes a difference in some configs
+       /* now that we know it works we can reduce NMI frequency to something
+        * more reasonable; makes a difference in some configs
         */
        watchdog_hz = 1;
 
@@ -121,15 +118,22 @@ void __init watchdog_go(void)
        }
 }
 
+#ifdef CONFIG_SMP
+static void watchdog_dump_register(void *dummy)
+{
+       printk(KERN_ERR "--- Register Dump (CPU%d) ---\n", CPUID);
+       show_registers(current_frame());
+}
+#endif
+
 asmlinkage
 void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
 {
-
        /*
         * Since current-> is always on the stack, and we always switch
         * the stack NMI-atomically, it's safe to use smp_processor_id().
         */
-       int sum, cpu = smp_processor_id();
+       int sum, cpu;
        int irq = NMIIRQ;
        u8 wdt, tmp;
 
@@ -138,43 +142,61 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
        tmp = WDCTR;
        NMICR = NMICR_WDIF;
 
-       nmi_count(cpu)++;
+       nmi_count(smp_processor_id())++;
        kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
-       sum = irq_stat[cpu].__irq_count;
-
-       if (last_irq_sums[cpu] == sum) {
-               /*
-                * Ayiee, looks like this CPU is stuck ...
-                * wait a few IRQs (5 seconds) before doing the oops ...
-                */
-               watchdog_alert_counter++;
-               if (watchdog_alert_counter == 5 * watchdog_hz) {
-                       spin_lock(&watchdog_print_lock);
+
+       for_each_online_cpu(cpu) {
+
+               sum = irq_stat[cpu].__irq_count;
+
+               if ((last_irq_sums[cpu] == sum)
+#if defined(CONFIG_GDBSTUB) && defined(CONFIG_SMP)
+                       && !(CHK_GDBSTUB_BUSY()
+                            || atomic_read(&cpu_doing_single_step))
+#endif
+                       ) {
                        /*
-                        * We are in trouble anyway, lets at least try
-                        * to get a message out.
+                        * Ayiee, looks like this CPU is stuck ...
+                        * wait a few IRQs (5 seconds) before doing the oops ...
                         */
-                       bust_spinlocks(1);
-                       printk(KERN_ERR
-                              "NMI Watchdog detected LOCKUP on CPU%d,"
-                              " pc %08lx, registers:\n",
-                              cpu, regs->pc);
-                       show_registers(regs);
-                       printk("console shuts up ...\n");
-                       console_silent();
-                       spin_unlock(&watchdog_print_lock);
-                       bust_spinlocks(0);
+                       watchdog_alert_counter[cpu]++;
+                       if (watchdog_alert_counter[cpu] == 5 * watchdog_hz) {
+                               spin_lock(&watchdog_print_lock);
+                               /*
+                                * We are in trouble anyway, lets at least try
+                                * to get a message out.
+                                */
+                               bust_spinlocks(1);
+                               printk(KERN_ERR
+                                      "NMI Watchdog detected LOCKUP on CPU%d,"
+                                      " pc %08lx, registers:\n",
+                                      cpu, regs->pc);
+#ifdef CONFIG_SMP
+                               printk(KERN_ERR
+                                      "--- Register Dump (CPU%d) ---\n",
+                                      CPUID);
+#endif
+                               show_registers(regs);
+#ifdef CONFIG_SMP
+                               smp_nmi_call_function(watchdog_dump_register,
+                                       NULL, 1);
+#endif
+                               printk(KERN_NOTICE "console shuts up ...\n");
+                               console_silent();
+                               spin_unlock(&watchdog_print_lock);
+                               bust_spinlocks(0);
 #ifdef CONFIG_GDBSTUB
-                       if (gdbstub_busy)
-                               gdbstub_exception(regs, excep);
-                       else
-                               gdbstub_intercept(regs, excep);
+                               if (CHK_GDBSTUB_BUSY_AND_ACTIVE())
+                                       gdbstub_exception(regs, excep);
+                               else
+                                       gdbstub_intercept(regs, excep);
 #endif
-                       do_exit(SIGSEGV);
+                               do_exit(SIGSEGV);
+                       }
+               } else {
+                       last_irq_sums[cpu] = sum;
+                       watchdog_alert_counter[cpu] = 0;
                }
-       } else {
-               last_irq_sums[cpu] = sum;
-               watchdog_alert_counter = 0;
        }
 
        WDCTR = wdt | WDCTR_WDRST;
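
To summarise the new per-CPU stall test: on each watchdog NMI the handler now walks every online CPU and compares its __irq_count with the value recorded on the previous tick; a lockup is only reported once the count has stayed frozen for 5 * watchdog_hz consecutive ticks (about five seconds at the default watchdog_hz of 1), and with CONFIG_GDBSTUB on SMP a frozen count is not treated as a lockup while the GDB stub is busy or single-stepping. A stand-alone sketch of just that test is below; cpu_appears_stuck() is a hypothetical name, and it relies on the last_irq_sums[] and watchdog_alert_counter[] arrays referenced in the hunk above.

/* sketch of the per-CPU stall check done inside watchdog_interrupt() */
static int cpu_appears_stuck(int cpu)
{
	int sum = irq_stat[cpu].__irq_count;

	if (last_irq_sums[cpu] != sum) {
		/* IRQs are still being counted on this CPU - reset the alert */
		last_irq_sums[cpu] = sum;
		watchdog_alert_counter[cpu] = 0;
		return 0;
	}

	/* no IRQ progress since the last NMI tick */
	return ++watchdog_alert_counter[cpu] == 5 * watchdog_hz;
}
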
index f48373e..0d0f804 100644
@@ -57,6 +57,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
 void (*pm_power_off)(void);
 EXPORT_SYMBOL(pm_power_off);
 
+#if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
 /*
  * we use this if we don't have any better idle routine
  */
@@ -69,6 +70,35 @@ static void default_idle(void)
                local_irq_enable();
 }
 
+#else /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU  */
+/*
+ * On SMP it's slightly faster (but much more power-consuming!)
+ * to poll the ->work.need_resched flag instead of waiting for the
+ * cross-CPU IPI to arrive. Use this option with caution.
+ */
+static inline void poll_idle(void)
+{
+       int oldval;
+
+       local_irq_enable();
+
+       /*
+        * Deal with another CPU just having chosen a thread to
+        * run here:
+        */
+       oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
+
+       if (!oldval) {
+               set_thread_flag(TIF_POLLING_NRFLAG);
+               while (!need_resched())
+                       cpu_relax();
+               clear_thread_flag(TIF_POLLING_NRFLAG);
+       } else {
+               set_need_resched();
+       }
+}
+#endif /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
+
 /*
  * the idle thread
  * - there's no useful work to be done, so just try to conserve power and have
@@