Merge remote branch 'tip/perf/core' into oprofile/core
Robert Richter [Fri, 15 Oct 2010 10:45:00 +0000 (12:45 +0200)]
Conflicts:
arch/arm/oprofile/common.c
kernel/perf_event.c

463 files changed:
CREDITS
Documentation/kprobes.txt
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/kernel/entry.S
arch/alpha/kernel/perf_event.c
arch/alpha/kernel/process.c
arch/alpha/kernel/signal.c
arch/alpha/kernel/systbls.S
arch/arm/Kconfig
arch/arm/boot/compressed/Makefile
arch/arm/common/it8152.c
arch/arm/include/asm/pgtable.h
arch/arm/kernel/entry-common.S
arch/arm/kernel/perf_event.c
arch/arm/mach-at91/at91sam9g45_devices.c
arch/arm/mach-davinci/dm355.c
arch/arm/mach-davinci/dm365.c
arch/arm/mach-davinci/dm644x.c
arch/arm/mach-davinci/dm646x.c
arch/arm/mach-dove/include/mach/io.h
arch/arm/mach-ixp4xx/common-pci.c
arch/arm/mach-ixp4xx/include/mach/hardware.h
arch/arm/mach-kirkwood/include/mach/kirkwood.h
arch/arm/mach-kirkwood/pcie.c
arch/arm/mach-mmp/include/mach/system.h
arch/arm/mach-pxa/cpufreq-pxa2xx.c
arch/arm/mach-pxa/include/mach/hardware.h
arch/arm/mach-pxa/include/mach/io.h
arch/arm/mach-pxa/palm27x.c
arch/arm/mach-pxa/vpac270.c
arch/arm/mach-u300/include/mach/gpio.h
arch/arm/mach-vexpress/ct-ca9x4.c
arch/arm/mm/alignment.c
arch/arm/mm/mmu.c
arch/arm/mm/proc-v7.S
arch/arm/plat-nomadik/timer.c
arch/arm/plat-omap/Kconfig
arch/arm/plat-omap/mcbsp.c
arch/arm/plat-omap/sram.c
arch/avr32/kernel/module.c
arch/h8300/kernel/module.c
arch/m32r/include/asm/signal.h
arch/m32r/include/asm/unistd.h
arch/m32r/kernel/entry.S
arch/m32r/kernel/ptrace.c
arch/m32r/kernel/signal.c
arch/m68k/mac/macboing.c
arch/mips/Kconfig
arch/mips/alchemy/common/prom.c
arch/mips/boot/compressed/Makefile
arch/mips/cavium-octeon/Kconfig
arch/mips/cavium-octeon/cpu.c
arch/mips/cavium-octeon/executive/Makefile
arch/mips/include/asm/atomic.h
arch/mips/include/asm/cop2.h
arch/mips/include/asm/gic.h
arch/mips/include/asm/mach-tx49xx/kmalloc.h
arch/mips/include/asm/mips-boards/maltaint.h
arch/mips/include/asm/page.h
arch/mips/include/asm/thread_info.h
arch/mips/include/asm/unistd.h
arch/mips/kernel/irq-gic.c
arch/mips/kernel/kgdb.c
arch/mips/kernel/kspd.c
arch/mips/kernel/linux32.c
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/scall64-o32.S
arch/mips/mm/dma-default.c
arch/mips/mm/sc-rm7k.c
arch/mips/mti-malta/malta-int.c
arch/mips/pci/pci-rc32434.c
arch/mips/pnx8550/common/reset.c
arch/mips/pnx8550/common/setup.c
arch/mn10300/Kconfig
arch/mn10300/Kconfig.debug
arch/mn10300/include/asm/bitops.h
arch/mn10300/include/asm/signal.h
arch/mn10300/kernel/module.c
arch/mn10300/kernel/signal.c
arch/mn10300/mm/Makefile
arch/mn10300/mm/cache-disabled.c [new file with mode: 0644]
arch/mn10300/mm/cache.c
arch/parisc/kernel/module.c
arch/powerpc/kernel/module.c
arch/powerpc/kernel/perf_callchain.c
arch/powerpc/kernel/perf_event.c
arch/powerpc/kernel/perf_event_fsl_emb.c
arch/powerpc/platforms/512x/clock.c
arch/powerpc/platforms/52xx/efika.c
arch/powerpc/platforms/52xx/mpc52xx_common.c
arch/s390/kernel/module.c
arch/sh/kernel/module.c
arch/sh/kernel/perf_callchain.c
arch/sh/kernel/perf_event.c
arch/sparc/Kconfig
arch/sparc/include/asm/jump_label.h [new file with mode: 0644]
arch/sparc/kernel/Makefile
arch/sparc/kernel/jump_label.c [new file with mode: 0644]
arch/sparc/kernel/module.c
arch/sparc/kernel/perf_event.c
arch/tile/kernel/intvec_32.S
arch/um/drivers/net_kern.c
arch/um/kernel/exec.c
arch/um/kernel/internal.h
arch/um/kernel/syscall.c
arch/x86/Kconfig
arch/x86/boot/early_serial_console.c
arch/x86/include/asm/alternative.h
arch/x86/include/asm/amd_iommu_proto.h
arch/x86/include/asm/amd_iommu_types.h
arch/x86/include/asm/bitops.h
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/jump_label.h [new file with mode: 0644]
arch/x86/include/asm/perf_event_p4.h
arch/x86/kernel/Makefile
arch/x86/kernel/acpi/cstate.c
arch/x86/kernel/alternative.c
arch/x86/kernel/amd_iommu.c
arch/x86/kernel/amd_iommu_init.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/cpu.h
arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_ds.c
arch/x86/kernel/cpu/perf_event_p4.c
arch/x86/kernel/cpu/scattered.c
arch/x86/kernel/ftrace.c
arch/x86/kernel/hpet.c
arch/x86/kernel/jump_label.c [new file with mode: 0644]
arch/x86/kernel/kprobes.c
arch/x86/kernel/module.c
arch/x86/kernel/setup.c
arch/x86/mm/fault.c
arch/x86/mm/kmemcheck/kmemcheck.c
arch/x86/xen/time.c
block/blk-merge.c
drivers/acpi/Kconfig
drivers/acpi/acpi_pad.c
drivers/acpi/acpica/aclocal.h
drivers/acpi/acpica/exutils.c
drivers/acpi/acpica/rsutils.c
drivers/acpi/apei/Kconfig
drivers/acpi/apei/apei-base.c
drivers/acpi/apei/einj.c
drivers/acpi/apei/erst-dbg.c
drivers/acpi/apei/erst.c
drivers/acpi/apei/ghes.c
drivers/acpi/apei/hest.c
drivers/acpi/atomicio.c
drivers/acpi/battery.c
drivers/acpi/blacklist.c
drivers/acpi/bus.c
drivers/acpi/fan.c
drivers/acpi/processor_core.c
drivers/acpi/processor_driver.c
drivers/acpi/processor_perflib.c
drivers/acpi/sleep.c
drivers/acpi/sysfs.c
drivers/acpi/video_detect.c
drivers/ata/ahci.c
drivers/ata/ahci.h
drivers/ata/ahci_platform.c
drivers/ata/libahci.c
drivers/block/pktcdvd.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/cpuidle/governors/menu.c
drivers/dma/mv_xor.c
drivers/dma/shdma.c
drivers/edac/edac_mc.c
drivers/edac/i7core_edac.c
drivers/gpu/drm/drm_buffer.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_info.c
drivers/gpu/drm/drm_vm.c
drivers/gpu/drm/i810/i810_dma.c
drivers/gpu/drm/i830/i830_dma.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fb.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/nouveau/nouveau_notifier.c
drivers/gpu/drm/radeon/atombios.h
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_fb.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_page_alloc.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/vga/vgaarb.c
drivers/hwmon/Kconfig
drivers/hwmon/coretemp.c
drivers/hwmon/f71882fg.c
drivers/hwmon/lis3lv02d.c
drivers/hwmon/pkgtemp.c
drivers/i2c/busses/i2c-davinci.c
drivers/i2c/busses/i2c-octeon.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/idle/intel_idle.c [changed mode: 0755->0644]
drivers/infiniband/hw/cxgb3/iwch_cm.c
drivers/leds/leds-ns2.c
drivers/mfd/max8925-core.c
drivers/mfd/wm831x-irq.c
drivers/misc/Kconfig
drivers/misc/Makefile
drivers/misc/vmw_balloon.c [moved from drivers/misc/vmware_balloon.c with 100% similarity]
drivers/mmc/host/sdhci-s3c.c
drivers/mtd/nand/omap2.c
drivers/net/3c59x.c
drivers/net/atlx/atl1.c
drivers/net/e1000e/hw.h
drivers/net/e1000e/ich8lan.c
drivers/net/e1000e/netdev.c
drivers/net/ibm_newemac/core.c
drivers/net/netxen/netxen_nic_init.c
drivers/net/qlcnic/qlcnic_init.c
drivers/net/rionet.c
drivers/net/sgiseeq.c
drivers/net/smsc911x.c
drivers/net/tulip/de2104x.c
drivers/net/wireless/iwlwifi/iwl-agn-lib.c
drivers/net/wireless/iwlwifi/iwl-core.c
drivers/net/wireless/iwlwifi/iwl3945-base.c
drivers/oprofile/oprofile_perf.c
drivers/pci/intel-iommu.c
drivers/pci/iov.c
drivers/pci/pci.h
drivers/pci/quirks.c
drivers/pcmcia/pcmcia_resource.c
drivers/pcmcia/pd6729.c
drivers/platform/x86/thinkpad_acpi.c
drivers/regulator/core.c
drivers/regulator/max8649.c
drivers/rtc/rtc-ab3100.c
drivers/rtc/rtc-s3c.c
drivers/s390/net/ctcm_main.c
drivers/serial/mfd.c
drivers/serial/mrst_max3110.c
drivers/spi/spi.c
drivers/spi/spi_gpio.c
drivers/spi/spi_mpc8xxx.c
drivers/staging/ti-st/st.h
drivers/staging/ti-st/st_core.c
drivers/staging/ti-st/st_core.h
drivers/staging/ti-st/st_kim.c
drivers/usb/core/Kconfig
drivers/usb/core/file.c
drivers/usb/core/message.c
drivers/usb/musb/cppi_dma.c
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/musb_gadget.h
drivers/usb/musb/musb_gadget_ep0.c
drivers/usb/musb/musb_host.c
drivers/vhost/net.c
drivers/vhost/vhost.c
drivers/video/console/fbcon.c
drivers/video/efifb.c
drivers/video/pxa168fb.c
drivers/video/sis/sis_main.c
drivers/xen/xenbus/xenbus_probe.c
fs/aio.c
fs/cifs/cifssmb.c
fs/cifs/inode.c
fs/compat.c
fs/fs-writeback.c
fs/fuse/dev.c
fs/ocfs2/acl.c
fs/ocfs2/cluster/tcp.c
fs/ocfs2/dir.c
fs/ocfs2/dlm/dlmcommon.h
fs/ocfs2/dlm/dlmdebug.c
fs/ocfs2/dlm/dlmdomain.c
fs/ocfs2/dlm/dlmmaster.c
fs/ocfs2/dlmglue.h
fs/ocfs2/ocfs2_fs.h
fs/ocfs2/ocfs2_ioctl.h
fs/ocfs2/refcounttree.c
fs/ocfs2/reservations.c
fs/ocfs2/suballoc.c
fs/ocfs2/symlink.c
fs/ocfs2/xattr.c
fs/proc/base.c
fs/proc/task_mmu.c
fs/proc/vmcore.c
fs/reiserfs/ioctl.c
fs/xfs/xfs_log_cil.c
fs/xfs/xfs_log_priv.h
include/acpi/acpixf.h
include/asm-generic/hardirq.h
include/asm-generic/vmlinux.lds.h
include/drm/drmP.h
include/drm/drm_pciids.h
include/linux/cpuidle.h
include/linux/dma-mapping.h
include/linux/dmaengine.h
include/linux/dynamic_debug.h
include/linux/ftrace_event.h
include/linux/interrupt.h
include/linux/jump_label.h [new file with mode: 0644]
include/linux/module.h
include/linux/netlink.h
include/linux/pci_ids.h
include/linux/percpu.h
include/linux/perf_event.h
include/linux/rcupdate.h
include/linux/sched.h
include/linux/socket.h
include/linux/stop_machine.h
include/linux/tracepoint.h
include/linux/wait.h
include/net/addrconf.h
include/net/dst.h
include/net/route.h
include/net/xfrm.h
include/trace/events/irq.h
include/trace/events/napi.h
include/trace/events/net.h [new file with mode: 0644]
include/trace/events/power.h
include/trace/events/skb.h
ipc/sem.c
kernel/Makefile
kernel/exit.c
kernel/fork.c
kernel/hw_breakpoint.c
kernel/jump_label.c [new file with mode: 0644]
kernel/kfifo.c
kernel/kprobes.c
kernel/module.c
kernel/perf_event.c
kernel/sched.c
kernel/smp.c
kernel/test_kprobes.c
kernel/trace/Kconfig
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace_event_perf.c
kernel/trace/trace_events.c
kernel/trace/trace_functions_graph.c
kernel/trace/trace_workqueue.c
kernel/tracepoint.c
kernel/watchdog.c
lib/Kconfig.debug
lib/bug.c
lib/dynamic_debug.c
lib/list_sort.c
mm/fremap.c
mm/hugetlb.c
mm/ksm.c
mm/mmap.c
mm/oom_kill.c
mm/percpu.c
mm/rmap.c
mm/vmscan.c
net/8021q/vlan_core.c
net/9p/trans_rdma.c
net/9p/trans_virtio.c
net/atm/br2684.c
net/core/datagram.c
net/core/dev.c
net/core/iovec.c
net/core/net-traces.c
net/core/skbuff.c
net/core/sock.c
net/ipv4/Kconfig
net/ipv4/ip_gre.c
net/ipv4/ip_output.c
net/ipv4/netfilter/ipt_REJECT.c
net/ipv4/netfilter/nf_defrag_ipv4.c
net/ipv4/netfilter/nf_nat_snmp_basic.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_timer.c
net/ipv4/xfrm4_policy.c
net/ipv4/xfrm4_state.c
net/ipv6/addrconf.c
net/ipv6/addrlabel.c
net/ipv6/ip6_output.c
net/ipv6/route.c
net/ipv6/xfrm6_state.c
net/mac80211/rx.c
net/netfilter/nf_conntrack_extend.c
net/netfilter/nf_conntrack_sip.c
net/netfilter/nf_tproxy_core.c
net/phonet/pep.c
net/rds/tcp_connect.c
net/rds/tcp_listen.c
net/rds/tcp_recv.c
net/rds/tcp_send.c
net/rose/af_rose.c
net/sunrpc/xprtsock.c
net/wireless/wext-priv.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
samples/kfifo/dma-example.c
scripts/Makefile
scripts/Makefile.build
scripts/Makefile.lib
scripts/basic/Makefile
scripts/basic/hash.c [deleted file]
scripts/gcc-goto.sh [new file with mode: 0644]
scripts/recordmcount.c [new file with mode: 0644]
scripts/recordmcount.h [new file with mode: 0644]
security/tomoyo/common.c
security/tomoyo/common.h
sound/core/control.c
sound/i2c/other/ak4xxx-adda.c
sound/pci/hda/patch_analog.c
sound/pci/hda/patch_realtek.c
sound/pci/oxygen/oxygen.c
sound/pci/rme9652/hdsp.c
sound/pci/rme9652/hdspm.c
sound/soc/sh/migor.c
sound/soc/soc-cache.c
tools/perf/Documentation/perf-annotate.txt
tools/perf/Documentation/perf-report.txt
tools/perf/Makefile
tools/perf/builtin-annotate.c
tools/perf/builtin-report.c
tools/perf/feature-tests.mak
tools/perf/scripts/python/bin/netdev-times-record [new file with mode: 0644]
tools/perf/scripts/python/bin/netdev-times-report [new file with mode: 0644]
tools/perf/scripts/python/netdev-times.py [new file with mode: 0644]
tools/perf/util/cache.h
tools/perf/util/callchain.c
tools/perf/util/callchain.h
tools/perf/util/hist.c
tools/perf/util/path.c
tools/perf/util/sort.h
tools/perf/util/symbol.c
tools/perf/util/symbol.h
tools/perf/util/trace-event-scripting.c
tools/perf/util/ui/browser.c
tools/perf/util/ui/browser.h
tools/perf/util/ui/browsers/annotate.c
tools/perf/util/ui/browsers/hists.c
tools/perf/util/ui/browsers/map.c
tools/perf/util/ui/util.c
tools/perf/util/util.h
virt/kvm/eventfd.c
virt/kvm/kvm_main.c

diff --git a/CREDITS b/CREDITS
index 72b4878..41d8e63 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -3554,12 +3554,12 @@ E: cvance@nai.com
 D: portions of the Linux Security Module (LSM) framework and security modules
 
 N: Petr Vandrovec
-E: vandrove@vc.cvut.cz
+E: petr@vandrovec.name
 D: Small contributions to ncpfs
 D: Matrox framebuffer driver
-S: Chudenicka 8
-S: 10200 Prague 10, Hostivar
-S: Czech Republic
+S: 21513 Conradia Ct
+S: Cupertino, CA 95014
+S: USA
 
 N: Thibaut Varene
 E: T-Bone@parisc-linux.org
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 1762b81..741fe66 100644 (file)
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -542,9 +542,11 @@ Kprobes does not use mutexes or allocate memory except during
 registration and unregistration.
 
 Probe handlers are run with preemption disabled.  Depending on the
-architecture, handlers may also run with interrupts disabled.  In any
-case, your handler should not yield the CPU (e.g., by attempting to
-acquire a semaphore).
+architecture and optimization state, handlers may also run with
+interrupts disabled (e.g., kretprobe handlers and optimized kprobe
+handlers run without interrupt disabled on x86/x86-64).  In any case,
+your handler should not yield the CPU (e.g., by attempting to acquire
+a semaphore).
 
 Since a return probe is implemented by replacing the return
 address with the trampoline's address, stack backtraces and calls
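The constraint above is easy to honor in practice: keep handlers short and non-blocking. A hedged sketch of a pre-handler module that stays within it — it only logs and returns, never sleeping or taking a semaphore. The probed symbol and all names here are illustrative choices, not part of this patch:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

/* Illustrative target; any kprobe-able kernel function would do. */
static struct kprobe kp = {
	.symbol_name	= "do_fork",
};

/* Runs with preemption disabled and, depending on the architecture
 * and optimization state, possibly with interrupts disabled too:
 * do not yield the CPU here (no semaphores, no sleeping calls). */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %p\n", p->addr);
	return 0;
}

static int __init kprobe_example_init(void)
{
	kp.pre_handler = handler_pre;
	return register_kprobe(&kp);
}

static void __exit kprobe_example_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kprobe_example_init);
module_exit(kprobe_example_exit);
MODULE_LICENSE("GPL");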
diff --git a/MAINTAINERS b/MAINTAINERS
index 50b8148..f46d8e6 100644 (file)
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -962,6 +962,13 @@ W: http://www.fluff.org/ben/linux/
 S:     Maintained
 F:     arch/arm/mach-s3c6410/
 
+ARM/S5P ARM ARCHITECTURES
+M:     Kukjin Kim <kgene.kim@samsung.com>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
+S:     Maintained
+F:     arch/arm/mach-s5p*/
+
 ARM/SHMOBILE ARM ARCHITECTURE
 M:     Paul Mundt <lethal@linux-sh.org>
 M:     Magnus Damm <magnus.damm@gmail.com>
@@ -1220,7 +1227,7 @@ F:        drivers/auxdisplay/
 F:     include/linux/cfag12864b.h
 
 AVR32 ARCHITECTURE
-M:     Haavard Skinnemoen <hskinnemoen@atmel.com>
+M:     Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
 W:     http://www.atmel.com/products/AVR32/
 W:     http://avr32linux.org/
 W:     http://avrfreaks.net/
@@ -1228,7 +1235,7 @@ S:        Supported
 F:     arch/avr32/
 
 AVR32/AT32AP MACHINE SUPPORT
-M:     Haavard Skinnemoen <hskinnemoen@atmel.com>
+M:     Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
 S:     Supported
 F:     arch/avr32/mach-at32ap/
 
@@ -2199,6 +2206,12 @@ W:       http://acpi4asus.sf.net
 S:     Maintained
 F:     drivers/platform/x86/eeepc-laptop.c
 
+EFIFB FRAMEBUFFER DRIVER
+L:     linux-fbdev@vger.kernel.org
+M:     Peter Jones <pjones@redhat.com>
+S:     Maintained
+F:     drivers/video/efifb.c
+
 EFS FILESYSTEM
 W:     http://aeschi.ch.eu.org/efs/
 S:     Orphan
@@ -2662,6 +2675,8 @@ M:        Guenter Roeck <guenter.roeck@ericsson.com>
 L:     lm-sensors@lm-sensors.org
 W:     http://www.lm-sensors.org/
 T:     quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
+T:     quilt kernel.org/pub/linux/kernel/people/groeck/linux-staging/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
 S:     Maintained
 F:     Documentation/hwmon/
 F:     drivers/hwmon/
@@ -3773,9 +3788,8 @@ W:        http://www.syskonnect.com
 S:     Supported
 
 MATROX FRAMEBUFFER DRIVER
-M:     Petr Vandrovec <vandrove@vc.cvut.cz>
 L:     linux-fbdev@vger.kernel.org
-S:     Maintained
+S:     Orphan
 F:     drivers/video/matrox/matroxfb_*
 F:     include/linux/matroxfb.h
 
@@ -3899,10 +3913,8 @@ F:       Documentation/serial/moxa-smartio
 F:     drivers/char/mxser.*
 
 MSI LAPTOP SUPPORT
-M:     Lennart Poettering <mzxreary@0pointer.de>
+M:     Lee, Chun-Yi <jlee@novell.com>
 L:     platform-driver-x86@vger.kernel.org
-W:     https://tango.0pointer.de/mailman/listinfo/s270-linux
-W:     http://0pointer.de/lennart/tchibo.html
 S:     Maintained
 F:     drivers/platform/x86/msi-laptop.c
 
@@ -3919,8 +3931,10 @@ S:       Supported
 F:     drivers/mfd/
 
 MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM
-S:     Orphan
+M:     Chris Ball <cjb@laptop.org>
 L:     linux-mmc@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git
+S:     Maintained
 F:     drivers/mmc/
 F:     include/linux/mmc/
 
@@ -3962,8 +3976,8 @@ S:        Maintained
 F:     drivers/net/natsemi.c
 
 NCP FILESYSTEM
-M:     Petr Vandrovec <vandrove@vc.cvut.cz>
-S:     Maintained
+M:     Petr Vandrovec <petr@vandrovec.name>
+S:     Odd Fixes
 F:     fs/ncpfs/
 
 NCR DUAL 700 SCSI DRIVER (MICROCHANNEL)
@@ -5091,8 +5105,10 @@ S:       Maintained
 F:     drivers/mmc/host/sdricoh_cs.c
 
 SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER
-S:     Orphan
+M:     Chris Ball <cjb@laptop.org>
 L:     linux-mmc@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git
+S:     Maintained
 F:     drivers/mmc/host/sdhci.*
 
 SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF)
diff --git a/Makefile b/Makefile
index 3133a57..c2362c2 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 36
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
 NAME = Sheep on Meth
 
 # *DOCUMENTATION*
@@ -568,6 +568,12 @@ endif
 
 ifdef CONFIG_FUNCTION_TRACER
 KBUILD_CFLAGS  += -pg
+ifdef CONFIG_DYNAMIC_FTRACE
+       ifdef CONFIG_HAVE_C_RECORDMCOUNT
+               BUILD_C_RECORDMCOUNT := y
+               export BUILD_C_RECORDMCOUNT
+       endif
+endif
 endif
 
 # We trigger additional mismatches with less inlining
@@ -591,6 +597,11 @@ KBUILD_CFLAGS      += $(call cc-option,-fno-strict-overflow)
 # conserve stack if available
 KBUILD_CFLAGS   += $(call cc-option,-fconserve-stack)
 
+# check for 'asm goto'
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
+       KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
+endif
+
 # Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
 # But warn user when we do so
 warn-assign = \
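For context, scripts/gcc-goto.sh (added in this merge) is what decides whether CC_HAVE_ASM_GOTO gets defined: conceptually it just asks whether $(CC) accepts an "asm goto" statement. A minimal C probe in the same spirit (a sketch, not the script's exact test program):

/* If this compiles, the compiler supports "asm goto" and the build
 * can define CC_HAVE_ASM_GOTO (used by the jump label code). */
int main(void)
{
	asm goto("" : : : : supported);
	return 0;
supported:
	return 1;
}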
diff --git a/arch/Kconfig b/arch/Kconfig
index fe48fc7..53d7f61 100644 (file)
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -158,4 +158,7 @@ config HAVE_PERF_EVENTS_NMI
          subsystem.  Also has support for calculating CPU cycle events
          to determine how many clock cycles in a given period.
 
+config HAVE_ARCH_JUMP_LABEL
+       bool
+
 source "kernel/gcov/Kconfig"
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index ab1ee0a..6d159ce 100644 (file)
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -73,8 +73,6 @@
        ldq     $20, HAE_REG($19);      \
        stq     $21, HAE_CACHE($19);    \
        stq     $21, 0($20);            \
-       ldq     $0, 0($sp);             \
-       ldq     $1, 8($sp);             \
 99:;                                   \
        ldq     $19, 72($sp);           \
        ldq     $20, 80($sp);           \
@@ -316,7 +314,7 @@ ret_from_sys_call:
        cmovne  $26, 0, $19             /* $19 = 0 => non-restartable */
        ldq     $0, SP_OFF($sp)
        and     $0, 8, $0
-       beq     $0, restore_all
+       beq     $0, ret_to_kernel
 ret_to_user:
        /* Make sure need_resched and sigpending don't change between
                sampling and the rti.  */
@@ -329,6 +327,11 @@ restore_all:
        RESTORE_ALL
        call_pal PAL_rti
 
+ret_to_kernel:
+       lda     $16, 7
+       call_pal PAL_swpipl
+       br restore_all
+
        .align 3
 $syscall_error:
        /*
@@ -657,7 +660,7 @@ kernel_thread:
        /* We don't actually care for a3 success widgetry in the kernel.
           Not for positive errno values.  */
        stq     $0, 0($sp)              /* $0 */
-       br      restore_all
+       br      ret_to_kernel
 .end kernel_thread
 
 /*
@@ -912,15 +915,6 @@ sys_execve:
 .end sys_execve
 
        .align  4
-       .globl  osf_sigprocmask
-       .ent    osf_sigprocmask
-osf_sigprocmask:
-       .prologue 0
-       mov     $sp, $18
-       jmp     $31, sys_osf_sigprocmask
-.end osf_sigprocmask
-
-       .align  4
        .globl  alpha_ni_syscall
        .ent    alpha_ni_syscall
 alpha_ni_syscall:
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 85d8e4f..1cc4968 100644 (file)
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -307,7 +307,7 @@ again:
                             new_raw_count) != prev_raw_count)
                goto again;
 
-       delta = (new_raw_count  - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
+       delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
 
        /* It is possible on very rare occasions that the PMC has overflowed
         * but the interrupt is yet to come.  Detect and fix this situation.
@@ -402,14 +402,13 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
                struct hw_perf_event *hwc = &pe->hw;
                int idx = hwc->idx;
 
-               if (cpuc->current_idx[j] != PMC_NO_INDEX) {
-                       cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
-                       continue;
+               if (cpuc->current_idx[j] == PMC_NO_INDEX) {
+                       alpha_perf_event_set_period(pe, hwc, idx);
+                       cpuc->current_idx[j] = idx;
                }
 
-               alpha_perf_event_set_period(pe, hwc, idx);
-               cpuc->current_idx[j] = idx;
-               cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
+               if (!(hwc->state & PERF_HES_STOPPED))
+                       cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
        }
        cpuc->config = cpuc->event[0]->hw.config_base;
 }
@@ -420,12 +419,13 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
  *  - this function is called from outside this module via the pmu struct
  *    returned from perf event initialisation.
  */
-static int alpha_pmu_enable(struct perf_event *event)
+static int alpha_pmu_add(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct hw_perf_event *hwc = &event->hw;
        int n0;
        int ret;
-       unsigned long flags;
+       unsigned long irq_flags;
 
        /*
         * The Sparc code has the IRQ disable first followed by the perf
@@ -435,8 +435,8 @@ static int alpha_pmu_enable(struct perf_event *event)
         * nevertheless we disable the PMCs first to enable a potential
         * final PMI to occur before we disable interrupts.
         */
-       perf_disable();
-       local_irq_save(flags);
+       perf_pmu_disable(event->pmu);
+       local_irq_save(irq_flags);
 
        /* Default to error to be returned */
        ret = -EAGAIN;
@@ -455,8 +455,12 @@ static int alpha_pmu_enable(struct perf_event *event)
                }
        }
 
-       local_irq_restore(flags);
-       perf_enable();
+       hwc->state = PERF_HES_UPTODATE;
+       if (!(flags & PERF_EF_START))
+               hwc->state |= PERF_HES_STOPPED;
+
+       local_irq_restore(irq_flags);
+       perf_pmu_enable(event->pmu);
 
        return ret;
 }
@@ -467,15 +471,15 @@ static int alpha_pmu_enable(struct perf_event *event)
  *  - this function is called from outside this module via the pmu struct
  *    returned from perf event initialisation.
  */
-static void alpha_pmu_disable(struct perf_event *event)
+static void alpha_pmu_del(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
-       unsigned long flags;
+       unsigned long irq_flags;
        int j;
 
-       perf_disable();
-       local_irq_save(flags);
+       perf_pmu_disable(event->pmu);
+       local_irq_save(irq_flags);
 
        for (j = 0; j < cpuc->n_events; j++) {
                if (event == cpuc->event[j]) {
@@ -501,8 +505,8 @@ static void alpha_pmu_disable(struct perf_event *event)
                }
        }
 
-       local_irq_restore(flags);
-       perf_enable();
+       local_irq_restore(irq_flags);
+       perf_pmu_enable(event->pmu);
 }
 
 
@@ -514,13 +518,44 @@ static void alpha_pmu_read(struct perf_event *event)
 }
 
 
-static void alpha_pmu_unthrottle(struct perf_event *event)
+static void alpha_pmu_stop(struct perf_event *event, int flags)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+       if (!(hwc->state & PERF_HES_STOPPED)) {
+               cpuc->idx_mask &= ~(1UL<<hwc->idx);
+               hwc->state |= PERF_HES_STOPPED;
+       }
+
+       if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+               alpha_perf_event_update(event, hwc, hwc->idx, 0);
+               hwc->state |= PERF_HES_UPTODATE;
+       }
+
+       if (cpuc->enabled)
+               wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx));
+}
+
+
+static void alpha_pmu_start(struct perf_event *event, int flags)
 {
        struct hw_perf_event *hwc = &event->hw;
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
+       if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+               return;
+
+       if (flags & PERF_EF_RELOAD) {
+               WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+               alpha_perf_event_set_period(event, hwc, hwc->idx);
+       }
+
+       hwc->state = 0;
+
        cpuc->idx_mask |= 1UL<<hwc->idx;
-       wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
+       if (cpuc->enabled)
+               wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
 }
 
 
@@ -642,39 +677,36 @@ static int __hw_perf_event_init(struct perf_event *event)
        return 0;
 }
 
-static const struct pmu pmu = {
-       .enable         = alpha_pmu_enable,
-       .disable        = alpha_pmu_disable,
-       .read           = alpha_pmu_read,
-       .unthrottle     = alpha_pmu_unthrottle,
-};
-
-
 /*
  * Main entry point to initialise a HW performance event.
  */
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+static int alpha_pmu_event_init(struct perf_event *event)
 {
        int err;
 
+       switch (event->attr.type) {
+       case PERF_TYPE_RAW:
+       case PERF_TYPE_HARDWARE:
+       case PERF_TYPE_HW_CACHE:
+               break;
+
+       default:
+               return -ENOENT;
+       }
+
        if (!alpha_pmu)
-               return ERR_PTR(-ENODEV);
+               return -ENODEV;
 
        /* Do the real initialisation work. */
        err = __hw_perf_event_init(event);
 
-       if (err)
-               return ERR_PTR(err);
-
-       return &pmu;
+       return err;
 }
 
-
-
 /*
  * Main entry point - enable HW performance counters.
  */
-void hw_perf_enable(void)
+static void alpha_pmu_enable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -700,7 +732,7 @@ void hw_perf_enable(void)
  * Main entry point - disable HW performance counters.
  */
 
-void hw_perf_disable(void)
+static void alpha_pmu_disable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -713,6 +745,17 @@ void hw_perf_disable(void)
        wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
 }
 
+static struct pmu pmu = {
+       .pmu_enable     = alpha_pmu_enable,
+       .pmu_disable    = alpha_pmu_disable,
+       .event_init     = alpha_pmu_event_init,
+       .add            = alpha_pmu_add,
+       .del            = alpha_pmu_del,
+       .start          = alpha_pmu_start,
+       .stop           = alpha_pmu_stop,
+       .read           = alpha_pmu_read,
+};
+
 
 /*
  * Main entry point - don't know when this is called but it
@@ -766,7 +809,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
        wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
 
        /* la_ptr is the counter that overflowed. */
-       if (unlikely(la_ptr >= perf_max_events)) {
+       if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
                /* This should never occur! */
                irq_err_count++;
                pr_warning("PMI: silly index %ld\n", la_ptr);
@@ -807,7 +850,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
                        /* Interrupts coming too quickly; "throttle" the
                         * counter, i.e., disable it for a little while.
                         */
-                       cpuc->idx_mask &= ~(1UL<<idx);
+                       alpha_pmu_stop(event, 0);
                }
        }
        wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
@@ -837,6 +880,7 @@ void __init init_hw_perf_events(void)
 
        /* And set up PMU specification */
        alpha_pmu = &ev67_pmu;
-       perf_max_events = alpha_pmu->num_pmcs;
+
+       perf_pmu_register(&pmu);
 }
 
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 842dba3..3ec3506 100644 (file)
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -356,7 +356,7 @@ dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti)
        dest[27] = pt->r27;
        dest[28] = pt->r28;
        dest[29] = pt->gp;
-       dest[30] = rdusp();
+       dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp;
        dest[31] = pt->pc;
 
        /* Once upon a time this was the PS value.  Which is stupid
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
index 0f6b51a..6f7feb5 100644 (file)
--- a/arch/alpha/kernel/signal.c
+++ b/arch/alpha/kernel/signal.c
@@ -41,46 +41,20 @@ static void do_signal(struct pt_regs *, struct switch_stack *,
 /*
  * The OSF/1 sigprocmask calling sequence is different from the
  * C sigprocmask() sequence..
- *
- * how:
- * 1 - SIG_BLOCK
- * 2 - SIG_UNBLOCK
- * 3 - SIG_SETMASK
- *
- * We change the range to -1 .. 1 in order to let gcc easily
- * use the conditional move instructions.
- *
- * Note that we don't need to acquire the kernel lock for SMP
- * operation, as all of this is local to this thread.
  */
-SYSCALL_DEFINE3(osf_sigprocmask, int, how, unsigned long, newmask,
-               struct pt_regs *, regs)
+SYSCALL_DEFINE2(osf_sigprocmask, int, how, unsigned long, newmask)
 {
-       unsigned long oldmask = -EINVAL;
-
-       if ((unsigned long)how-1 <= 2) {
-               long sign = how-2;              /* -1 .. 1 */
-               unsigned long block, unblock;
-
-               newmask &= _BLOCKABLE;
-               spin_lock_irq(&current->sighand->siglock);
-               oldmask = current->blocked.sig[0];
-
-               unblock = oldmask & ~newmask;
-               block = oldmask | newmask;
-               if (!sign)
-                       block = unblock;
-               if (sign <= 0)
-                       newmask = block;
-               if (_NSIG_WORDS > 1 && sign > 0)
-                       sigemptyset(&current->blocked);
-               current->blocked.sig[0] = newmask;
-               recalc_sigpending();
-               spin_unlock_irq(&current->sighand->siglock);
-
-               regs->r0 = 0;           /* special no error return */
+       sigset_t oldmask;
+       sigset_t mask;
+       unsigned long res;
+
+       siginitset(&mask, newmask & _BLOCKABLE);
+       res = sigprocmask(how, &mask, &oldmask);
+       if (!res) {
+               force_successful_syscall_return();
+               res = oldmask.sig[0];
        }
-       return oldmask;
+       return res;
 }
 
 SYSCALL_DEFINE3(osf_sigaction, int, sig,
@@ -94,9 +68,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig,
                old_sigset_t mask;
                if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
                    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-                   __get_user(new_ka.sa.sa_flags, &act->sa_flags))
+                   __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+                   __get_user(mask, &act->sa_mask))
                        return -EFAULT;
-               __get_user(mask, &act->sa_mask);
                siginitset(&new_ka.sa.sa_mask, mask);
                new_ka.ka_restorer = NULL;
        }
@@ -106,9 +80,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig,
        if (!ret && oact) {
                if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
                    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-                   __put_user(old_ka.sa.sa_flags, &oact->sa_flags))
+                   __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+                   __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
                        return -EFAULT;
-               __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
        }
 
        return ret;
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index ce594ef..a6a1de9 100644 (file)
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -58,7 +58,7 @@ sys_call_table:
        .quad sys_open                          /* 45 */
        .quad alpha_ni_syscall
        .quad sys_getxgid
-       .quad osf_sigprocmask
+       .quad sys_osf_sigprocmask
        .quad alpha_ni_syscall
        .quad alpha_ni_syscall                  /* 50 */
        .quad sys_acct
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 553b7cf..88c97bc 100644 (file)
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -271,7 +271,6 @@ config ARCH_AT91
        bool "Atmel AT91"
        select ARCH_REQUIRE_GPIOLIB
        select HAVE_CLK
-       select ARCH_USES_GETTIMEOFFSET
        help
          This enables support for systems based on the Atmel AT91RM9200,
          AT91SAM9 and AT91CAP9 processors.
@@ -1051,6 +1050,32 @@ config ARM_ERRATA_460075
          ACTLR register. Note that setting specific bits in the ACTLR register
          may not be available in non-secure mode.
 
+config ARM_ERRATA_742230
+       bool "ARM errata: DMB operation may be faulty"
+       depends on CPU_V7 && SMP
+       help
+         This option enables the workaround for the 742230 Cortex-A9
+         (r1p0..r2p2) erratum. Under rare circumstances, a DMB instruction
+         between two write operations may not ensure the correct visibility
+         ordering of the two writes. This workaround sets a specific bit in
+         the diagnostic register of the Cortex-A9 which causes the DMB
+         instruction to behave as a DSB, ensuring the correct behaviour of
+         the two writes.
+
+config ARM_ERRATA_742231
+       bool "ARM errata: Incorrect hazard handling in the SCU may lead to data corruption"
+       depends on CPU_V7 && SMP
+       help
+         This option enables the workaround for the 742231 Cortex-A9
+         (r2p0..r2p2) erratum. Under certain conditions, specific to the
+         Cortex-A9 MPCore micro-architecture, two CPUs working in SMP mode,
+         accessing some data located in the same cache line, may get corrupted
+         data due to bad handling of the address hazard when the line gets
+         replaced from one of the CPUs at the same time as another CPU is
+         accessing it. This workaround sets specific bits in the diagnostic
+         register of the Cortex-A9 which reduces the linefill issuing
+         capabilities of the processor.
+
 config PL310_ERRATA_588369
        bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
        depends on CACHE_L2X0 && ARCH_OMAP4
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index b23f6bc..65a7c1c 100644 (file)
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -116,5 +116,5 @@ CFLAGS_font.o := -Dstatic=
 $(obj)/font.c: $(FONTC)
        $(call cmd,shipped)
 
-$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile .config
+$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG)
        @sed "$(SEDFLAGS)" < $< > $@
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c
index 7974baa..1bec96e 100644 (file)
--- a/arch/arm/common/it8152.c
+++ b/arch/arm/common/it8152.c
@@ -271,6 +271,14 @@ int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
                ((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
 }
 
+int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+       if (mask >= PHYS_OFFSET + SZ_64M - 1)
+               return 0;
+
+       return -EIO;
+}
+
 int __init it8152_pci_setup(int nr, struct pci_sys_data *sys)
 {
        it8152_io.start = IT8152_IO_BASE + 0x12000;
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index ab68cf1..e90b167 100644 (file)
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -317,6 +317,10 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
 #define pgprot_dmacoherent(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE)
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+                                    unsigned long size, pgprot_t vma_prot);
 #else
 #define pgprot_dmacoherent(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 1b56082..7885722 100644 (file)
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -48,6 +48,8 @@ work_pending:
        beq     no_work_pending
        mov     r0, sp                          @ 'regs'
        mov     r2, why                         @ 'syscall'
+       tst     r1, #_TIF_SIGPENDING            @ delivering a signal?
+       movne   why, #0                         @ prevent further restarts
        bl      do_notify_resume
        b       ret_slow_syscall                @ Check work again
 
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index ef3bc33..6cc6521 100644 (file)
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -227,46 +227,56 @@ again:
 }
 
 static void
-armpmu_disable(struct perf_event *event)
+armpmu_read(struct perf_event *event)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
-       int idx = hwc->idx;
-
-       WARN_ON(idx < 0);
-
-       clear_bit(idx, cpuc->active_mask);
-       armpmu->disable(hwc, idx);
-
-       barrier();
 
-       armpmu_event_update(event, hwc, idx);
-       cpuc->events[idx] = NULL;
-       clear_bit(idx, cpuc->used_mask);
+       /* Don't read disabled counters! */
+       if (hwc->idx < 0)
+               return;
 
-       perf_event_update_userpage(event);
+       armpmu_event_update(event, hwc, hwc->idx);
 }
 
 static void
-armpmu_read(struct perf_event *event)
+armpmu_stop(struct perf_event *event, int flags)
 {
        struct hw_perf_event *hwc = &event->hw;
 
-       /* Don't read disabled counters! */
-       if (hwc->idx < 0)
+       if (!armpmu)
                return;
 
-       armpmu_event_update(event, hwc, hwc->idx);
+       /*
+        * ARM pmu always has to update the counter, so ignore
+        * PERF_EF_UPDATE, see comments in armpmu_start().
+        */
+       if (!(hwc->state & PERF_HES_STOPPED)) {
+               armpmu->disable(hwc, hwc->idx);
+               barrier(); /* why? */
+               armpmu_event_update(event, hwc, hwc->idx);
+               hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+       }
 }
 
 static void
-armpmu_unthrottle(struct perf_event *event)
+armpmu_start(struct perf_event *event, int flags)
 {
        struct hw_perf_event *hwc = &event->hw;
 
+       if (!armpmu)
+               return;
+
+       /*
+        * ARM pmu always has to reprogram the period, so ignore
+        * PERF_EF_RELOAD, see the comment below.
+        */
+       if (flags & PERF_EF_RELOAD)
+               WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+       hwc->state = 0;
        /*
         * Set the period again. Some counters can't be stopped, so when we
-        * were throttled we simply disabled the IRQ source and the counter
+        * were stopped we simply disabled the IRQ source and the counter
         * may have been left counting. If we don't do this step then we may
         * get an interrupt too soon or *way* too late if the overflow has
         * happened since disabling.
@@ -275,14 +285,33 @@ armpmu_unthrottle(struct perf_event *event)
        armpmu->enable(hwc, hwc->idx);
 }
 
+static void
+armpmu_del(struct perf_event *event, int flags)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct hw_perf_event *hwc = &event->hw;
+       int idx = hwc->idx;
+
+       WARN_ON(idx < 0);
+
+       clear_bit(idx, cpuc->active_mask);
+       armpmu_stop(event, PERF_EF_UPDATE);
+       cpuc->events[idx] = NULL;
+       clear_bit(idx, cpuc->used_mask);
+
+       perf_event_update_userpage(event);
+}
+
 static int
-armpmu_enable(struct perf_event *event)
+armpmu_add(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx;
        int err = 0;
 
+       perf_pmu_disable(event->pmu);
+
        /* If we don't have a space for the counter then finish early. */
        idx = armpmu->get_event_idx(cpuc, hwc);
        if (idx < 0) {
@@ -299,25 +328,19 @@ armpmu_enable(struct perf_event *event)
        cpuc->events[idx] = event;
        set_bit(idx, cpuc->active_mask);
 
-       /* Set the period for the event. */
-       armpmu_event_set_period(event, hwc, idx);
-
-       /* Enable the event. */
-       armpmu->enable(hwc, idx);
+       hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+       if (flags & PERF_EF_START)
+               armpmu_start(event, PERF_EF_RELOAD);
 
        /* Propagate our changes to the userspace mapping. */
        perf_event_update_userpage(event);
 
 out:
+       perf_pmu_enable(event->pmu);
        return err;
 }
 
-static struct pmu pmu = {
-       .enable     = armpmu_enable,
-       .disable    = armpmu_disable,
-       .unthrottle = armpmu_unthrottle,
-       .read       = armpmu_read,
-};
+static struct pmu pmu;
 
 static int
 validate_event(struct cpu_hw_events *cpuc,
@@ -497,20 +520,29 @@ __hw_perf_event_init(struct perf_event *event)
        return err;
 }
 
-const struct pmu *
-hw_perf_event_init(struct perf_event *event)
+static int armpmu_event_init(struct perf_event *event)
 {
        int err = 0;
 
+       switch (event->attr.type) {
+       case PERF_TYPE_RAW:
+       case PERF_TYPE_HARDWARE:
+       case PERF_TYPE_HW_CACHE:
+               break;
+
+       default:
+               return -ENOENT;
+       }
+
        if (!armpmu)
-               return ERR_PTR(-ENODEV);
+               return -ENODEV;
 
        event->destroy = hw_perf_event_destroy;
 
        if (!atomic_inc_not_zero(&active_events)) {
-               if (atomic_read(&active_events) > perf_max_events) {
+               if (atomic_read(&active_events) > armpmu->num_events) {
                        atomic_dec(&active_events);
-                       return ERR_PTR(-ENOSPC);
+                       return -ENOSPC;
                }
 
                mutex_lock(&pmu_reserve_mutex);
@@ -524,17 +556,16 @@ hw_perf_event_init(struct perf_event *event)
        }
 
        if (err)
-               return ERR_PTR(err);
+               return err;
 
        err = __hw_perf_event_init(event);
        if (err)
                hw_perf_event_destroy(event);
 
-       return err ? ERR_PTR(err) : &pmu;
+       return err;
 }
 
-void
-hw_perf_enable(void)
+static void armpmu_enable(struct pmu *pmu)
 {
        /* Enable all of the perf events on hardware. */
        int idx;
@@ -555,13 +586,23 @@ hw_perf_enable(void)
        armpmu->start();
 }
 
-void
-hw_perf_disable(void)
+static void armpmu_disable(struct pmu *pmu)
 {
        if (armpmu)
                armpmu->stop();
 }
 
+static struct pmu pmu = {
+       .pmu_enable     = armpmu_enable,
+       .pmu_disable    = armpmu_disable,
+       .event_init     = armpmu_event_init,
+       .add            = armpmu_add,
+       .del            = armpmu_del,
+       .start          = armpmu_start,
+       .stop           = armpmu_stop,
+       .read           = armpmu_read,
+};
+
 /*
  * ARMv6 Performance counter handling code.
  *
@@ -2939,14 +2980,12 @@ init_hw_perf_events(void)
                        armpmu = &armv6pmu;
                        memcpy(armpmu_perf_cache_map, armv6_perf_cache_map,
                                        sizeof(armv6_perf_cache_map));
-                       perf_max_events = armv6pmu.num_events;
                        break;
                case 0xB020:    /* ARM11mpcore */
                        armpmu = &armv6mpcore_pmu;
                        memcpy(armpmu_perf_cache_map,
                               armv6mpcore_perf_cache_map,
                               sizeof(armv6mpcore_perf_cache_map));
-                       perf_max_events = armv6mpcore_pmu.num_events;
                        break;
                case 0xC080:    /* Cortex-A8 */
                        armv7pmu.id = ARM_PERF_PMU_ID_CA8;
@@ -2958,7 +2997,6 @@ init_hw_perf_events(void)
                        /* Reset PMNC and read the nb of CNTx counters
                            supported */
                        armv7pmu.num_events = armv7_reset_read_pmnc();
-                       perf_max_events = armv7pmu.num_events;
                        break;
                case 0xC090:    /* Cortex-A9 */
                        armv7pmu.id = ARM_PERF_PMU_ID_CA9;
@@ -2970,7 +3008,6 @@ init_hw_perf_events(void)
                        /* Reset PMNC and read the nb of CNTx counters
                            supported */
                        armv7pmu.num_events = armv7_reset_read_pmnc();
-                       perf_max_events = armv7pmu.num_events;
                        break;
                }
        /* Intel CPUs [xscale]. */
@@ -2981,13 +3018,11 @@ init_hw_perf_events(void)
                        armpmu = &xscale1pmu;
                        memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
                                        sizeof(xscale_perf_cache_map));
-                       perf_max_events = xscale1pmu.num_events;
                        break;
                case 2:
                        armpmu = &xscale2pmu;
                        memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
                                        sizeof(xscale_perf_cache_map));
-                       perf_max_events = xscale2pmu.num_events;
                        break;
                }
        }
@@ -2997,9 +3032,10 @@ init_hw_perf_events(void)
                                arm_pmu_names[armpmu->id], armpmu->num_events);
        } else {
                pr_info("no hardware support available\n");
-               perf_max_events = -1;
        }
 
+       perf_pmu_register(&pmu);
+
        return 0;
 }
 arch_initcall(init_hw_perf_events);
@@ -3007,13 +3043,6 @@ arch_initcall(init_hw_perf_events);
 /*
  * Callchain handling code.
  */
-static inline void
-callchain_store(struct perf_callchain_entry *entry,
-               u64 ip)
-{
-       if (entry->nr < PERF_MAX_STACK_DEPTH)
-               entry->ip[entry->nr++] = ip;
-}
 
 /*
  * The registers we're interested in are at the end of the variable
@@ -3045,7 +3074,7 @@ user_backtrace(struct frame_tail *tail,
        if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
                return NULL;
 
-       callchain_store(entry, buftail.lr);
+       perf_callchain_store(entry, buftail.lr);
 
        /*
         * Frame pointers should strictly progress back up the stack
@@ -3057,16 +3086,11 @@ user_backtrace(struct frame_tail *tail,
        return buftail.fp - 1;
 }
 
-static void
-perf_callchain_user(struct pt_regs *regs,
-                   struct perf_callchain_entry *entry)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
        struct frame_tail *tail;
 
-       callchain_store(entry, PERF_CONTEXT_USER);
-
-       if (!user_mode(regs))
-               regs = task_pt_regs(current);
 
        tail = (struct frame_tail *)regs->ARM_fp - 1;
 
@@ -3084,56 +3108,18 @@ callchain_trace(struct stackframe *fr,
                void *data)
 {
        struct perf_callchain_entry *entry = data;
-       callchain_store(entry, fr->pc);
+       perf_callchain_store(entry, fr->pc);
        return 0;
 }
 
-static void
-perf_callchain_kernel(struct pt_regs *regs,
-                     struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
        struct stackframe fr;
 
-       callchain_store(entry, PERF_CONTEXT_KERNEL);
        fr.fp = regs->ARM_fp;
        fr.sp = regs->ARM_sp;
        fr.lr = regs->ARM_lr;
        fr.pc = regs->ARM_pc;
        walk_stackframe(&fr, callchain_trace, entry);
 }
-
-static void
-perf_do_callchain(struct pt_regs *regs,
-                 struct perf_callchain_entry *entry)
-{
-       int is_user;
-
-       if (!regs)
-               return;
-
-       is_user = user_mode(regs);
-
-       if (!current || !current->pid)
-               return;
-
-       if (is_user && current->state != TASK_RUNNING)
-               return;
-
-       if (!is_user)
-               perf_callchain_kernel(regs, entry);
-
-       if (current->mm)
-               perf_callchain_user(regs, entry);
-}
-
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
-
-struct perf_callchain_entry *
-perf_callchain(struct pt_regs *regs)
-{
-       struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
-
-       entry->nr = 0;
-       perf_do_callchain(regs, entry);
-       return entry;
-}
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index 5e71ccd..1276bab 100644 (file)
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -426,7 +426,7 @@ static struct i2c_gpio_platform_data pdata_i2c0 = {
        .sda_is_open_drain      = 1,
        .scl_pin                = AT91_PIN_PA21,
        .scl_is_open_drain      = 1,
-       .udelay                 = 2,            /* ~100 kHz */
+       .udelay                 = 5,            /* ~100 kHz */
 };
 
 static struct platform_device at91sam9g45_twi0_device = {
@@ -440,7 +440,7 @@ static struct i2c_gpio_platform_data pdata_i2c1 = {
        .sda_is_open_drain      = 1,
        .scl_pin                = AT91_PIN_PB11,
        .scl_is_open_drain      = 1,
-       .udelay                 = 2,            /* ~100 kHz */
+       .udelay                 = 5,            /* ~100 kHz */
 };
 
 static struct platform_device at91sam9g45_twi1_device = {
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 3d996b6..9be261b 100644 (file)
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -769,8 +769,7 @@ static struct map_desc dm355_io_desc[] = {
                .virtual        = SRAM_VIRT,
                .pfn            = __phys_to_pfn(0x00010000),
                .length         = SZ_32K,
-               /* MT_MEMORY_NONCACHED requires supersection alignment */
-               .type           = MT_DEVICE,
+               .type           = MT_MEMORY_NONCACHED,
        },
 };
 
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 6b6f4c6..7781e35 100644 (file)
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -969,8 +969,7 @@ static struct map_desc dm365_io_desc[] = {
                .virtual        = SRAM_VIRT,
                .pfn            = __phys_to_pfn(0x00010000),
                .length         = SZ_32K,
-               /* MT_MEMORY_NONCACHED requires supersection alignment */
-               .type           = MT_DEVICE,
+               .type           = MT_MEMORY_NONCACHED,
        },
 };
 
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 40fec31..5e5b0a7 100644 (file)
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -653,8 +653,7 @@ static struct map_desc dm644x_io_desc[] = {
                .virtual        = SRAM_VIRT,
                .pfn            = __phys_to_pfn(0x00008000),
                .length         = SZ_16K,
-               /* MT_MEMORY_NONCACHED requires supersection alignment */
-               .type           = MT_DEVICE,
+               .type           = MT_MEMORY_NONCACHED,
        },
 };
 
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index e4a3df1..26e8a9c 100644 (file)
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -737,8 +737,7 @@ static struct map_desc dm646x_io_desc[] = {
                .virtual        = SRAM_VIRT,
                .pfn            = __phys_to_pfn(0x00010000),
                .length         = SZ_32K,
-               /* MT_MEMORY_NONCACHED requires supersection alignment */
-               .type           = MT_DEVICE,
+               .type           = MT_MEMORY_NONCACHED,
        },
 };
 
diff --git a/arch/arm/mach-dove/include/mach/io.h b/arch/arm/mach-dove/include/mach/io.h
index 3b3e472..eb4936f 100644 (file)
--- a/arch/arm/mach-dove/include/mach/io.h
+++ b/arch/arm/mach-dove/include/mach/io.h
@@ -13,8 +13,8 @@
 
 #define IO_SPACE_LIMIT         0xffffffff
 
-#define __io(a)  ((void __iomem *)(((a) - DOVE_PCIE0_IO_PHYS_BASE) +\
-                                  DOVE_PCIE0_IO_VIRT_BASE))
-#define __mem_pci(a)           (a)
+#define __io(a)        ((void __iomem *)(((a) - DOVE_PCIE0_IO_BUS_BASE) + \
+                                                DOVE_PCIE0_IO_VIRT_BASE))
+#define __mem_pci(a)   (a)
 
 #endif
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c
index 61cd4d6..24498a9 100644 (file)
--- a/arch/arm/mach-ixp4xx/common-pci.c
+++ b/arch/arm/mach-ixp4xx/common-pci.c
@@ -503,6 +503,14 @@ struct pci_bus * __devinit ixp4xx_scan_bus(int nr, struct pci_sys_data *sys)
        return pci_scan_bus(sys->busnr, &ixp4xx_ops, sys);
 }
 
+int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+       if (mask >= SZ_64M - 1)
+               return 0;
+
+       return -EIO;
+}
+
 EXPORT_SYMBOL(ixp4xx_pci_read);
 EXPORT_SYMBOL(ixp4xx_pci_write);
 
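The it8152 and ixp4xx dma_set_coherent_mask() implementations above back the ARCH_HAS_DMA_SET_COHERENT_MASK define added to the respective mach headers in this series: the platform, not the generic inline, decides which coherent masks its limited DMA window can accept. The driver-facing call is unchanged; a hedged, illustrative call site (names are examples only, not from this patch):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Illustrative only: a PCI driver asking for a 32-bit coherent mask.
 * On it8152/ixp4xx the platform hook above decides whether that is
 * acceptable; on failure the driver must fall back (streaming DMA,
 * bounce buffers) or give up. */
static int example_probe(struct pci_dev *pdev)
{
	if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;
	return 0;
}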
diff --git a/arch/arm/mach-ixp4xx/include/mach/hardware.h b/arch/arm/mach-ixp4xx/include/mach/hardware.h
index f91ca6d..8138371 100644 (file)
--- a/arch/arm/mach-ixp4xx/include/mach/hardware.h
+++ b/arch/arm/mach-ixp4xx/include/mach/hardware.h
@@ -26,6 +26,8 @@
 #define PCIBIOS_MAX_MEM                0x4BFFFFFF
 #endif
 
+#define ARCH_HAS_DMA_SET_COHERENT_MASK
+
 #define pcibios_assign_all_busses()    1
 
 /* Register locations and bits */
diff --git a/arch/arm/mach-kirkwood/include/mach/kirkwood.h b/arch/arm/mach-kirkwood/include/mach/kirkwood.h
index 93fc2ec..6e924b3 100644 (file)
--- a/arch/arm/mach-kirkwood/include/mach/kirkwood.h
+++ b/arch/arm/mach-kirkwood/include/mach/kirkwood.h
@@ -38,7 +38,7 @@
 
 #define KIRKWOOD_PCIE1_IO_PHYS_BASE    0xf3000000
 #define KIRKWOOD_PCIE1_IO_VIRT_BASE    0xfef00000
-#define KIRKWOOD_PCIE1_IO_BUS_BASE     0x00000000
+#define KIRKWOOD_PCIE1_IO_BUS_BASE     0x00100000
 #define KIRKWOOD_PCIE1_IO_SIZE         SZ_1M
 
 #define KIRKWOOD_PCIE_IO_PHYS_BASE     0xf2000000
diff --git a/arch/arm/mach-kirkwood/pcie.c b/arch/arm/mach-kirkwood/pcie.c
index 55e7f00..513ad31 100644 (file)
--- a/arch/arm/mach-kirkwood/pcie.c
+++ b/arch/arm/mach-kirkwood/pcie.c
@@ -117,7 +117,7 @@ static void __init pcie0_ioresources_init(struct pcie_port *pp)
         * IORESOURCE_IO
         */
        pp->res[0].name = "PCIe 0 I/O Space";
-       pp->res[0].start = KIRKWOOD_PCIE_IO_PHYS_BASE;
+       pp->res[0].start = KIRKWOOD_PCIE_IO_BUS_BASE;
        pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE_IO_SIZE - 1;
        pp->res[0].flags = IORESOURCE_IO;
 
@@ -139,7 +139,7 @@ static void __init pcie1_ioresources_init(struct pcie_port *pp)
         * IORESOURCE_IO
         */
        pp->res[0].name = "PCIe 1 I/O Space";
-       pp->res[0].start = KIRKWOOD_PCIE1_IO_PHYS_BASE;
+       pp->res[0].start = KIRKWOOD_PCIE1_IO_BUS_BASE;
        pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE1_IO_SIZE - 1;
        pp->res[0].flags = IORESOURCE_IO;
 
diff --git a/arch/arm/mach-mmp/include/mach/system.h b/arch/arm/mach-mmp/include/mach/system.h
index 4f5b0e0..1a8a25e 100644 (file)
--- a/arch/arm/mach-mmp/include/mach/system.h
+++ b/arch/arm/mach-mmp/include/mach/system.h
@@ -9,6 +9,8 @@
 #ifndef __ASM_MACH_SYSTEM_H
 #define __ASM_MACH_SYSTEM_H
 
+#include <mach/cputype.h>
+
 static inline void arch_idle(void)
 {
        cpu_do_idle();
@@ -16,6 +18,9 @@ static inline void arch_idle(void)
 
 static inline void arch_reset(char mode, const char *cmd)
 {
-       cpu_reset(0);
+       if (cpu_is_pxa168())
+               cpu_reset(0xffff0000);
+       else
+               cpu_reset(0);
 }
 #endif /* __ASM_MACH_SYSTEM_H */
diff --git a/arch/arm/mach-pxa/cpufreq-pxa2xx.c b/arch/arm/mach-pxa/cpufreq-pxa2xx.c
index 50d5939..58093d9 100644 (file)
--- a/arch/arm/mach-pxa/cpufreq-pxa2xx.c
+++ b/arch/arm/mach-pxa/cpufreq-pxa2xx.c
@@ -312,8 +312,7 @@ static int pxa_set_target(struct cpufreq_policy *policy,
        freqs.cpu = policy->cpu;
 
        if (freq_debug)
-               pr_debug(KERN_INFO "Changing CPU frequency to %d Mhz, "
-                        "(SDRAM %d Mhz)\n",
+               pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n",
                         freqs.new / 1000, (pxa_freq_settings[idx].div2) ?
                         (new_freq_mem / 2000) : (new_freq_mem / 1000));
 
diff --git a/arch/arm/mach-pxa/include/mach/hardware.h b/arch/arm/mach-pxa/include/mach/hardware.h
index 7f64d24..814f145 100644 (file)
--- a/arch/arm/mach-pxa/include/mach/hardware.h
+++ b/arch/arm/mach-pxa/include/mach/hardware.h
  * <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x
  * == 0x3 for pxa300/pxa310/pxa320
  */
+#if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x)
 #define __cpu_is_pxa2xx(id)                            \
        ({                                              \
                unsigned int _id = (id) >> 13 & 0x7;    \
                _id <= 0x2;                             \
         })
+#else
+#define __cpu_is_pxa2xx(id)    (0)
+#endif
 
+#ifdef CONFIG_PXA3xx
 #define __cpu_is_pxa3xx(id)                            \
        ({                                              \
                unsigned int _id = (id) >> 13 & 0x7;    \
                _id == 0x3;                             \
         })
+#else
+#define __cpu_is_pxa3xx(id)    (0)
+#endif
 
+#if defined(CONFIG_CPU_PXA930) || defined(CONFIG_CPU_PXA935)
 #define __cpu_is_pxa93x(id)                            \
        ({                                              \
                unsigned int _id = (id) >> 4 & 0xfff;   \
                _id == 0x683 || _id == 0x693;           \
         })
+#else
+#define __cpu_is_pxa93x(id)    (0)
+#endif
 
 #define cpu_is_pxa2xx()                                        \
        ({                                              \
@@ -309,7 +321,7 @@ extern unsigned long get_clock_tick_rate(void);
 #define PCIBIOS_MIN_IO         0
 #define PCIBIOS_MIN_MEM                0
 #define pcibios_assign_all_busses()    1
+#define ARCH_HAS_DMA_SET_COHERENT_MASK
 #endif
 
-
 #endif  /* _ASM_ARCH_HARDWARE_H */
diff --git a/arch/arm/mach-pxa/include/mach/io.h b/arch/arm/mach-pxa/include/mach/io.h
index 262691f..fdca3be 100644 (file)
--- a/arch/arm/mach-pxa/include/mach/io.h
+++ b/arch/arm/mach-pxa/include/mach/io.h
@@ -6,6 +6,8 @@
 #ifndef __ASM_ARM_ARCH_IO_H
 #define __ASM_ARM_ARCH_IO_H
 
+#include <mach/hardware.h>
+
 #define IO_SPACE_LIMIT 0xffffffff
 
 /*
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c
index 77ad6d3..405b92a 100644 (file)
--- a/arch/arm/mach-pxa/palm27x.c
+++ b/arch/arm/mach-pxa/palm27x.c
@@ -469,9 +469,13 @@ static struct i2c_board_info __initdata palm27x_pi2c_board_info[] = {
        },
 };
 
+static struct i2c_pxa_platform_data palm27x_i2c_power_info = {
+       .use_pio        = 1,
+};
+
 void __init palm27x_pmic_init(void)
 {
        i2c_register_board_info(1, ARRAY_AND_SIZE(palm27x_pi2c_board_info));
-       pxa27x_set_i2c_power_info(NULL);
+       pxa27x_set_i2c_power_info(&palm27x_i2c_power_info);
 }
 #endif
index c9b747c..37d6173 100644 (file)
@@ -240,6 +240,7 @@ static void __init vpac270_onenand_init(void) {}
 #if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE)
 static struct pxamci_platform_data vpac270_mci_platform_data = {
        .ocr_mask               = MMC_VDD_32_33 | MMC_VDD_33_34,
+       .gpio_power             = -1,
        .gpio_card_detect       = GPIO53_VPAC270_SD_DETECT_N,
        .gpio_card_ro           = GPIO52_VPAC270_SD_READONLY,
        .detect_delay_ms        = 200,
index 7b1fc98..d5a71ab 100644 (file)
@@ -273,6 +273,9 @@ extern void gpio_pullup(unsigned gpio, int value);
 extern int gpio_get_value(unsigned gpio);
 extern void gpio_set_value(unsigned gpio, int value);
 
+#define gpio_get_value_cansleep gpio_get_value
+#define gpio_set_value_cansleep gpio_set_value
+
 /* wrappers to sleep-enable the previous two functions */
 static inline unsigned gpio_to_irq(unsigned gpio)
 {
index 577df6c..efb1270 100644 (file)
@@ -227,7 +227,13 @@ static void ct_ca9x4_init(void)
        int i;
 
 #ifdef CONFIG_CACHE_L2X0
-       l2x0_init(MMIO_P2V(CT_CA9X4_L2CC), 0x00000000, 0xfe0fffff);
+       void __iomem *l2x0_base = MMIO_P2V(CT_CA9X4_L2CC);
+
+       /* set RAM latencies to 1 cycle for this core tile. */
+       writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);
+       writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);
+
+       l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
 #endif
 
        clkdev_add_table(lookups, ARRAY_SIZE(lookups));
index d073b64..724ba3b 100644 (file)
@@ -885,8 +885,23 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
        if (ai_usermode & UM_SIGNAL)
                force_sig(SIGBUS, current);
-       else
-               set_cr(cr_no_alignment);
+       else {
+               /*
+                * We're about to disable the alignment trap and return to
+                * user space.  But if an interrupt occurs before actually
+                * reaching user space, then the IRQ vector entry code will
+                * notice that we were still in kernel space and therefore
+                * the alignment trap won't be re-enabled in that case as it
+                * is presumed to be always on from kernel space.
+                * Let's prevent that race by disabling interrupts here (they
+                * are disabled on the way back to user space anyway in
+                * entry-common.S) and disable the alignment trap only if
+                * there is no work pending for this thread.
+                */
+               raw_local_irq_disable();
+               if (!(current_thread_info()->flags & _TIF_WORK_MASK))
+                       set_cr(cr_no_alignment);
+       }
 
        return 0;
 }
index 6e1c4f6..6a3a2d0 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/nodemask.h>
 #include <linux/memblock.h>
 #include <linux/sort.h>
+#include <linux/fs.h>
 
 #include <asm/cputype.h>
 #include <asm/sections.h>
@@ -246,6 +247,9 @@ static struct mem_type mem_types[] = {
                .domain    = DOMAIN_USER,
        },
        [MT_MEMORY] = {
+               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+                               L_PTE_USER | L_PTE_EXEC,
+               .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        },
@@ -254,6 +258,9 @@ static struct mem_type mem_types[] = {
                .domain    = DOMAIN_KERNEL,
        },
        [MT_MEMORY_NONCACHED] = {
+               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+                               L_PTE_USER | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
+               .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        },
@@ -411,9 +418,12 @@ static void __init build_mem_type_table(void)
         * Enable CPU-specific coherency if supported.
         * (Only available on XSC3 at the moment.)
         */
-       if (arch_is_coherent() && cpu_is_xsc3())
+       if (arch_is_coherent() && cpu_is_xsc3()) {
                mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-
+               mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+               mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+               mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+       }
        /*
         * ARMv6 and above have extended page tables.
         */
@@ -438,7 +448,9 @@ static void __init build_mem_type_table(void)
                mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
                mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
                mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+               mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
                mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+               mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
 #endif
        }
 
@@ -475,6 +487,8 @@ static void __init build_mem_type_table(void)
        mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
+       mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+       mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
        mem_types[MT_ROM].prot_sect |= cp->pmd;
 
        switch (cp->pmd) {
@@ -498,6 +512,19 @@ static void __init build_mem_type_table(void)
        }
 }
 
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+                             unsigned long size, pgprot_t vma_prot)
+{
+       if (!pfn_valid(pfn))
+               return pgprot_noncached(vma_prot);
+       else if (file->f_flags & O_SYNC)
+               return pgprot_writecombine(vma_prot);
+       return vma_prot;
+}
+EXPORT_SYMBOL(phys_mem_access_prot);
+#endif
+
 #define vectors_base() (vectors_high() ? 0xffff0000 : 0)
 
 static void __init *early_alloc(unsigned long sz)
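
Note: the new phys_mem_access_prot() hook governs /dev/mem-style mappings when CONFIG_ARM_DMA_MEM_BUFFERABLE is set: invalid pfns get a non-cached mapping, valid RAM opened with O_SYNC gets write-combining, and everything else keeps the caller's protection. A hedged userspace illustration; the physical offset is a placeholder, not from the patch:

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/mem", O_RDWR | O_SYNC);  /* O_SYNC -> write-combine on valid RAM */
        if (fd < 0)
                return 1;
        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
                       fd, 0x80000000UL /* placeholder physical address */);
        if (p != MAP_FAILED)
                munmap(p, 4096);
        close(fd);
        return 0;
}
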
index 6a8506d..7563ff0 100644 (file)
@@ -186,13 +186,14 @@ cpu_v7_name:
  *     It is assumed that:
  *     - cache type register is implemented
  */
-__v7_setup:
+__v7_ca9mp_setup:
 #ifdef CONFIG_SMP
        mrc     p15, 0, r0, c1, c0, 1
        tst     r0, #(1 << 6)                   @ SMP/nAMP mode enabled?
        orreq   r0, r0, #(1 << 6) | (1 << 0)    @ Enable SMP/nAMP mode and
        mcreq   p15, 0, r0, c1, c0, 1           @ TLB ops broadcasting
 #endif
+__v7_setup:
        adr     r12, __v7_setup_stack           @ the local stack
        stmia   r12, {r0-r5, r7, r9, r11, lr}
        bl      v7_flush_dcache_all
@@ -201,11 +202,16 @@ __v7_setup:
        mrc     p15, 0, r0, c0, c0, 0           @ read main ID register
        and     r10, r0, #0xff000000            @ ARM?
        teq     r10, #0x41000000
-       bne     2f
+       bne     3f
        and     r5, r0, #0x00f00000             @ variant
        and     r6, r0, #0x0000000f             @ revision
-       orr     r0, r6, r5, lsr #20-4           @ combine variant and revision
+       orr     r6, r6, r5, lsr #20-4           @ combine variant and revision
+       ubfx    r0, r0, #4, #12                 @ primary part number
 
+       /* Cortex-A8 Errata */
+       ldr     r10, =0x00000c08                @ Cortex-A8 primary part number
+       teq     r0, r10
+       bne     2f
 #ifdef CONFIG_ARM_ERRATA_430973
        teq     r5, #0x00100000                 @ only present in r1p*
        mrceq   p15, 0, r10, c1, c0, 1          @ read aux control register
@@ -213,21 +219,42 @@ __v7_setup:
        mcreq   p15, 0, r10, c1, c0, 1          @ write aux control register
 #endif
 #ifdef CONFIG_ARM_ERRATA_458693
-       teq     r0, #0x20                       @ only present in r2p0
+       teq     r6, #0x20                       @ only present in r2p0
        mrceq   p15, 0, r10, c1, c0, 1          @ read aux control register
        orreq   r10, r10, #(1 << 5)             @ set L1NEON to 1
        orreq   r10, r10, #(1 << 9)             @ set PLDNOP to 1
        mcreq   p15, 0, r10, c1, c0, 1          @ write aux control register
 #endif
 #ifdef CONFIG_ARM_ERRATA_460075
-       teq     r0, #0x20                       @ only present in r2p0
+       teq     r6, #0x20                       @ only present in r2p0
        mrceq   p15, 1, r10, c9, c0, 2          @ read L2 cache aux ctrl register
        tsteq   r10, #1 << 22
        orreq   r10, r10, #(1 << 22)            @ set the Write Allocate disable bit
        mcreq   p15, 1, r10, c9, c0, 2          @ write the L2 cache aux ctrl register
 #endif
+       b       3f
+
+       /* Cortex-A9 Errata */
+2:     ldr     r10, =0x00000c09                @ Cortex-A9 primary part number
+       teq     r0, r10
+       bne     3f
+#ifdef CONFIG_ARM_ERRATA_742230
+       cmp     r6, #0x22                       @ only present up to r2p2
+       mrcle   p15, 0, r10, c15, c0, 1         @ read diagnostic register
+       orrle   r10, r10, #1 << 4               @ set bit #4
+       mcrle   p15, 0, r10, c15, c0, 1         @ write diagnostic register
+#endif
+#ifdef CONFIG_ARM_ERRATA_742231
+       teq     r6, #0x20                       @ present in r2p0
+       teqne   r6, #0x21                       @ present in r2p1
+       teqne   r6, #0x22                       @ present in r2p2
+       mrceq   p15, 0, r10, c15, c0, 1         @ read diagnostic register
+       orreq   r10, r10, #1 << 12              @ set bit #12
+       orreq   r10, r10, #1 << 22              @ set bit #22
+       mcreq   p15, 0, r10, c15, c0, 1         @ write diagnostic register
+#endif
 
-2:     mov     r10, #0
+3:     mov     r10, #0
 #ifdef HARVARD_CACHE
        mcr     p15, 0, r10, c7, c5, 0          @ I+BTB cache invalidate
 #endif
@@ -323,6 +350,29 @@ cpu_elf_name:
 
        .section ".proc.info.init", #alloc, #execinstr
 
+       .type   __v7_ca9mp_proc_info, #object
+__v7_ca9mp_proc_info:
+       .long   0x410fc090              @ Required ID value
+       .long   0xff0ffff0              @ Mask for ID
+       .long   PMD_TYPE_SECT | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ | \
+               PMD_FLAGS
+       .long   PMD_TYPE_SECT | \
+               PMD_SECT_XN | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
+       b       __v7_ca9mp_setup
+       .long   cpu_arch_name
+       .long   cpu_elf_name
+       .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+       .long   cpu_v7_name
+       .long   v7_processor_functions
+       .long   v7wbi_tlb_fns
+       .long   v6_user_fns
+       .long   v7_cache_fns
+       .size   __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
+
        /*
         * Match any ARMv7 processor core.
         */
index ea3ca86..aedf9c1 100644 (file)
@@ -1,5 +1,5 @@
 /*
- *  linux/arch/arm/mach-nomadik/timer.c
+ *  linux/arch/arm/plat-nomadik/timer.c
  *
  * Copyright (C) 2008 STMicroelectronics
  * Copyright (C) 2010 Alessandro Rubini
@@ -75,7 +75,7 @@ static void nmdk_clkevt_mode(enum clock_event_mode mode,
                cr = readl(mtu_base + MTU_CR(1));
                writel(0, mtu_base + MTU_LR(1));
                writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1));
-               writel(0x2, mtu_base + MTU_IMSC);
+               writel(1 << 1, mtu_base + MTU_IMSC);
                break;
        case CLOCK_EVT_MODE_SHUTDOWN:
        case CLOCK_EVT_MODE_UNUSED:
@@ -131,25 +131,23 @@ void __init nmdk_timer_init(void)
 {
        unsigned long rate;
        struct clk *clk0;
-       struct clk *clk1;
-       u32 cr;
+       u32 cr = MTU_CRn_32BITS;
 
        clk0 = clk_get_sys("mtu0", NULL);
        BUG_ON(IS_ERR(clk0));
 
-       clk1 = clk_get_sys("mtu1", NULL);
-       BUG_ON(IS_ERR(clk1));
-
        clk_enable(clk0);
-       clk_enable(clk1);
 
        /*
-        * Tick rate is 2.4MHz for Nomadik and 110MHz for ux500:
-        * use a divide-by-16 counter if it's more than 16MHz
+        * Tick rate is 2.4 MHz for Nomadik and 2.4 MHz, 100 MHz or 133 MHz
+        * for ux500.
+        * Use a divide-by-16 counter if the tick rate is more than 32 MHz.
+        * At 32 MHz, the timer (with a 32-bit counter) can be programmed
+        * to wake up at most 127 s ahead in time. Dividing a 2.4 MHz timer
+        * by 16 gives too low a timer resolution.
         */
-       cr = MTU_CRn_32BITS;;
        rate = clk_get_rate(clk0);
-       if (rate > 16 << 20) {
+       if (rate > 32000000) {
                rate /= 16;
                cr |= MTU_CRn_PRESCALE_16;
        } else {
@@ -170,15 +168,8 @@ void __init nmdk_timer_init(void)
                pr_err("timer: failed to initialize clock source %s\n",
                       nmdk_clksrc.name);
 
-       /* Timer 1 is used for events, fix according to rate */
-       cr = MTU_CRn_32BITS;
-       rate = clk_get_rate(clk1);
-       if (rate > 16 << 20) {
-               rate /= 16;
-               cr |= MTU_CRn_PRESCALE_16;
-       } else {
-               cr |= MTU_CRn_PRESCALE_1;
-       }
+       /* Timer 1 is used for events */
+
        clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE);
 
        writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */
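
Note: the 32 MHz prescaler threshold above is a range-versus-resolution trade-off. Rough numbers (my arithmetic, not from the patch): a 32-bit counter wraps after 2^32/rate seconds, about 134 s at 32 MHz, while prescaling 2.4 MHz by 16 would leave 150 kHz, i.e. only ~6.7 us per tick. A small standalone check:

#include <stdio.h>

int main(void)
{
        double rates[] = { 2.4e6, 32e6, 133e6 };   /* tick rates named in the comment */
        for (int i = 0; i < 3; i++) {
                double r = rates[i] > 32e6 ? rates[i] / 16 : rates[i];
                printf("%6.1f MHz -> wraps after %6.1f s, %0.3f us/tick\n",
                       r / 1e6, 4294967296.0 / r, 1e6 / r);
        }
        return 0;
}
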
index e39a417..a92cb49 100644 (file)
@@ -33,7 +33,7 @@ config OMAP_DEBUG_DEVICES
 config OMAP_DEBUG_LEDS
        bool
        depends on OMAP_DEBUG_DEVICES
-       default y if LEDS
+       default y if LEDS_CLASS
 
 config OMAP_RESET_CLOCKS
        bool "Reset unused clocks during boot"
index e31496e..0c8612f 100644 (file)
@@ -156,7 +156,7 @@ static irqreturn_t omap_mcbsp_rx_irq_handler(int irq, void *dev_id)
                /* Writing zero to RSYNC_ERR clears the IRQ */
                MCBSP_WRITE(mcbsp_rx, SPCR1, MCBSP_READ_CACHE(mcbsp_rx, SPCR1));
        } else {
-               complete(&mcbsp_rx->tx_irq_completion);
+               complete(&mcbsp_rx->rx_irq_completion);
        }
 
        return IRQ_HANDLED;
index 226b2e8..10b3b4c 100644 (file)
@@ -220,20 +220,7 @@ void __init omap_map_sram(void)
        if (omap_sram_size == 0)
                return;
 
-       if (cpu_is_omap24xx()) {
-               omap_sram_io_desc[0].virtual = OMAP2_SRAM_VA;
-
-               base = OMAP2_SRAM_PA;
-               base = ROUND_DOWN(base, PAGE_SIZE);
-               omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
-       }
-
        if (cpu_is_omap34xx()) {
-               omap_sram_io_desc[0].virtual = OMAP3_SRAM_VA;
-               base = OMAP3_SRAM_PA;
-               base = ROUND_DOWN(base, PAGE_SIZE);
-               omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
-
                /*
                 * SRAM must be marked as non-cached on OMAP3 since the
                 * CORE DPLL M2 divider change code (in SRAM) runs with the
@@ -244,13 +231,11 @@ void __init omap_map_sram(void)
                omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED;
        }
 
-       if (cpu_is_omap44xx()) {
-               omap_sram_io_desc[0].virtual = OMAP4_SRAM_VA;
-               base = OMAP4_SRAM_PA;
-               base = ROUND_DOWN(base, PAGE_SIZE);
-               omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
-       }
-       omap_sram_io_desc[0].length = 1024 * 1024;      /* Use section desc */
+       omap_sram_io_desc[0].virtual = omap_sram_base;
+       base = omap_sram_start;
+       base = ROUND_DOWN(base, PAGE_SIZE);
+       omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
+       omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE);
        iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc));
 
        printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n",
index 98f94d0..a727f54 100644 (file)
@@ -314,10 +314,9 @@ int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
        vfree(module->arch.syminfo);
        module->arch.syminfo = NULL;
 
-       return module_bug_finalize(hdr, sechdrs, module);
+       return 0;
 }
 
 void module_arch_cleanup(struct module *module)
 {
-       module_bug_cleanup(module);
 }
index 0865e29..db4953d 100644 (file)
@@ -112,10 +112,9 @@ int module_finalize(const Elf_Ehdr *hdr,
                    const Elf_Shdr *sechdrs,
                    struct module *me)
 {
-       return module_bug_finalize(hdr, sechdrs, me);
+       return 0;
 }
 
 void module_arch_cleanup(struct module *mod)
 {
-       module_bug_cleanup(mod);
 }
index 9c1acb2..b2eeb0d 100644 (file)
@@ -157,7 +157,6 @@ typedef struct sigaltstack {
 #undef __HAVE_ARCH_SIG_BITOPS
 
 struct pt_regs;
-extern int do_signal(struct pt_regs *regs, sigset_t *oldset);
 
 #define ptrace_signal_deliver(regs, cookie)    do { } while (0)
 
index 7612577..c705456 100644 (file)
 #define __ARCH_WANT_SYS_OLD_GETRLIMIT /*will be unused*/
 #define __ARCH_WANT_SYS_OLDUMOUNT
 #define __ARCH_WANT_SYS_RT_SIGACTION
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
 
 #define __IGNORE_lchown
 #define __IGNORE_setuid
index 4038698..225412b 100644 (file)
@@ -235,10 +235,9 @@ work_resched:
 work_notifysig:                                ; deal with pending signals and
                                        ; notify-resume requests
        mv      r0, sp                  ; arg1 : struct pt_regs *regs
-       ldi     r1, #0                  ; arg2 : sigset_t *oldset
-       mv      r2, r9                  ; arg3 : __u32 thread_info_flags
+       mv      r1, r9                  ; arg2 : __u32 thread_info_flags
        bl      do_notify_resume
-       bra     restore_all
+       bra     resume_userspace
 
        ; perform syscall exit tracing
        ALIGN
index e555091..0021ade 100644 (file)
@@ -592,16 +592,17 @@ void user_enable_single_step(struct task_struct *child)
 
        if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
            != sizeof(insn))
-               break;
+               return -EIO;
 
        compute_next_pc(insn, pc, &next_pc, child);
        if (next_pc & 0x80000000)
-               break;
+               return -EIO;
 
        if (embed_debug_trap(child, next_pc))
-               break;
+               return -EIO;
 
        invalidate_cache();
+       return 0;
 }
 
 void user_disable_single_step(struct task_struct *child)
index 144b0f1..7bbe386 100644 (file)
 
 #define DEBUG_SIG 0
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-int do_signal(struct pt_regs *, sigset_t *);
-
-asmlinkage int
-sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize,
-                 unsigned long r2, unsigned long r3, unsigned long r4,
-                 unsigned long r5, unsigned long r6, struct pt_regs *regs)
-{
-       sigset_t newset;
-
-       /* XXX: Don't preclude handling different sized sigset_t's.  */
-       if (sigsetsize != sizeof(sigset_t))
-               return -EINVAL;
-
-       if (copy_from_user(&newset, unewset, sizeof(newset)))
-               return -EFAULT;
-       sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
-
-       spin_lock_irq(&current->sighand->siglock);
-       current->saved_sigmask = current->blocked;
-       current->blocked = newset;
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-
-       current->state = TASK_INTERRUPTIBLE;
-       schedule();
-       set_thread_flag(TIF_RESTORE_SIGMASK);
-       return -ERESTARTNOHAND;
-}
-
 asmlinkage int
 sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
                unsigned long r2, unsigned long r3, unsigned long r4,
@@ -218,7 +187,7 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
        return (void __user *)((sp - frame_size) & -8ul);
 }
 
-static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
                           sigset_t *set, struct pt_regs *regs)
 {
        struct rt_sigframe __user *frame;
@@ -275,22 +244,34 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
                current->comm, current->pid, frame, regs->pc);
 #endif
 
-       return;
+       return 0;
 
 give_sigsegv:
        force_sigsegv(sig, current);
+       return -EFAULT;
+}
+
+static int prev_insn(struct pt_regs *regs)
+{
+       u16 inst;
+       if (get_user(inst, (u16 __user *)(regs->bpc - 2)))
+               return -EFAULT;
+       if ((inst & 0xfff0) == 0x10f0)  /* trap ? */
+               regs->bpc -= 2;
+       else
+               regs->bpc -= 4;
+       regs->syscall_nr = -1;
+       return 0;
 }
 
 /*
  * OK, we're invoking a handler
  */
 
-static void
+static int
 handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
              sigset_t *oldset, struct pt_regs *regs)
 {
-       unsigned short inst;
-
        /* Are we from a system call? */
        if (regs->syscall_nr >= 0) {
                /* If so, check system call restarting.. */
@@ -308,16 +289,14 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
                        /* fallthrough */
                        case -ERESTARTNOINTR:
                                regs->r0 = regs->orig_r0;
-                               inst = *(unsigned short *)(regs->bpc - 2);
-                               if ((inst & 0xfff0) == 0x10f0)  /* trap ? */
-                                       regs->bpc -= 2;
-                               else
-                                       regs->bpc -= 4;
+                               if (prev_insn(regs) < 0)
+                                       return -EFAULT;
                }
        }
 
        /* Set up the stack frame */
-       setup_rt_frame(sig, ka, info, oldset, regs);
+       if (setup_rt_frame(sig, ka, info, oldset, regs))
+               return -EFAULT;
 
        spin_lock_irq(&current->sighand->siglock);
        sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -325,6 +304,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
                sigaddset(&current->blocked,sig);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
+       return 0;
 }
 
 /*
@@ -332,12 +312,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
  * want to handle. Thus you cannot kill init even with a SIGKILL even by
  * mistake.
  */
-int do_signal(struct pt_regs *regs, sigset_t *oldset)
+static void do_signal(struct pt_regs *regs)
 {
        siginfo_t info;
        int signr;
        struct k_sigaction ka;
-       unsigned short inst;
+       sigset_t *oldset;
 
        /*
         * We want the common case to go fast, which
@@ -346,12 +326,14 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
         * if so.
         */
        if (!user_mode(regs))
-               return 1;
+               return;
 
        if (try_to_freeze()) 
                goto no_signal;
 
-       if (!oldset)
+       if (test_thread_flag(TIF_RESTORE_SIGMASK))
+               oldset = &current->saved_sigmask;
+       else
                oldset = &current->blocked;
 
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
@@ -363,8 +345,10 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
                 */
 
                /* Whee!  Actually deliver the signal.  */
-               handle_signal(signr, &ka, &info, oldset, regs);
-               return 1;
+               if (handle_signal(signr, &ka, &info, oldset, regs) == 0)
+                       clear_thread_flag(TIF_RESTORE_SIGMASK);
+
+               return;
        }
 
  no_signal:
@@ -375,31 +359,24 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
                    regs->r0 == -ERESTARTSYS ||
                    regs->r0 == -ERESTARTNOINTR) {
                        regs->r0 = regs->orig_r0;
-                       inst = *(unsigned short *)(regs->bpc - 2);
-                       if ((inst & 0xfff0) == 0x10f0)  /* trap ? */
-                               regs->bpc -= 2;
-                       else
-                               regs->bpc -= 4;
-               }
-               if (regs->r0 == -ERESTART_RESTARTBLOCK){
+                       prev_insn(regs);
+               } else if (regs->r0 == -ERESTART_RESTARTBLOCK){
                        regs->r0 = regs->orig_r0;
                        regs->r7 = __NR_restart_syscall;
-                       inst = *(unsigned short *)(regs->bpc - 2);
-                       if ((inst & 0xfff0) == 0x10f0)  /* trap ? */
-                               regs->bpc -= 2;
-                       else
-                               regs->bpc -= 4;
+                       prev_insn(regs);
                }
        }
-       return 0;
+       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+               clear_thread_flag(TIF_RESTORE_SIGMASK);
+               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+       }
 }
 
 /*
  * notification of userspace execution resumption
  * - triggered by current->work.notify_resume
  */
-void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
-                     __u32 thread_info_flags)
+void do_notify_resume(struct pt_regs *regs, __u32 thread_info_flags)
 {
        /* Pending single-step? */
        if (thread_info_flags & _TIF_SINGLESTEP)
@@ -407,7 +384,7 @@ void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
 
        /* deal with pending signal delivery */
        if (thread_info_flags & _TIF_SIGPENDING)
-               do_signal(regs,oldset);
+               do_signal(regs);
 
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
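
Note: prev_insn() above centralises the syscall-restart backtrack: if the 16 bits before bpc match the pattern 0x10fN, the syscall was entered by a 2-byte trap instruction, otherwise a 4-byte sequence is assumed (encodings taken from the code itself, not checked against the m32r ISA manual). A standalone model:

#include <stdint.h>
#include <stdio.h>

static int backtrack_bytes(uint16_t prev_halfword)
{
        /* trap instructions match 0x10f0..0x10ff */
        return (prev_halfword & 0xfff0) == 0x10f0 ? 2 : 4;
}

int main(void)
{
        printf("%d %d\n", backtrack_bytes(0x10f2), backtrack_bytes(0x8e42));
        return 0;
}
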
index 8f06408..05285d0 100644 (file)
@@ -162,7 +162,7 @@ static void mac_init_asc( void )
 void mac_mksound( unsigned int freq, unsigned int length )
 {
        __u32 cfreq = ( freq << 5 ) / 468;
-       __u32 flags;
+       unsigned long flags;
        int i;
 
        if ( mac_special_bell == NULL )
@@ -224,7 +224,7 @@ static void mac_nosound( unsigned long ignored )
  */
 static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsigned int volume )
 {
-       __u32 flags;
+       unsigned long flags;
 
        /* if the bell is already ringing, ring longer */
        if ( mac_bell_duration > 0 )
@@ -271,7 +271,7 @@ static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsig
 static void mac_quadra_ring_bell( unsigned long ignored )
 {
        int     i, count = mac_asc_samplespersec / HZ;
-       __u32 flags;
+       unsigned long flags;
 
        /*
         * we neither want a sound buffer overflow nor underflow, so we need to match
index 3ad59dd..5526faa 100644 (file)
@@ -13,6 +13,7 @@ config MIPS
        select HAVE_KPROBES
        select HAVE_KRETPROBES
        select RTC_LIB if !MACH_LOONGSON
+       select GENERIC_ATOMIC64 if !64BIT
 
 mainmenu "Linux/MIPS Kernel Configuration"
 
@@ -1646,8 +1647,16 @@ config MIPS_MT_SMP
        select SYS_SUPPORTS_SMP
        select SMP_UP
        help
-         This is a kernel model which is also known a VSMP or lately
-         has been marketesed into SMVP.
+         This is a kernel model which is known as VSMP but has lately been
+         marketed as SMVP.
+         Virtual SMP uses the processor's VPEs to implement virtual
+         processors. In the currently available configuration of the 34K
+         processor this allows for a dual processor. Both processors will
+         share the same primary caches; each will obtain half of the TLB for
+         its own exclusive use. For a layman this model can be described as
+         similar to what Intel calls Hyper-Threading.
+
+         For further information see http://www.linux-mips.org/wiki/34K#VSMP
 
 config MIPS_MT_SMTC
        bool "SMTC: Use all TCs on all VPEs for SMP"
@@ -1664,6 +1673,14 @@ config MIPS_MT_SMTC
        help
          This is a kernel model which is known as SMTC or has lately been
          marketed as SMVP.
+         It presents the available TCs of the core as processors to Linux.
+         On currently available 34K processors this means a Linux system will
+         see up to 5 processors. The implementation of the SMTC kernel differs
+         significantly from VSMP and cannot efficiently coexist in the same
+         kernel binary, so the choice between VSMP and SMTC is a compile-time
+         decision.
+
+         For further information see http://www.linux-mips.org/wiki/34K#SMTC
 
 endchoice
 
index c29511b..5340210 100644 (file)
@@ -43,7 +43,7 @@ int prom_argc;
 char **prom_argv;
 char **prom_envp;
 
-void prom_init_cmdline(void)
+void __init prom_init_cmdline(void)
 {
        int i;
 
@@ -104,7 +104,7 @@ static inline void str2eaddr(unsigned char *ea, unsigned char *str)
        }
 }
 
-int prom_get_ethernet_addr(char *ethernet_addr)
+int __init prom_get_ethernet_addr(char *ethernet_addr)
 {
        char *ethaddr_str;
 
@@ -123,7 +123,6 @@ int prom_get_ethernet_addr(char *ethernet_addr)
 
        return 0;
 }
-EXPORT_SYMBOL(prom_get_ethernet_addr);
 
 void __init prom_free_prom_memory(void)
 {
index ed9bb70..5fd7f7a 100644 (file)
@@ -59,7 +59,7 @@ $(obj)/piggy.o: $(obj)/dummy.o $(obj)/vmlinux.bin.z FORCE
 hostprogs-y := calc_vmlinuz_load_addr
 
 VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \
-               $(objtree)/$(KBUILD_IMAGE) $(VMLINUX_LOAD_ADDRESS))
+               $(obj)/vmlinux.bin $(VMLINUX_LOAD_ADDRESS))
 
 vmlinuzobjs-y += $(obj)/piggy.o
 
index 094c17e..47323ca 100644 (file)
@@ -83,3 +83,7 @@ config ARCH_SPARSEMEM_ENABLE
        def_bool y
        select SPARSEMEM_STATIC
        depends on CPU_CAVIUM_OCTEON
+
+config CAVIUM_OCTEON_HELPER
+       def_bool y
+       depends on OCTEON_ETHERNET || PCI
index c664c8c..a5b4279 100644 (file)
@@ -41,7 +41,7 @@ static int cnmips_cu2_call(struct notifier_block *nfb, unsigned long action,
        return NOTIFY_OK;               /* Let default notifier send signals */
 }
 
-static int cnmips_cu2_setup(void)
+static int __init cnmips_cu2_setup(void)
 {
        return cu2_notifier(cnmips_cu2_call, 0);
 }
index 2fd66db..7f41c5b 100644 (file)
@@ -11,4 +11,4 @@
 
 obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o
 
-obj-$(CONFIG_PCI) += cvmx-helper-errata.o cvmx-helper-jtag.o
+obj-$(CONFIG_CAVIUM_OCTEON_HELPER) += cvmx-helper-errata.o cvmx-helper-jtag.o
index c63c56b..47d87da 100644 (file)
@@ -782,6 +782,10 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
  */
 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
 
+#else /* !CONFIG_64BIT */
+
+#include <asm-generic/atomic64.h>
+
 #endif /* CONFIG_64BIT */
 
 /*
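
Note: pulling in asm-generic/atomic64.h (paired with the GENERIC_ATOMIC64 select in the MIPS Kconfig hunk earlier) gives 32-bit MIPS the full atomic64_* API via spinlock-backed fallbacks. A minimal in-kernel usage sketch; the counter name is invented:

#include <asm/atomic.h>

static atomic64_t rx_bytes = ATOMIC64_INIT(0);   /* hypothetical statistic */

static inline void account_rx(unsigned int len)
{
        /* hashed-spinlock implementation on !64BIT, native ll/sc on 64BIT */
        atomic64_add(len, &rx_bytes);
}
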
index 2cb2f0c..3532e2c 100644 (file)
@@ -24,7 +24,7 @@ extern int cu2_notifier_call_chain(unsigned long val, void *v);
 
 #define cu2_notifier(fn, pri)                                          \
 ({                                                                     \
-       static struct notifier_block fn##_nb __cpuinitdata = {          \
+       static struct notifier_block fn##_nb = {                        \
                .notifier_call = fn,                                    \
                .priority = pri                                         \
        };                                                              \
index 9b9436a..86548da 100644 (file)
@@ -321,6 +321,7 @@ struct gic_intrmask_regs {
  */
 struct gic_intr_map {
        unsigned int cpunum;    /* Directed to this CPU */
+#define GIC_UNUSED             0xdead                  /* Dummy data */
        unsigned int pin;       /* Directed to this Pin */
        unsigned int polarity;  /* Polarity : +/-       */
        unsigned int trigtype;  /* Trigger  : Edge/Levl */
index b74caf6..ff9a8b8 100644 (file)
@@ -1,6 +1,6 @@
 #ifndef __ASM_MACH_TX49XX_KMALLOC_H
 #define __ASM_MACH_TX49XX_KMALLOC_H
 
-#define ARCH_KMALLOC_MINALIGN  L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
 
 #endif /* __ASM_MACH_TX49XX_KMALLOC_H */
index cea872f..d11aa02 100644 (file)
@@ -88,9 +88,6 @@
 
 #define GIC_EXT_INTR(x)                x
 
-/* Dummy data */
-#define X                      0xdead
-
 /* External Interrupts used for IPI */
 #define GIC_IPI_EXT_INTR_RESCHED_VPE0  16
 #define GIC_IPI_EXT_INTR_CALLFNC_VPE0  17
index a16beaf..e59cd1a 100644 (file)
@@ -150,6 +150,20 @@ typedef struct { unsigned long pgprot; } pgprot_t;
     ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
 #endif
 #define __va(x)                ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
+
+/*
+ * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad
+ * (lmo) resp. 8431fd094d625b94d364fe393076ccef88e6ce18 (kernel.org).  The
+ * discussion can be found in lkml posting
+ * <a2ebde260608230500o3407b108hc03debb9da6e62c@mail.gmail.com> which is
+ * archived at http://lists.linuxcoding.com/kernel/2006-q3/msg17360.html
+ *
+ * It is unclear if the miscompilations mentioned in
+ * http://lkml.org/lkml/2010/8/8/138 also affect MIPS, so we keep this one
+ * until GCC 3.x has been retired; only then can we apply
+ * https://patchwork.linux-mips.org/patch/1541/
+ */
+
 #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
 
 #define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
index 2376f2e..70df9c0 100644 (file)
@@ -146,7 +146,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define _TIF_LOAD_WATCH                (1<<TIF_LOAD_WATCH)
 
 /* work to do on interrupt/exception return */
-#define _TIF_WORK_MASK         (0x0000ffef & ~_TIF_SECCOMP)
+#define _TIF_WORK_MASK         (0x0000ffef &                           \
+                                       ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
 /* work to do on any return to u-space */
 #define _TIF_ALLWORK_MASK      (0x8000ffff & ~_TIF_SECCOMP)
 
index baa318a..550725b 100644 (file)
 #define __NR_perf_event_open           (__NR_Linux + 333)
 #define __NR_accept4                   (__NR_Linux + 334)
 #define __NR_recvmmsg                  (__NR_Linux + 335)
+#define __NR_fanotify_init             (__NR_Linux + 336)
+#define __NR_fanotify_mark             (__NR_Linux + 337)
+#define __NR_prlimit64                 (__NR_Linux + 338)
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls            335
+#define __NR_Linux_syscalls            338
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux                 4000
-#define __NR_O32_Linux_syscalls                335
+#define __NR_O32_Linux_syscalls                338
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
 #define __NR_perf_event_open           (__NR_Linux + 292)
 #define __NR_accept4                   (__NR_Linux + 293)
 #define __NR_recvmmsg                  (__NR_Linux + 294)
+#define __NR_fanotify_init             (__NR_Linux + 295)
+#define __NR_fanotify_mark             (__NR_Linux + 296)
+#define __NR_prlimit64                 (__NR_Linux + 297)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls            294
+#define __NR_Linux_syscalls            297
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux                  5000
-#define __NR_64_Linux_syscalls         294
+#define __NR_64_Linux_syscalls         297
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
 #define __NR_accept4                   (__NR_Linux + 297)
 #define __NR_recvmmsg                  (__NR_Linux + 298)
 #define __NR_getdents64                        (__NR_Linux + 299)
+#define __NR_fanotify_init             (__NR_Linux + 300)
+#define __NR_fanotify_mark             (__NR_Linux + 301)
+#define __NR_prlimit64                 (__NR_Linux + 302)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls            299
+#define __NR_Linux_syscalls            302
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux                 6000
-#define __NR_N32_Linux_syscalls                299
+#define __NR_N32_Linux_syscalls                302
 
 #ifdef __KERNEL__
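
Note: with the bases defined after each table (4000 for o32, 5000 for 64-bit, 6000 for n32), the new entries resolve to 4336-4338, 5295-5297 and 6300-6302 respectively. A hedged o32 userspace sketch of calling fanotify_init by number before a libc wrapper exists; the zero arguments are placeholders:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
        long fd = syscall(4000 + 336, 0, 0);   /* __NR_fanotify_init on o32 */
        if (fd < 0)
                perror("fanotify_init");
        return 0;
}
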
 
index b181f2f..82ba9f6 100644 (file)
@@ -7,7 +7,6 @@
 #include <asm/io.h>
 #include <asm/gic.h>
 #include <asm/gcmpregs.h>
-#include <asm/mips-boards/maltaint.h>
 #include <asm/irq.h>
 #include <linux/hardirq.h>
 #include <asm-generic/bitops/find.h>
@@ -131,7 +130,7 @@ static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
        int             i;
 
        irq -= _irqbase;
-       pr_debug(KERN_DEBUG "%s(%d) called\n", __func__, irq);
+       pr_debug("%s(%d) called\n", __func__, irq);
        cpumask_and(&tmp, cpumask, cpu_online_mask);
        if (cpus_empty(tmp))
                return -1;
@@ -222,7 +221,7 @@ static void __init gic_basic_init(int numintrs, int numvpes,
        /* Setup specifics */
        for (i = 0; i < mapsize; i++) {
                cpu = intrmap[i].cpunum;
-               if (cpu == X)
+               if (cpu == GIC_UNUSED)
                        continue;
                if (cpu == 0 && i != 0 && intrmap[i].flags == 0)
                        continue;
index 1f4e2fa..f4546e9 100644 (file)
@@ -283,7 +283,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
        struct pt_regs *regs = args->regs;
        int trap = (regs->cp0_cause & 0x7c) >> 2;
 
-       /* Userpace events, ignore. */
+       /* Userspace events, ignore. */
        if (user_mode(regs))
                return NOTIFY_DONE;
 
index 80e2ba6..29811f0 100644 (file)
@@ -251,7 +251,7 @@ void sp_work_handle_request(void)
                memset(&tz, 0, sizeof(tz));
                if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv,
                                             (int)&tz, 0, 0)) == 0)
-               ret.retval = tv.tv_sec;
+                       ret.retval = tv.tv_sec;
                break;
 
        case MTSP_SYSCALL_EXIT:
index c2dab14..6343b4a 100644 (file)
@@ -341,3 +341,10 @@ asmlinkage long sys32_lookup_dcookie(u32 a0, u32 a1, char __user *buf,
 {
        return sys_lookup_dcookie(merge_64(a0, a1), buf, len);
 }
+
+SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags,
+               u64, a3, u64, a4, int, dfd, const char  __user *, pathname)
+{
+       return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4),
+                                dfd, pathname);
+}
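
Note: the six-argument compat wrapper above exists because an o32 caller passes the 64-bit mask of fanotify_mark() in two 32-bit argument registers; merge_64() glues them back together. A standalone model of that reassembly, assuming the first register carries the high word on big-endian (which half is high is ABI/endianness-dependent):

#include <stdint.h>

static inline uint64_t model_merge_64(uint32_t r1, uint32_t r2)
{
#ifdef __MIPSEB__
        return ((uint64_t)r1 << 32) | r2;   /* big-endian: r1 is the high word */
#else
        return ((uint64_t)r2 << 32) | r1;   /* little-endian: r2 is the high word */
#endif
}
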
index 17202bb..584415e 100644 (file)
@@ -583,7 +583,10 @@ einval:    li      v0, -ENOSYS
        sys     sys_rt_tgsigqueueinfo   4
        sys     sys_perf_event_open     5
        sys     sys_accept4             4
-       sys     sys_recvmmsg            5
+       sys     sys_recvmmsg            5       /* 4335 */
+       sys     sys_fanotify_init       2
+       sys     sys_fanotify_mark       6
+       sys     sys_prlimit64           4
        .endm
 
        /* We pre-compute the number of _instruction_ bytes needed to
index a8a6c59..5573f8e 100644 (file)
@@ -416,9 +416,12 @@ sys_call_table:
        PTR     sys_pipe2
        PTR     sys_inotify_init1
        PTR     sys_preadv
-       PTR     sys_pwritev                     /* 5390 */
+       PTR     sys_pwritev                     /* 5290 */
        PTR     sys_rt_tgsigqueueinfo
        PTR     sys_perf_event_open
        PTR     sys_accept4
-       PTR     sys_recvmmsg
+       PTR     sys_recvmmsg
+       PTR     sys_fanotify_init               /* 5295 */
+       PTR     sys_fanotify_mark
+       PTR     sys_prlimit64
        .size   sys_call_table,.-sys_call_table
index a3d6613..1e38ec9 100644 (file)
@@ -419,5 +419,8 @@ EXPORT(sysn32_call_table)
        PTR     sys_perf_event_open
        PTR     sys_accept4
        PTR     compat_sys_recvmmsg
-       PTR     sys_getdents
+       PTR     sys_getdents64
+       PTR     sys_fanotify_init               /* 6300 */
+       PTR     sys_fanotify_mark
+       PTR     sys_prlimit64
        .size   sysn32_call_table,.-sysn32_call_table
index 813689e..171979f 100644 (file)
@@ -538,5 +538,8 @@ sys_call_table:
        PTR     compat_sys_rt_tgsigqueueinfo
        PTR     sys_perf_event_open
        PTR     sys_accept4
-       PTR     compat_sys_recvmmsg
+       PTR     compat_sys_recvmmsg             /* 4335 */
+       PTR     sys_fanotify_init
+       PTR     sys_32_fanotify_mark
+       PTR     sys_prlimit64
        .size   sys_call_table,.-sys_call_table
index 7ba8908..469d401 100644 (file)
@@ -44,27 +44,39 @@ static inline int cpu_is_noncoherent_r10000(struct device *dev)
 
 static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
 {
+       gfp_t dma_flag;
+
        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
 
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ISA
        if (dev == NULL)
-               gfp |= __GFP_DMA;
-       else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
-               gfp |= __GFP_DMA;
+               dma_flag = __GFP_DMA;
        else
 #endif
-#ifdef CONFIG_ZONE_DMA32
+#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
             if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
-               gfp |= __GFP_DMA32;
+                       dma_flag = __GFP_DMA;
+       else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+                       dma_flag = __GFP_DMA32;
+       else
+#endif
+#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
+            if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+               dma_flag = __GFP_DMA32;
+       else
+#endif
+#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
+            if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+               dma_flag = __GFP_DMA;
        else
 #endif
-               ;
+               dma_flag = 0;
 
        /* Don't invoke OOM killer */
        gfp |= __GFP_NORETRY;
 
-       return gfp;
+       return gfp | dma_flag;
 }
 
 void *dma_alloc_noncoherent(struct device *dev, size_t size,
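
Note: the rewrite above selects exactly one zone modifier from the device's coherent DMA mask rather than accumulating flags. A standalone model of the non-ISA decision, assuming a kernel with both CONFIG_ZONE_DMA and CONFIG_ZONE_DMA32:

#include <stdint.h>
#include <stdio.h>

#define MASK(n) (((n) >= 64) ? ~0ULL : ((1ULL << (n)) - 1))   /* models DMA_BIT_MASK() */

static const char *zone_for(uint64_t coherent_dma_mask)
{
        if (coherent_dma_mask < MASK(32))
                return "__GFP_DMA";
        if (coherent_dma_mask < MASK(64))
                return "__GFP_DMA32";
        return "(no zone modifier)";
}

int main(void)
{
        printf("24-bit mask -> %s\n", zone_for(MASK(24)));   /* __GFP_DMA */
        printf("40-bit mask -> %s\n", zone_for(MASK(40)));   /* __GFP_DMA32 */
        printf("64-bit mask -> %s\n", zone_for(MASK(64)));   /* no modifier */
        return 0;
}
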
index 1ef75cd..274af3b 100644 (file)
@@ -30,7 +30,7 @@
 #define tc_lsize       32
 
 extern unsigned long icache_way_size, dcache_way_size;
-unsigned long tcache_size;
+static unsigned long tcache_size;
 
 #include <asm/r4kcache.h>
 
index 15949b0..b79b24a 100644 (file)
@@ -385,6 +385,8 @@ static int __initdata msc_nr_eicirqs = ARRAY_SIZE(msc_eicirqmap);
  */
 
 #define GIC_CPU_NMI GIC_MAP_TO_NMI_MSK
+#define X GIC_UNUSED
+
 static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
        { X, X,            X,           X,              0 },
        { X, X,            X,           X,              0 },
@@ -404,6 +406,7 @@ static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
        { X, X,            X,           X,              0 },
        /* The remainder of this table is initialised by fill_ipi_map */
 };
+#undef X
 
 /*
  * GCMP needs to be detected before any SMP initialisation
index 71f7d27..f31218e 100644 (file)
@@ -118,7 +118,7 @@ static int __init rc32434_pcibridge_init(void)
        if (!((pcicvalue == PCIM_H_EA) ||
              (pcicvalue == PCIM_H_IA_FIX) ||
              (pcicvalue == PCIM_H_IA_RR))) {
-               pr_err(KERN_ERR "PCI init error!!!\n");
+               pr_err("PCI init error!!!\n");
                /* Not in Host Mode, return ERROR */
                return -1;
        }
index fadd874..e7a12ff 100644 (file)
  */
 #include <linux/kernel.h>
 
+#include <asm/processor.h>
 #include <asm/reboot.h>
 #include <glb.h>
 
 void pnx8550_machine_restart(char *command)
 {
-       char head[] = "************* Machine restart *************";
-       char foot[] = "*******************************************";
-
-       printk("\n\n");
-       printk("%s\n", head);
-       if (command != NULL)
-               printk("* %s\n", command);
-       printk("%s\n", foot);
-
        PNX8550_RST_CTL = PNX8550_RST_DO_SW_RST;
 }
 
 void pnx8550_machine_halt(void)
 {
-       printk("*** Machine halt. (Not implemented) ***\n");
-}
-
-void pnx8550_machine_power_off(void)
-{
-       printk("*** Machine power off.  (Not implemented) ***\n");
+       while (1) {
+               if (cpu_wait)
+                       cpu_wait();
+       }
 }
index 64246c9..43cb394 100644 (file)
@@ -44,7 +44,6 @@
 extern void __init board_setup(void);
 extern void pnx8550_machine_restart(char *);
 extern void pnx8550_machine_halt(void);
-extern void pnx8550_machine_power_off(void);
 extern struct resource ioport_resource;
 extern struct resource iomem_resource;
 extern char *prom_getcmdline(void);
@@ -100,7 +99,7 @@ void __init plat_mem_setup(void)
 
         _machine_restart = pnx8550_machine_restart;
         _machine_halt = pnx8550_machine_halt;
-        pm_power_off = pnx8550_machine_power_off;
+        pm_power_off = pnx8550_machine_halt;
 
        /* Clear the Global 2 Register, PCI Inta Output Enable Registers
           Bit 1:Enable DAC Powerdown
index 444b9f9..7c2a2f7 100644 (file)
@@ -8,7 +8,6 @@ mainmenu "Linux Kernel Configuration"
 config MN10300
        def_bool y
        select HAVE_OPROFILE
-       select HAVE_ARCH_TRACEHOOK
 
 config AM33
        def_bool y
index ff80e86..ce83c74 100644 (file)
@@ -101,7 +101,7 @@ config GDBSTUB_DEBUG_BREAKPOINT
 
 choice
        prompt "GDB stub port"
-       default GDBSTUB_TTYSM0
+       default GDBSTUB_ON_TTYSM0
        depends on GDBSTUB
        help
          Select the serial port used for GDB-stub.
index f49ac49..3f50e96 100644 (file)
@@ -229,9 +229,9 @@ int ffs(int x)
 #include <asm-generic/bitops/hweight.h>
 
 #define ext2_set_bit_atomic(lock, nr, addr) \
-       test_and_set_bit((nr) ^ 0x18, (addr))
+       test_and_set_bit((nr), (addr))
 #define ext2_clear_bit_atomic(lock, nr, addr) \
-       test_and_clear_bit((nr) ^ 0x18, (addr))
+       test_and_clear_bit((nr), (addr))
 
 #include <asm-generic/bitops/ext2-non-atomic.h>
 #include <asm-generic/bitops/minix-le.h>
index 7e891fc..1865d72 100644 (file)
@@ -78,7 +78,7 @@ typedef unsigned long sigset_t;
 
 /* These should not be considered constants from userland.  */
 #define SIGRTMIN       32
-#define SIGRTMAX       (_NSIG-1)
+#define SIGRTMAX       _NSIG
 
 /*
  * SA_FLAGS values:
index 6aea7fd..196a111 100644 (file)
@@ -206,7 +206,7 @@ int module_finalize(const Elf_Ehdr *hdr,
                    const Elf_Shdr *sechdrs,
                    struct module *me)
 {
-       return module_bug_finalize(hdr, sechdrs, me);
+       return 0;
 }
 
 /*
@@ -214,5 +214,4 @@ int module_finalize(const Elf_Ehdr *hdr,
  */
 void module_arch_cleanup(struct module *mod)
 {
-       module_bug_cleanup(mod);
 }
index 717db14..d4de05a 100644 (file)
@@ -65,10 +65,10 @@ asmlinkage long sys_sigaction(int sig,
                old_sigset_t mask;
                if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
                    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-                   __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+                   __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
+                   __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+                   __get_user(mask, &act->sa_mask))
                        return -EFAULT;
-               __get_user(new_ka.sa.sa_flags, &act->sa_flags);
-               __get_user(mask, &act->sa_mask);
                siginitset(&new_ka.sa.sa_mask, mask);
        }
 
@@ -77,10 +77,10 @@ asmlinkage long sys_sigaction(int sig,
        if (!ret && oact) {
                if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
                    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-                   __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+                   __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
+                   __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+                   __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
                        return -EFAULT;
-               __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
-               __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
        }
 
        return ret;
@@ -102,6 +102,9 @@ static int restore_sigcontext(struct pt_regs *regs,
 {
        unsigned int err = 0;
 
+       /* Always make any pending restarted system calls return -EINTR */
+       current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
        if (is_using_fpu(current))
                fpu_kill_state(current);
 
@@ -330,8 +333,6 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
        regs->d0 = sig;
        regs->d1 = (unsigned long) &frame->sc;
 
-       set_fs(USER_DS);
-
        /* the tracer may want to single-step inside the handler */
        if (test_thread_flag(TIF_SINGLESTEP))
                ptrace_notify(SIGTRAP);
@@ -345,7 +346,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
        return 0;
 
 give_sigsegv:
-       force_sig(SIGSEGV, current);
+       force_sigsegv(sig, current);
        return -EFAULT;
 }
 
@@ -413,8 +414,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        regs->d0 = sig;
        regs->d1 = (long) &frame->info;
 
-       set_fs(USER_DS);
-
        /* the tracer may want to single-step inside the handler */
        if (test_thread_flag(TIF_SINGLESTEP))
                ptrace_notify(SIGTRAP);
@@ -428,10 +427,16 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        return 0;
 
 give_sigsegv:
-       force_sig(SIGSEGV, current);
+       force_sigsegv(sig, current);
        return -EFAULT;
 }
 
+static inline void stepback(struct pt_regs *regs)
+{
+       regs->pc -= 2;
+       regs->orig_d0 = -1;
+}
+
 /*
  * handle the actual delivery of a signal to userspace
  */
@@ -459,7 +464,7 @@ static int handle_signal(int sig,
                        /* fallthrough */
                case -ERESTARTNOINTR:
                        regs->d0 = regs->orig_d0;
-                       regs->pc -= 2;
+                       stepback(regs);
                }
        }
 
@@ -527,12 +532,12 @@ static void do_signal(struct pt_regs *regs)
                case -ERESTARTSYS:
                case -ERESTARTNOINTR:
                        regs->d0 = regs->orig_d0;
-                       regs->pc -= 2;
+                       stepback(regs);
                        break;
 
                case -ERESTART_RESTARTBLOCK:
                        regs->d0 = __NR_restart_syscall;
-                       regs->pc -= 2;
+                       stepback(regs);
                        break;
                }
        }
index 28b9d98..1557277 100644 (file)
@@ -2,13 +2,11 @@
 # Makefile for the MN10300-specific memory management code
 #
 
+cacheflush-y   := cache.o cache-mn10300.o
+cacheflush-$(CONFIG_MN10300_CACHE_WBACK) += cache-flush-mn10300.o
+
+cacheflush-$(CONFIG_MN10300_CACHE_DISABLED) := cache-disabled.o
+
 obj-y := \
        init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \
-       misalignment.o dma-alloc.o
-
-ifneq ($(CONFIG_MN10300_CACHE_DISABLED),y)
-obj-y  += cache.o cache-mn10300.o
-ifeq ($(CONFIG_MN10300_CACHE_WBACK),y)
-obj-y  += cache-flush-mn10300.o
-endif
-endif
+       misalignment.o dma-alloc.o $(cacheflush-y)
diff --git a/arch/mn10300/mm/cache-disabled.c b/arch/mn10300/mm/cache-disabled.c
new file mode 100644 (file)
index 0000000..f669ea4
--- /dev/null
@@ -0,0 +1,21 @@
+/* Handle the cache being disabled
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/mm.h>
+
+/*
+ * allow userspace to flush the instruction cache
+ */
+asmlinkage long sys_cacheflush(unsigned long start, unsigned long end)
+{
+       if (end < start)
+               return -EINVAL;
+       return 0;
+}
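
Note: with the cache disabled there is nothing to flush, so the new stub only sanity-checks the range and succeeds. A hypothetical userspace call site; the __NR_cacheflush number would come from the arch's unistd.h and is not shown in this patch:

#include <unistd.h>
#include <sys/syscall.h>
#include <asm/unistd.h>   /* assumed to provide __NR_cacheflush on mn10300 */

static inline long flush_insn_cache(unsigned long start, unsigned long end)
{
        return syscall(__NR_cacheflush, start, end);
}
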
index 1b76719..9261217 100644 (file)
@@ -54,13 +54,30 @@ EXPORT_SYMBOL(flush_icache_page);
 void flush_icache_range(unsigned long start, unsigned long end)
 {
 #ifdef CONFIG_MN10300_CACHE_WBACK
-       unsigned long addr, size, off;
+       unsigned long addr, size, base, off;
        struct page *page;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ppte, pte;
 
+       if (end > 0x80000000UL) {
+               /* addresses above 0xa0000000 do not go through the cache */
+               if (end > 0xa0000000UL) {
+                       end = 0xa0000000UL;
+                       if (start >= end)
+                               return;
+               }
+
+               /* kernel addresses between 0x80000000 and 0x9fffffff do not
+                * require page tables, so we just map such addresses directly */
+               base = (start >= 0x80000000UL) ? start : 0x80000000UL;
+               mn10300_dcache_flush_range(base, end);
+               if (base == start)
+                       goto invalidate;
+               end = base;
+       }
+
        for (; start < end; start += size) {
                /* work out how much of the page to flush */
                off = start & (PAGE_SIZE - 1);
@@ -104,6 +121,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
        }
 #endif
 
+invalidate:
        mn10300_icache_inv();
 }
 EXPORT_SYMBOL(flush_icache_range);
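
Note: the added fast path exploits the MN10300 address map: [0x80000000, 0xa0000000) is direct-mapped kernel memory that can be flushed without translating through page tables, and anything at or above 0xa0000000 never hits the cache. A standalone model of how a range gets partitioned; the demo addresses are placeholders:

#include <stdio.h>

static void partition(unsigned long start, unsigned long end)
{
        if (end > 0x80000000UL) {
                if (end > 0xa0000000UL) {
                        end = 0xa0000000UL;          /* uncached above here */
                        if (start >= end)
                                return;
                }
                unsigned long base =
                        start >= 0x80000000UL ? start : 0x80000000UL;
                printf("direct dcache flush: %#lx..%#lx\n", base, end);
                if (base == start)
                        return;                      /* only icache inval left */
                end = base;
        }
        printf("page-table walk: %#lx..%#lx\n", start, end);
}

int main(void)
{
        partition(0x7fff0000UL, 0xa0010000UL);       /* spans all three windows */
        return 0;
}
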
index 159a2b8..6e81bb5 100644 (file)
@@ -941,11 +941,10 @@ int module_finalize(const Elf_Ehdr *hdr,
        nsyms = newptr - (Elf_Sym *)symhdr->sh_addr;
        DEBUGP("NEW num_symtab %lu\n", nsyms);
        symhdr->sh_size = nsyms * sizeof(Elf_Sym);
-       return module_bug_finalize(hdr, sechdrs, me);
+       return 0;
 }
 
 void module_arch_cleanup(struct module *mod)
 {
        deregister_unwind_table(mod);
-       module_bug_cleanup(mod);
 }
index 477c663..49cee9d 100644 (file)
@@ -63,11 +63,6 @@ int module_finalize(const Elf_Ehdr *hdr,
                const Elf_Shdr *sechdrs, struct module *me)
 {
        const Elf_Shdr *sect;
-       int err;
-
-       err = module_bug_finalize(hdr, sechdrs, me);
-       if (err)
-               return err;
 
        /* Apply feature fixups */
        sect = find_section(hdr, sechdrs, "__ftr_fixup");
@@ -101,5 +96,4 @@ int module_finalize(const Elf_Ehdr *hdr,
 
 void module_arch_cleanup(struct module *mod)
 {
-       module_bug_cleanup(mod);
 }
index 95ad9da..d05ae42 100644 (file)
 #include "ppc32.h"
 #endif
 
-/*
- * Store another value in a callchain_entry.
- */
-static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
-{
-       unsigned int nr = entry->nr;
-
-       if (nr < PERF_MAX_STACK_DEPTH) {
-               entry->ip[nr] = ip;
-               entry->nr = nr + 1;
-       }
-}
 
 /*
  * Is sp valid as the address of the next kernel stack frame after prev_sp?
@@ -58,8 +46,8 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
        return 0;
 }
 
-static void perf_callchain_kernel(struct pt_regs *regs,
-                                 struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
        unsigned long sp, next_sp;
        unsigned long next_ip;
@@ -69,8 +57,7 @@ static void perf_callchain_kernel(struct pt_regs *regs,
 
        lr = regs->link;
        sp = regs->gpr[1];
-       callchain_store(entry, PERF_CONTEXT_KERNEL);
-       callchain_store(entry, regs->nip);
+       perf_callchain_store(entry, regs->nip);
 
        if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
                return;
@@ -89,7 +76,7 @@ static void perf_callchain_kernel(struct pt_regs *regs,
                        next_ip = regs->nip;
                        lr = regs->link;
                        level = 0;
-                       callchain_store(entry, PERF_CONTEXT_KERNEL);
+                       perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
 
                } else {
                        if (level == 0)
@@ -111,7 +98,7 @@ static void perf_callchain_kernel(struct pt_regs *regs,
                        ++level;
                }
 
-               callchain_store(entry, next_ip);
+               perf_callchain_store(entry, next_ip);
                if (!valid_next_sp(next_sp, sp))
                        return;
                sp = next_sp;
@@ -233,8 +220,8 @@ static int sane_signal_64_frame(unsigned long sp)
                puc == (unsigned long) &sf->uc;
 }
 
-static void perf_callchain_user_64(struct pt_regs *regs,
-                                  struct perf_callchain_entry *entry)
+static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+                                  struct pt_regs *regs)
 {
        unsigned long sp, next_sp;
        unsigned long next_ip;
@@ -246,8 +233,7 @@ static void perf_callchain_user_64(struct pt_regs *regs,
        next_ip = regs->nip;
        lr = regs->link;
        sp = regs->gpr[1];
-       callchain_store(entry, PERF_CONTEXT_USER);
-       callchain_store(entry, next_ip);
+       perf_callchain_store(entry, next_ip);
 
        for (;;) {
                fp = (unsigned long __user *) sp;
@@ -276,14 +262,14 @@ static void perf_callchain_user_64(struct pt_regs *regs,
                            read_user_stack_64(&uregs[PT_R1], &sp))
                                return;
                        level = 0;
-                       callchain_store(entry, PERF_CONTEXT_USER);
-                       callchain_store(entry, next_ip);
+                       perf_callchain_store(entry, PERF_CONTEXT_USER);
+                       perf_callchain_store(entry, next_ip);
                        continue;
                }
 
                if (level == 0)
                        next_ip = lr;
-               callchain_store(entry, next_ip);
+               perf_callchain_store(entry, next_ip);
                ++level;
                sp = next_sp;
        }
@@ -315,8 +301,8 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
        return __get_user_inatomic(*ret, ptr);
 }
 
-static inline void perf_callchain_user_64(struct pt_regs *regs,
-                                         struct perf_callchain_entry *entry)
+static inline void perf_callchain_user_64(struct perf_callchain_entry *entry,
+                                         struct pt_regs *regs)
 {
 }
 
@@ -435,8 +421,8 @@ static unsigned int __user *signal_frame_32_regs(unsigned int sp,
        return mctx->mc_gregs;
 }
 
-static void perf_callchain_user_32(struct pt_regs *regs,
-                                  struct perf_callchain_entry *entry)
+static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+                                  struct pt_regs *regs)
 {
        unsigned int sp, next_sp;
        unsigned int next_ip;
@@ -447,8 +433,7 @@ static void perf_callchain_user_32(struct pt_regs *regs,
        next_ip = regs->nip;
        lr = regs->link;
        sp = regs->gpr[1];
-       callchain_store(entry, PERF_CONTEXT_USER);
-       callchain_store(entry, next_ip);
+       perf_callchain_store(entry, next_ip);
 
        while (entry->nr < PERF_MAX_STACK_DEPTH) {
                fp = (unsigned int __user *) (unsigned long) sp;
@@ -470,45 +455,24 @@ static void perf_callchain_user_32(struct pt_regs *regs,
                            read_user_stack_32(&uregs[PT_R1], &sp))
                                return;
                        level = 0;
-                       callchain_store(entry, PERF_CONTEXT_USER);
-                       callchain_store(entry, next_ip);
+                       perf_callchain_store(entry, PERF_CONTEXT_USER);
+                       perf_callchain_store(entry, next_ip);
                        continue;
                }
 
                if (level == 0)
                        next_ip = lr;
-               callchain_store(entry, next_ip);
+               perf_callchain_store(entry, next_ip);
                ++level;
                sp = next_sp;
        }
 }
 
-/*
- * Since we can't get PMU interrupts inside a PMU interrupt handler,
- * we don't need separate irq and nmi entries here.
- */
-static DEFINE_PER_CPU(struct perf_callchain_entry, cpu_perf_callchain);
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
-       struct perf_callchain_entry *entry = &__get_cpu_var(cpu_perf_callchain);
-
-       entry->nr = 0;
-
-       if (!user_mode(regs)) {
-               perf_callchain_kernel(regs, entry);
-               if (current->mm)
-                       regs = task_pt_regs(current);
-               else
-                       regs = NULL;
-       }
-
-       if (regs) {
-               if (current_is_64bit())
-                       perf_callchain_user_64(regs, entry);
-               else
-                       perf_callchain_user_32(regs, entry);
-       }
-
-       return entry;
+       if (current_is_64bit())
+               perf_callchain_user_64(entry, regs);
+       else
+               perf_callchain_user_32(entry, regs);
 }
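
The callchain_store() helper deleted above (and from sh and sparc below) now has a single generic counterpart, perf_callchain_store(); its body is essentially the removed code (a sketch, assuming include/linux/perf_event.h of this series):

	static inline void
	perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
	{
		if (entry->nr < PERF_MAX_STACK_DEPTH)
			entry->ip[entry->nr++] = ip;
	}

The PERF_CONTEXT_KERNEL/PERF_CONTEXT_USER markers are likewise pushed by the core before it calls the arch hooks, which is why perf_callchain_kernel() above only stores regs->nip and the frame walk.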
index d301a30..9cb4924 100644 (file)
@@ -402,6 +402,9 @@ static void power_pmu_read(struct perf_event *event)
 {
        s64 val, delta, prev;
 
+       if (event->hw.state & PERF_HES_STOPPED)
+               return;
+
        if (!event->hw.idx)
                return;
        /*
@@ -517,7 +520,7 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
  * Disable all events to prevent PMU interrupts and to allow
  * events to be added or removed.
  */
-void hw_perf_disable(void)
+static void power_pmu_disable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw;
        unsigned long flags;
@@ -565,7 +568,7 @@ void hw_perf_disable(void)
  * If we were previously disabled and events were added, then
  * put the new config on the PMU.
  */
-void hw_perf_enable(void)
+static void power_pmu_enable(struct pmu *pmu)
 {
        struct perf_event *event;
        struct cpu_hw_events *cpuhw;
@@ -672,6 +675,8 @@ void hw_perf_enable(void)
                }
                local64_set(&event->hw.prev_count, val);
                event->hw.idx = idx;
+               if (event->hw.state & PERF_HES_STOPPED)
+                       val = 0;
                write_pmc(idx, val);
                perf_event_update_userpage(event);
        }
@@ -727,7 +732,7 @@ static int collect_events(struct perf_event *group, int max_count,
  * re-enable the PMU in order to get hw_perf_enable to do the
  * actual work of reconfiguring the PMU.
  */
-static int power_pmu_enable(struct perf_event *event)
+static int power_pmu_add(struct perf_event *event, int ef_flags)
 {
        struct cpu_hw_events *cpuhw;
        unsigned long flags;
@@ -735,7 +740,7 @@ static int power_pmu_enable(struct perf_event *event)
        int ret = -EAGAIN;
 
        local_irq_save(flags);
-       perf_disable();
+       perf_pmu_disable(event->pmu);
 
        /*
         * Add the event to the list (if there is room)
@@ -749,6 +754,9 @@ static int power_pmu_enable(struct perf_event *event)
        cpuhw->events[n0] = event->hw.config;
        cpuhw->flags[n0] = event->hw.event_base;
 
+       if (!(ef_flags & PERF_EF_START))
+               event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
        /*
         * If a group event scheduling transaction was started,
         * skip the schedulability test here; it will be performed
@@ -769,7 +777,7 @@ nocheck:
 
        ret = 0;
  out:
-       perf_enable();
+       perf_pmu_enable(event->pmu);
        local_irq_restore(flags);
        return ret;
 }
@@ -777,14 +785,14 @@ nocheck:
 /*
 * Remove an event from the PMU.
  */
-static void power_pmu_disable(struct perf_event *event)
+static void power_pmu_del(struct perf_event *event, int ef_flags)
 {
        struct cpu_hw_events *cpuhw;
        long i;
        unsigned long flags;
 
        local_irq_save(flags);
-       perf_disable();
+       perf_pmu_disable(event->pmu);
 
        power_pmu_read(event);
 
@@ -821,34 +829,60 @@ static void power_pmu_disable(struct perf_event *event)
                cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
        }
 
-       perf_enable();
+       perf_pmu_enable(event->pmu);
        local_irq_restore(flags);
 }
 
 /*
- * Re-enable interrupts on a event after they were throttled
- * because they were coming too fast.
+ * POWER-PMU does not support disabling individual counters; instead,
+ * program the counter to its max value and ignore the interrupts.
  */
-static void power_pmu_unthrottle(struct perf_event *event)
+
+static void power_pmu_start(struct perf_event *event, int ef_flags)
+{
+       unsigned long flags;
+       s64 left;
+
+       if (!event->hw.idx || !event->hw.sample_period)
+               return;
+
+       if (!(event->hw.state & PERF_HES_STOPPED))
+               return;
+
+       if (ef_flags & PERF_EF_RELOAD)
+               WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+
+       local_irq_save(flags);
+       perf_pmu_disable(event->pmu);
+
+       event->hw.state = 0;
+       left = local64_read(&event->hw.period_left);
+       write_pmc(event->hw.idx, left);
+
+       perf_event_update_userpage(event);
+       perf_pmu_enable(event->pmu);
+       local_irq_restore(flags);
+}
+
+static void power_pmu_stop(struct perf_event *event, int ef_flags)
 {
-       s64 val, left;
        unsigned long flags;
 
        if (!event->hw.idx || !event->hw.sample_period)
                return;
+
+       if (event->hw.state & PERF_HES_STOPPED)
+               return;
+
        local_irq_save(flags);
-       perf_disable();
+       perf_pmu_disable(event->pmu);
+
        power_pmu_read(event);
-       left = event->hw.sample_period;
-       event->hw.last_period = left;
-       val = 0;
-       if (left < 0x80000000L)
-               val = 0x80000000L - left;
-       write_pmc(event->hw.idx, val);
-       local64_set(&event->hw.prev_count, val);
-       local64_set(&event->hw.period_left, left);
+       event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+       write_pmc(event->hw.idx, 0);
+
        perf_event_update_userpage(event);
-       perf_enable();
+       perf_pmu_enable(event->pmu);
        local_irq_restore(flags);
 }
 
@@ -857,10 +891,11 @@ static void power_pmu_unthrottle(struct perf_event *event)
  * Set the flag to make pmu::enable() not perform the
  * schedulability test, it will be performed at commit time
  */
-void power_pmu_start_txn(const struct pmu *pmu)
+void power_pmu_start_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
+       perf_pmu_disable(pmu);
        cpuhw->group_flag |= PERF_EVENT_TXN;
        cpuhw->n_txn_start = cpuhw->n_events;
 }
@@ -870,11 +905,12 @@ void power_pmu_start_txn(const struct pmu *pmu)
  * Clear the flag and pmu::enable() will perform the
  * schedulability test.
  */
-void power_pmu_cancel_txn(const struct pmu *pmu)
+void power_pmu_cancel_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
        cpuhw->group_flag &= ~PERF_EVENT_TXN;
+       perf_pmu_enable(pmu);
 }
 
 /*
@@ -882,7 +918,7 @@ void power_pmu_cancel_txn(const struct pmu *pmu)
  * Perform the group schedulability test as a whole
  * Return 0 if success
  */
-int power_pmu_commit_txn(const struct pmu *pmu)
+int power_pmu_commit_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw;
        long i, n;
@@ -901,19 +937,10 @@ int power_pmu_commit_txn(const struct pmu *pmu)
                cpuhw->event[i]->hw.config = cpuhw->events[i];
 
        cpuhw->group_flag &= ~PERF_EVENT_TXN;
+       perf_pmu_enable(pmu);
        return 0;
 }
 
-struct pmu power_pmu = {
-       .enable         = power_pmu_enable,
-       .disable        = power_pmu_disable,
-       .read           = power_pmu_read,
-       .unthrottle     = power_pmu_unthrottle,
-       .start_txn      = power_pmu_start_txn,
-       .cancel_txn     = power_pmu_cancel_txn,
-       .commit_txn     = power_pmu_commit_txn,
-};
-
 /*
  * Return 1 if we might be able to put event on a limited PMC,
  * or 0 if not.
@@ -1014,7 +1041,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp)
        return 0;
 }
 
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+static int power_pmu_event_init(struct perf_event *event)
 {
        u64 ev;
        unsigned long flags;
@@ -1026,25 +1053,27 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
        struct cpu_hw_events *cpuhw;
 
        if (!ppmu)
-               return ERR_PTR(-ENXIO);
+               return -ENOENT;
+
        switch (event->attr.type) {
        case PERF_TYPE_HARDWARE:
                ev = event->attr.config;
                if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
-                       return ERR_PTR(-EOPNOTSUPP);
+                       return -EOPNOTSUPP;
                ev = ppmu->generic_events[ev];
                break;
        case PERF_TYPE_HW_CACHE:
                err = hw_perf_cache_event(event->attr.config, &ev);
                if (err)
-                       return ERR_PTR(err);
+                       return err;
                break;
        case PERF_TYPE_RAW:
                ev = event->attr.config;
                break;
        default:
-               return ERR_PTR(-EINVAL);
+               return -ENOENT;
        }
+
        event->hw.config_base = ev;
        event->hw.idx = 0;
 
@@ -1081,7 +1110,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
                         */
                        ev = normal_pmc_alternative(ev, flags);
                        if (!ev)
-                               return ERR_PTR(-EINVAL);
+                               return -EINVAL;
                }
        }
 
@@ -1095,19 +1124,19 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
                n = collect_events(event->group_leader, ppmu->n_counter - 1,
                                   ctrs, events, cflags);
                if (n < 0)
-                       return ERR_PTR(-EINVAL);
+                       return -EINVAL;
        }
        events[n] = ev;
        ctrs[n] = event;
        cflags[n] = flags;
        if (check_excludes(ctrs, cflags, n, 1))
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
 
        cpuhw = &get_cpu_var(cpu_hw_events);
        err = power_check_constraints(cpuhw, events, cflags, n + 1);
        put_cpu_var(cpu_hw_events);
        if (err)
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
 
        event->hw.config = events[n];
        event->hw.event_base = cflags[n];
@@ -1132,11 +1161,23 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
        }
        event->destroy = hw_perf_event_destroy;
 
-       if (err)
-               return ERR_PTR(err);
-       return &power_pmu;
+       return err;
 }
 
+struct pmu power_pmu = {
+       .pmu_enable     = power_pmu_enable,
+       .pmu_disable    = power_pmu_disable,
+       .event_init     = power_pmu_event_init,
+       .add            = power_pmu_add,
+       .del            = power_pmu_del,
+       .start          = power_pmu_start,
+       .stop           = power_pmu_stop,
+       .read           = power_pmu_read,
+       .start_txn      = power_pmu_start_txn,
+       .cancel_txn     = power_pmu_cancel_txn,
+       .commit_txn     = power_pmu_commit_txn,
+};
+
 /*
  * A counter has overflowed; update its count and record
  * things if requested.  Note that interrupts are hard-disabled
@@ -1149,6 +1190,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
        s64 prev, delta, left;
        int record = 0;
 
+       if (event->hw.state & PERF_HES_STOPPED) {
+               write_pmc(event->hw.idx, 0);
+               return;
+       }
+
        /* we don't have to worry about interrupts here */
        prev = local64_read(&event->hw.prev_count);
        delta = (val - prev) & 0xfffffffful;
@@ -1171,6 +1217,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
                        val = 0x80000000LL - left;
        }
 
+       write_pmc(event->hw.idx, val);
+       local64_set(&event->hw.prev_count, val);
+       local64_set(&event->hw.period_left, left);
+       perf_event_update_userpage(event);
+
        /*
         * Finally record data if requested.
         */
@@ -1183,23 +1234,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
                if (event->attr.sample_type & PERF_SAMPLE_ADDR)
                        perf_get_data_addr(regs, &data.addr);
 
-               if (perf_event_overflow(event, nmi, &data, regs)) {
-                       /*
-                        * Interrupts are coming too fast - throttle them
-                        * by setting the event to 0, so it will be
-                        * at least 2^30 cycles until the next interrupt
-                        * (assuming each event counts at most 2 counts
-                        * per cycle).
-                        */
-                       val = 0;
-                       left = ~0ULL >> 1;
-               }
+               if (perf_event_overflow(event, nmi, &data, regs))
+                       power_pmu_stop(event, 0);
        }
-
-       write_pmc(event->hw.idx, val);
-       local64_set(&event->hw.prev_count, val);
-       local64_set(&event->hw.period_left, left);
-       perf_event_update_userpage(event);
 }
 
 /*
@@ -1342,6 +1379,7 @@ int register_power_pmu(struct power_pmu *pmu)
                freeze_events_kernel = MMCR0_FCHV;
 #endif /* CONFIG_PPC64 */
 
+       perf_pmu_register(&power_pmu);
        perf_cpu_notifier(power_pmu_notifier);
 
        return 0;
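
power_pmu_start_txn() now disables the PMU itself, and commit/cancel re-enable it, so the perf_pmu_disable() inside power_pmu_add() simply nests. The core drives group scheduling through these hooks roughly as follows (an illustrative sketch of the caller, not code from this patch):

	pmu->start_txn(pmu);			/* also disables the PMU */
	if (pmu->add(leader, PERF_EF_START))	/* leader, then each sibling */
		goto group_error;
	list_for_each_entry(sibling, &leader->sibling_list, group_entry)
		if (pmu->add(sibling, PERF_EF_START))
			goto group_error;
	if (pmu->commit_txn(pmu) == 0)		/* one schedulability test */
		return 0;			/* commit re-enabled the PMU */
group_error:
	pmu->cancel_txn(pmu);			/* roll back and re-enable */
	return -EAGAIN;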
index 1ba4547..7ecca59 100644 (file)
@@ -156,6 +156,9 @@ static void fsl_emb_pmu_read(struct perf_event *event)
 {
        s64 val, delta, prev;
 
+       if (event->hw.state & PERF_HES_STOPPED)
+               return;
+
        /*
         * Performance monitor interrupts come even when interrupts
         * are soft-disabled, as long as interrupts are hard-enabled.
@@ -177,7 +180,7 @@ static void fsl_emb_pmu_read(struct perf_event *event)
  * Disable all events to prevent PMU interrupts and to allow
  * events to be added or removed.
  */
-void hw_perf_disable(void)
+static void fsl_emb_pmu_disable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw;
        unsigned long flags;
@@ -216,7 +219,7 @@ void hw_perf_disable(void)
  * If we were previously disabled and events were added, then
  * put the new config on the PMU.
  */
-void hw_perf_enable(void)
+static void fsl_emb_pmu_enable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw;
        unsigned long flags;
@@ -262,8 +265,8 @@ static int collect_events(struct perf_event *group, int max_count,
        return n;
 }
 
-/* perf must be disabled, context locked on entry */
-static int fsl_emb_pmu_enable(struct perf_event *event)
+/* context locked on entry */
+static int fsl_emb_pmu_add(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuhw;
        int ret = -EAGAIN;
@@ -271,6 +274,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
        u64 val;
        int i;
 
+       perf_pmu_disable(event->pmu);
        cpuhw = &get_cpu_var(cpu_hw_events);
 
        if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
@@ -301,6 +305,12 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
                        val = 0x80000000L - left;
        }
        local64_set(&event->hw.prev_count, val);
+
+       if (!(flags & PERF_EF_START)) {
+               event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+               val = 0;
+       }
+
        write_pmc(i, val);
        perf_event_update_userpage(event);
 
@@ -310,15 +320,17 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
        ret = 0;
  out:
        put_cpu_var(cpu_hw_events);
+       perf_pmu_enable(event->pmu);
        return ret;
 }
 
-/* perf must be disabled, context locked on entry */
-static void fsl_emb_pmu_disable(struct perf_event *event)
+/* context locked on entry */
+static void fsl_emb_pmu_del(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuhw;
        int i = event->hw.idx;
 
+       perf_pmu_disable(event->pmu);
        if (i < 0)
                goto out;
 
@@ -346,44 +358,57 @@ static void fsl_emb_pmu_disable(struct perf_event *event)
        cpuhw->n_events--;
 
  out:
+       perf_pmu_enable(event->pmu);
        put_cpu_var(cpu_hw_events);
 }
 
-/*
- * Re-enable interrupts on a event after they were throttled
- * because they were coming too fast.
- *
- * Context is locked on entry, but perf is not disabled.
- */
-static void fsl_emb_pmu_unthrottle(struct perf_event *event)
+static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags)
 {
-       s64 val, left;
        unsigned long flags;
+       s64 left;
 
        if (event->hw.idx < 0 || !event->hw.sample_period)
                return;
+
+       if (!(event->hw.state & PERF_HES_STOPPED))
+               return;
+
+       if (ef_flags & PERF_EF_RELOAD)
+               WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+
        local_irq_save(flags);
-       perf_disable();
-       fsl_emb_pmu_read(event);
-       left = event->hw.sample_period;
-       event->hw.last_period = left;
-       val = 0;
-       if (left < 0x80000000L)
-               val = 0x80000000L - left;
-       write_pmc(event->hw.idx, val);
-       local64_set(&event->hw.prev_count, val);
-       local64_set(&event->hw.period_left, left);
+       perf_pmu_disable(event->pmu);
+
+       event->hw.state = 0;
+       left = local64_read(&event->hw.period_left);
+       write_pmc(event->hw.idx, left);
+
        perf_event_update_userpage(event);
-       perf_enable();
+       perf_pmu_enable(event->pmu);
        local_irq_restore(flags);
 }
 
-static struct pmu fsl_emb_pmu = {
-       .enable         = fsl_emb_pmu_enable,
-       .disable        = fsl_emb_pmu_disable,
-       .read           = fsl_emb_pmu_read,
-       .unthrottle     = fsl_emb_pmu_unthrottle,
-};
+static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags)
+{
+       unsigned long flags;
+
+       if (event->hw.idx < 0 || !event->hw.sample_period)
+               return;
+
+       if (event->hw.state & PERF_HES_STOPPED)
+               return;
+
+       local_irq_save(flags);
+       perf_pmu_disable(event->pmu);
+
+       fsl_emb_pmu_read(event);
+       event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+       write_pmc(event->hw.idx, 0);
+
+       perf_event_update_userpage(event);
+       perf_pmu_enable(event->pmu);
+       local_irq_restore(flags);
+}
 
 /*
  * Release the PMU if this is the last perf_event.
@@ -428,7 +453,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp)
        return 0;
 }
 
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+static int fsl_emb_pmu_event_init(struct perf_event *event)
 {
        u64 ev;
        struct perf_event *events[MAX_HWEVENTS];
@@ -441,14 +466,14 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
        case PERF_TYPE_HARDWARE:
                ev = event->attr.config;
                if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
-                       return ERR_PTR(-EOPNOTSUPP);
+                       return -EOPNOTSUPP;
                ev = ppmu->generic_events[ev];
                break;
 
        case PERF_TYPE_HW_CACHE:
                err = hw_perf_cache_event(event->attr.config, &ev);
                if (err)
-                       return ERR_PTR(err);
+                       return err;
                break;
 
        case PERF_TYPE_RAW:
@@ -456,12 +481,12 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
                break;
 
        default:
-               return ERR_PTR(-EINVAL);
+               return -ENOENT;
        }
 
        event->hw.config = ppmu->xlate_event(ev);
        if (!(event->hw.config & FSL_EMB_EVENT_VALID))
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
 
        /*
         * If this is in a group, check if it can go on with all the
@@ -473,7 +498,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
                n = collect_events(event->group_leader,
                                   ppmu->n_counter - 1, events);
                if (n < 0)
-                       return ERR_PTR(-EINVAL);
+                       return -EINVAL;
        }
 
        if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) {
@@ -484,7 +509,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
                }
 
                if (num_restricted >= ppmu->n_restricted)
-                       return ERR_PTR(-EINVAL);
+                       return -EINVAL;
        }
 
        event->hw.idx = -1;
@@ -497,7 +522,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
        if (event->attr.exclude_kernel)
                event->hw.config_base |= PMLCA_FCS;
        if (event->attr.exclude_idle)
-               return ERR_PTR(-ENOTSUPP);
+               return -ENOTSUPP;
 
        event->hw.last_period = event->hw.sample_period;
        local64_set(&event->hw.period_left, event->hw.last_period);
@@ -523,11 +548,20 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
        }
        event->destroy = hw_perf_event_destroy;
 
-       if (err)
-               return ERR_PTR(err);
-       return &fsl_emb_pmu;
+       return err;
 }
 
+static struct pmu fsl_emb_pmu = {
+       .pmu_enable     = fsl_emb_pmu_enable,
+       .pmu_disable    = fsl_emb_pmu_disable,
+       .event_init     = fsl_emb_pmu_event_init,
+       .add            = fsl_emb_pmu_add,
+       .del            = fsl_emb_pmu_del,
+       .start          = fsl_emb_pmu_start,
+       .stop           = fsl_emb_pmu_stop,
+       .read           = fsl_emb_pmu_read,
+};
+
 /*
  * A counter has overflowed; update its count and record
  * things if requested.  Note that interrupts are hard-disabled
@@ -540,6 +574,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
        s64 prev, delta, left;
        int record = 0;
 
+       if (event->hw.state & PERF_HES_STOPPED) {
+               write_pmc(event->hw.idx, 0);
+               return;
+       }
+
        /* we don't have to worry about interrupts here */
        prev = local64_read(&event->hw.prev_count);
        delta = (val - prev) & 0xfffffffful;
@@ -562,6 +601,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
                        val = 0x80000000LL - left;
        }
 
+       write_pmc(event->hw.idx, val);
+       local64_set(&event->hw.prev_count, val);
+       local64_set(&event->hw.period_left, left);
+       perf_event_update_userpage(event);
+
        /*
         * Finally record data if requested.
         */
@@ -571,23 +615,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
                perf_sample_data_init(&data, 0);
                data.period = event->hw.last_period;
 
-               if (perf_event_overflow(event, nmi, &data, regs)) {
-                       /*
-                        * Interrupts are coming too fast - throttle them
-                        * by setting the event to 0, so it will be
-                        * at least 2^30 cycles until the next interrupt
-                        * (assuming each event counts at most 2 counts
-                        * per cycle).
-                        */
-                       val = 0;
-                       left = ~0ULL >> 1;
-               }
+               if (perf_event_overflow(event, nmi, &data, regs))
+                       fsl_emb_pmu_stop(event, 0);
        }
-
-       write_pmc(event->hw.idx, val);
-       local64_set(&event->hw.prev_count, val);
-       local64_set(&event->hw.period_left, left);
-       perf_event_update_userpage(event);
 }
 
 static void perf_event_interrupt(struct pt_regs *regs)
@@ -651,5 +681,7 @@ int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
        pr_info("%s performance monitor hardware support registered\n",
                pmu->name);
 
+       perf_pmu_register(&fsl_emb_pmu);
+
        return 0;
 }
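
Together with the powerpc hunks above, this establishes the flag protocol the new interface expects: PERF_HES_STOPPED means the counter is quiesced, PERF_HES_UPTODATE means event->count already reflects the hardware, PERF_EF_UPDATE asks ->stop() to fold the count, and PERF_EF_RELOAD asks ->start() to reprogram the period. A minimal skeleton honouring that contract, mirroring the code above (hw_quiesce()/hw_resume()/fold_count()/program_period() are illustrative stand-ins):

	static void example_pmu_stop(struct perf_event *event, int flags)
	{
		if (!(event->hw.state & PERF_HES_STOPPED)) {
			hw_quiesce(event->hw.idx);
			event->hw.state |= PERF_HES_STOPPED;
		}
		if ((flags & PERF_EF_UPDATE) &&
		    !(event->hw.state & PERF_HES_UPTODATE)) {
			fold_count(event);
			event->hw.state |= PERF_HES_UPTODATE;
		}
	}

	static void example_pmu_start(struct perf_event *event, int flags)
	{
		if (flags & PERF_EF_RELOAD) {
			WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
			program_period(event);
		}
		event->hw.state = 0;
		hw_resume(event->hw.idx);
	}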
index 5b243bd..3dc2a8d 100644 (file)
@@ -57,7 +57,7 @@ static struct clk *mpc5121_clk_get(struct device *dev, const char *id)
        int id_match = 0;
 
        if (dev == NULL || id == NULL)
-               return NULL;
+               return clk;
 
        mutex_lock(&clocks_mutex);
        list_for_each_entry(p, &clocks, node) {
index 45c0cb9..18c1048 100644 (file)
@@ -99,7 +99,7 @@ static void __init efika_pcisetup(void)
        if (bus_range == NULL || len < 2 * sizeof(int)) {
                printk(KERN_WARNING EFIKA_PLATFORM_NAME
                       ": Can't get bus-range for %s\n", pcictrl->full_name);
-               return;
+               goto out_put;
        }
 
        if (bus_range[1] == bus_range[0])
@@ -111,12 +111,12 @@ static void __init efika_pcisetup(void)
        printk(" controlled by %s\n", pcictrl->full_name);
        printk("\n");
 
-       hose = pcibios_alloc_controller(of_node_get(pcictrl));
+       hose = pcibios_alloc_controller(pcictrl);
        if (!hose) {
                printk(KERN_WARNING EFIKA_PLATFORM_NAME
                       ": Can't allocate PCI controller structure for %s\n",
                       pcictrl->full_name);
-               return;
+               goto out_put;
        }
 
        hose->first_busno = bus_range[0];
@@ -124,6 +124,9 @@ static void __init efika_pcisetup(void)
        hose->ops = &rtas_pci_ops;
 
        pci_process_bridge_OF_ranges(hose, pcictrl, 0);
+       return;
+out_put:
+       of_node_put(pcictrl);
 }
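
The efika change is a device-node refcount fix: pcibios_alloc_controller() takes over the reference the caller already holds, so the extra of_node_get() double-counted, and the early returns leaked the original reference. The general pattern, sketched (setup_controller() is an illustrative stand-in):

	np = of_find_node_by_type(NULL, "pci");	/* acquires a reference */
	if (!np)
		return;
	if (setup_controller(np) < 0)
		goto out_put;		/* failure paths drop the ref */
	return;				/* success: the callee keeps the ref */
out_put:
	of_node_put(np);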
 
 #else
index 6e90531..41f3a7e 100644 (file)
@@ -325,12 +325,16 @@ int mpc5200_psc_ac97_gpio_reset(int psc_number)
        clrbits32(&simple_gpio->simple_dvo, sync | out);
        clrbits8(&wkup_gpio->wkup_dvo, reset);
 
-       /* wait at lease 1 us */
-       udelay(2);
+       /* wait for 1 us */
+       udelay(1);
 
        /* Deassert reset */
        setbits8(&wkup_gpio->wkup_dvo, reset);
 
+       /* wait at least 200ns */
+       /* 7 ~= (200ns * timebase) / ns2sec */
+       __delay(7);
+
        /* Restore pin-muxing */
        out_be32(&simple_gpio->port_config, mux);
 
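The __delay(7) figure follows from the comment's formula once a timebase frequency is plugged in: __delay() spins for timebase ticks, and assuming the roughly 33 MHz timebase of typical MPC5200 boards (board-dependent),

	200 ns * 33,000,000 ticks/s / 10^9 ns/s = 6.6 ticks, rounded up to 7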
index 22cfd63..f7167ee 100644 (file)
@@ -407,10 +407,9 @@ int module_finalize(const Elf_Ehdr *hdr,
 {
        vfree(me->arch.syminfo);
        me->arch.syminfo = NULL;
-       return module_bug_finalize(hdr, sechdrs, me);
+       return 0;
 }
 
 void module_arch_cleanup(struct module *mod)
 {
-       module_bug_cleanup(mod);
 }
index 43adddf..ae0be69 100644 (file)
@@ -149,13 +149,11 @@ int module_finalize(const Elf_Ehdr *hdr,
        int ret = 0;
 
        ret |= module_dwarf_finalize(hdr, sechdrs, me);
-       ret |= module_bug_finalize(hdr, sechdrs, me);
 
        return ret;
 }
 
 void module_arch_cleanup(struct module *mod)
 {
-       module_bug_cleanup(mod);
        module_dwarf_cleanup(mod);
 }
index a9dd3ab..d5ca1ef 100644 (file)
 #include <asm/unwinder.h>
 #include <asm/ptrace.h>
 
-static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
-{
-       if (entry->nr < PERF_MAX_STACK_DEPTH)
-               entry->ip[entry->nr++] = ip;
-}
 
 static void callchain_warning(void *data, char *msg)
 {
@@ -39,7 +34,7 @@ static void callchain_address(void *data, unsigned long addr, int reliable)
        struct perf_callchain_entry *entry = data;
 
        if (reliable)
-               callchain_store(entry, addr);
+               perf_callchain_store(entry, addr);
 }
 
 static const struct stacktrace_ops callchain_ops = {
@@ -49,47 +44,10 @@ static const struct stacktrace_ops callchain_ops = {
        .address        = callchain_address,
 };
 
-static void
-perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
-       callchain_store(entry, PERF_CONTEXT_KERNEL);
-       callchain_store(entry, regs->pc);
+       perf_callchain_store(entry, regs->pc);
 
        unwind_stack(NULL, regs, NULL, &callchain_ops, entry);
 }
-
-static void
-perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
-{
-       int is_user;
-
-       if (!regs)
-               return;
-
-       is_user = user_mode(regs);
-
-       if (is_user && current->state != TASK_RUNNING)
-               return;
-
-       /*
-        * Only the kernel side is implemented for now.
-        */
-       if (!is_user)
-               perf_callchain_kernel(regs, entry);
-}
-
-/*
- * No need for separate IRQ and NMI entries.
- */
-static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
-{
-       struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
-
-       entry->nr = 0;
-
-       perf_do_callchain(regs, entry);
-
-       return entry;
-}
index 55fe89b..5a4b334 100644 (file)
@@ -224,50 +224,80 @@ again:
        local64_add(delta, &event->count);
 }
 
-static void sh_pmu_disable(struct perf_event *event)
+static void sh_pmu_stop(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
 
-       clear_bit(idx, cpuc->active_mask);
-       sh_pmu->disable(hwc, idx);
+       if (!(event->hw.state & PERF_HES_STOPPED)) {
+               sh_pmu->disable(hwc, idx);
+               cpuc->events[idx] = NULL;
+               event->hw.state |= PERF_HES_STOPPED;
+       }
+
+       if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
+               sh_perf_event_update(event, &event->hw, idx);
+               event->hw.state |= PERF_HES_UPTODATE;
+       }
+}
 
-       barrier();
+static void sh_pmu_start(struct perf_event *event, int flags)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct hw_perf_event *hwc = &event->hw;
+       int idx = hwc->idx;
 
-       sh_perf_event_update(event, &event->hw, idx);
+       if (WARN_ON_ONCE(idx == -1))
+               return;
+
+       if (flags & PERF_EF_RELOAD)
+               WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+
+       cpuc->events[idx] = event;
+       event->hw.state = 0;
+       sh_pmu->enable(hwc, idx);
+}
+
+static void sh_pmu_del(struct perf_event *event, int flags)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-       cpuc->events[idx] = NULL;
-       clear_bit(idx, cpuc->used_mask);
+       sh_pmu_stop(event, PERF_EF_UPDATE);
+       __clear_bit(event->hw.idx, cpuc->used_mask);
 
        perf_event_update_userpage(event);
 }
 
-static int sh_pmu_enable(struct perf_event *event)
+static int sh_pmu_add(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
+       int ret = -EAGAIN;
+
+       perf_pmu_disable(event->pmu);
 
-       if (test_and_set_bit(idx, cpuc->used_mask)) {
+       if (__test_and_set_bit(idx, cpuc->used_mask)) {
                idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
                if (idx == sh_pmu->num_events)
-                       return -EAGAIN;
+                       goto out;
 
-               set_bit(idx, cpuc->used_mask);
+               __set_bit(idx, cpuc->used_mask);
                hwc->idx = idx;
        }
 
        sh_pmu->disable(hwc, idx);
 
-       cpuc->events[idx] = event;
-       set_bit(idx, cpuc->active_mask);
-
-       sh_pmu->enable(hwc, idx);
+       event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+       if (flags & PERF_EF_START)
+               sh_pmu_start(event, PERF_EF_RELOAD);
 
        perf_event_update_userpage(event);
-
-       return 0;
+       ret = 0;
+out:
+       perf_pmu_enable(event->pmu);
+       return ret;
 }
 
 static void sh_pmu_read(struct perf_event *event)
@@ -275,24 +305,56 @@ static void sh_pmu_read(struct perf_event *event)
        sh_perf_event_update(event, &event->hw, event->hw.idx);
 }
 
-static const struct pmu pmu = {
-       .enable         = sh_pmu_enable,
-       .disable        = sh_pmu_disable,
-       .read           = sh_pmu_read,
-};
-
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+static int sh_pmu_event_init(struct perf_event *event)
 {
-       int err = __hw_perf_event_init(event);
+       int err;
+
+       switch (event->attr.type) {
+       case PERF_TYPE_RAW:
+       case PERF_TYPE_HW_CACHE:
+       case PERF_TYPE_HARDWARE:
+               err = __hw_perf_event_init(event);
+               break;
+
+       default:
+               return -ENOENT;
+       }
+
        if (unlikely(err)) {
                if (event->destroy)
                        event->destroy(event);
-               return ERR_PTR(err);
        }
 
-       return &pmu;
+       return err;
+}
+
+static void sh_pmu_enable(struct pmu *pmu)
+{
+       if (!sh_pmu_initialized())
+               return;
+
+       sh_pmu->enable_all();
+}
+
+static void sh_pmu_disable(struct pmu *pmu)
+{
+       if (!sh_pmu_initialized())
+               return;
+
+       sh_pmu->disable_all();
 }
 
+static struct pmu pmu = {
+       .pmu_enable     = sh_pmu_enable,
+       .pmu_disable    = sh_pmu_disable,
+       .event_init     = sh_pmu_event_init,
+       .add            = sh_pmu_add,
+       .del            = sh_pmu_del,
+       .start          = sh_pmu_start,
+       .stop           = sh_pmu_stop,
+       .read           = sh_pmu_read,
+};
+
 static void sh_pmu_setup(int cpu)
 {
        struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
@@ -317,32 +379,17 @@ sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
        return NOTIFY_OK;
 }
 
-void hw_perf_enable(void)
-{
-       if (!sh_pmu_initialized())
-               return;
-
-       sh_pmu->enable_all();
-}
-
-void hw_perf_disable(void)
-{
-       if (!sh_pmu_initialized())
-               return;
-
-       sh_pmu->disable_all();
-}
-
-int __cpuinit register_sh_pmu(struct sh_pmu *pmu)
+int __cpuinit register_sh_pmu(struct sh_pmu *_pmu)
 {
        if (sh_pmu)
                return -EBUSY;
-       sh_pmu = pmu;
+       sh_pmu = _pmu;
 
-       pr_info("Performance Events: %s support registered\n", pmu->name);
+       pr_info("Performance Events: %s support registered\n", _pmu->name);
 
-       WARN_ON(pmu->num_events > MAX_HWEVENTS);
+       WARN_ON(_pmu->num_events > MAX_HWEVENTS);
 
+       perf_pmu_register(&pmu);
        perf_cpu_notifier(sh_pmu_notifier);
        return 0;
 }
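
With sh converted, the old enable/disable/unthrottle triple is gone from every arch in this merge; the core now drives each PMU through one uniform sequence (illustrative):

	pmu->add(event, PERF_EF_START);		/* schedule on a counter and start */
	pmu->read(event);			/* sync event->count on demand */
	pmu->stop(event, PERF_EF_UPDATE);	/* throttle: pause and fold count */
	pmu->start(event, PERF_EF_RELOAD);	/* unthrottle: reprogram, resume */
	pmu->del(event, 0);			/* unschedule (stops if needed) */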
index 491e9d6..9212cd4 100644 (file)
@@ -30,6 +30,7 @@ config SPARC
        select PERF_USE_VMALLOC
        select HAVE_DMA_ATTRS
        select HAVE_DMA_API_DEBUG
+       select HAVE_ARCH_JUMP_LABEL
 
 config SPARC32
        def_bool !64BIT
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
new file mode 100644 (file)
index 0000000..62e66d7
--- /dev/null
@@ -0,0 +1,32 @@
+#ifndef _ASM_SPARC_JUMP_LABEL_H
+#define _ASM_SPARC_JUMP_LABEL_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <asm/system.h>
+
+#define JUMP_LABEL_NOP_SIZE 4
+
+#define JUMP_LABEL(key, label)                                 \
+       do {                                                    \
+               asm goto("1:\n\t"                               \
+                        "nop\n\t"                              \
+                        "nop\n\t"                              \
+                        ".pushsection __jump_table,  \"a\"\n\t"\
+                        ".word 1b, %l[" #label "], %c0\n\t"    \
+                        ".popsection \n\t"                     \
+                        : :  "i" (key) :  : label);\
+       } while (0)
+
+#endif /* __KERNEL__ */
+
+typedef u32 jump_label_t;
+
+struct jump_entry {
+       jump_label_t code;
+       jump_label_t target;
+       jump_label_t key;
+};
+
+#endif
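
The two nops are deliberate: the first word is the patch site recorded in __jump_table, the second fills the branch delay slot once the site is rewritten to a taken branch. A hypothetical use of the macro (trace_active and do_trace_work() are invented for illustration):

	static int trace_active;		/* the key, flipped at runtime */

	static void maybe_trace(void)
	{
		JUMP_LABEL(&trace_active, do_trace);
		return;				/* disabled: falls through the nops */
	do_trace:
		do_trace_work();		/* assumed helper */
	}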
index 0c2dc1f..599398f 100644 (file)
@@ -119,3 +119,5 @@ obj-$(CONFIG_COMPAT)    += $(audit--y)
 
 pc--$(CONFIG_PERF_EVENTS) := perf_event.o
 obj-$(CONFIG_SPARC64)  += $(pc--y)
+
+obj-$(CONFIG_SPARC64)  += jump_label.o
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
new file mode 100644 (file)
index 0000000..ea2dafc
--- /dev/null
@@ -0,0 +1,47 @@
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/cpu.h>
+
+#include <linux/jump_label.h>
+#include <linux/memory.h>
+
+#ifdef HAVE_JUMP_LABEL
+
+void arch_jump_label_transform(struct jump_entry *entry,
+                              enum jump_label_type type)
+{
+       u32 val;
+       u32 *insn = (u32 *) (unsigned long) entry->code;
+
+       if (type == JUMP_LABEL_ENABLE) {
+               s32 off = (s32)entry->target - (s32)entry->code;
+
+#ifdef CONFIG_SPARC64
+               /* ba,pt %xcc, . + (off << 2) */
+               val = 0x10680000 | ((u32) off >> 2);
+#else
+               /* ba . + (off << 2) */
+               val = 0x10800000 | ((u32) off >> 2);
+#endif
+       } else {
+               val = 0x01000000;
+       }
+
+       get_online_cpus();
+       mutex_lock(&text_mutex);
+       *insn = val;
+       flushi(insn);
+       mutex_unlock(&text_mutex);
+       put_online_cpus();
+}
+
+void arch_jump_label_text_poke_early(jump_label_t addr)
+{
+       u32 *insn_p = (u32 *) (unsigned long) addr;
+
+       *insn_p = 0x01000000;
+       flushi(insn_p);
+}
+
+#endif
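
The constants decode as the comments claim: sparc branch displacements count 32-bit instruction words, hence the off >> 2, and 0x01000000 is the canonical sparc nop (sethi 0, %g0). A worked example, derived from the code above:

	/* target 8 bytes past the patch site: off = 8, word disp = 8 >> 2 = 2 */
	val = 0x10680000 | 2;	/* sparc64: 0x10680002, i.e. "ba,pt %xcc, .+8" */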
index f848aad..ee3c7dd 100644 (file)
@@ -18,6 +18,9 @@
 #include <asm/spitfire.h>
 
 #ifdef CONFIG_SPARC64
+
+#include <linux/jump_label.h>
+
 static void *module_map(unsigned long size)
 {
        struct vm_struct *area;
@@ -227,6 +230,9 @@ int module_finalize(const Elf_Ehdr *hdr,
                    const Elf_Shdr *sechdrs,
                    struct module *me)
 {
+       /* make jump label nops */
+       jump_label_apply_nops(me);
+
        /* Cheetah's I-cache is fully coherent.  */
        if (tlb_type == spitfire) {
                unsigned long va;
index 6318e62..0d6deb5 100644 (file)
@@ -658,13 +658,16 @@ static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
 
                enc = perf_event_get_enc(cpuc->events[i]);
                pcr &= ~mask_for_index(idx);
-               pcr |= event_encoding(enc, idx);
+               if (hwc->state & PERF_HES_STOPPED)
+                       pcr |= nop_for_index(idx);
+               else
+                       pcr |= event_encoding(enc, idx);
        }
 out:
        return pcr;
 }
 
-void hw_perf_enable(void)
+static void sparc_pmu_enable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        u64 pcr;
@@ -691,7 +694,7 @@ void hw_perf_enable(void)
        pcr_ops->write(cpuc->pcr);
 }
 
-void hw_perf_disable(void)
+static void sparc_pmu_disable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        u64 val;
@@ -710,19 +713,65 @@ void hw_perf_disable(void)
        pcr_ops->write(cpuc->pcr);
 }
 
-static void sparc_pmu_disable(struct perf_event *event)
+static int active_event_index(struct cpu_hw_events *cpuc,
+                             struct perf_event *event)
+{
+       int i;
+
+       for (i = 0; i < cpuc->n_events; i++) {
+               if (cpuc->event[i] == event)
+                       break;
+       }
+       BUG_ON(i == cpuc->n_events);
+       return cpuc->current_idx[i];
+}
+
+static void sparc_pmu_start(struct perf_event *event, int flags)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       int idx = active_event_index(cpuc, event);
+
+       if (flags & PERF_EF_RELOAD) {
+               WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+               sparc_perf_event_set_period(event, &event->hw, idx);
+       }
+
+       event->hw.state = 0;
+
+       sparc_pmu_enable_event(cpuc, &event->hw, idx);
+}
+
+static void sparc_pmu_stop(struct perf_event *event, int flags)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       int idx = active_event_index(cpuc, event);
+
+       if (!(event->hw.state & PERF_HES_STOPPED)) {
+               sparc_pmu_disable_event(cpuc, &event->hw, idx);
+               event->hw.state |= PERF_HES_STOPPED;
+       }
+
+       if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) {
+               sparc_perf_event_update(event, &event->hw, idx);
+               event->hw.state |= PERF_HES_UPTODATE;
+       }
+}
+
+static void sparc_pmu_del(struct perf_event *event, int _flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-       struct hw_perf_event *hwc = &event->hw;
        unsigned long flags;
        int i;
 
        local_irq_save(flags);
-       perf_disable();
+       perf_pmu_disable(event->pmu);
 
        for (i = 0; i < cpuc->n_events; i++) {
                if (event == cpuc->event[i]) {
-                       int idx = cpuc->current_idx[i];
+                       /* Absorb the final count and turn off the
+                        * event.
+                        */
+                       sparc_pmu_stop(event, PERF_EF_UPDATE);
 
                        /* Shift remaining entries down into
                         * the existing slot.
@@ -734,13 +783,6 @@ static void sparc_pmu_disable(struct perf_event *event)
                                        cpuc->current_idx[i];
                        }
 
-                       /* Absorb the final count and turn off the
-                        * event.
-                        */
-                       sparc_pmu_disable_event(cpuc, hwc, idx);
-                       barrier();
-                       sparc_perf_event_update(event, hwc, idx);
-
                        perf_event_update_userpage(event);
 
                        cpuc->n_events--;
@@ -748,23 +790,10 @@ static void sparc_pmu_disable(struct perf_event *event)
                }
        }
 
-       perf_enable();
+       perf_pmu_enable(event->pmu);
        local_irq_restore(flags);
 }
 
-static int active_event_index(struct cpu_hw_events *cpuc,
-                             struct perf_event *event)
-{
-       int i;
-
-       for (i = 0; i < cpuc->n_events; i++) {
-               if (cpuc->event[i] == event)
-                       break;
-       }
-       BUG_ON(i == cpuc->n_events);
-       return cpuc->current_idx[i];
-}
-
 static void sparc_pmu_read(struct perf_event *event)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -774,15 +803,6 @@ static void sparc_pmu_read(struct perf_event *event)
        sparc_perf_event_update(event, hwc, idx);
 }
 
-static void sparc_pmu_unthrottle(struct perf_event *event)
-{
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-       int idx = active_event_index(cpuc, event);
-       struct hw_perf_event *hwc = &event->hw;
-
-       sparc_pmu_enable_event(cpuc, hwc, idx);
-}
-
 static atomic_t active_events = ATOMIC_INIT(0);
 static DEFINE_MUTEX(pmc_grab_mutex);
 
@@ -877,7 +897,7 @@ static int sparc_check_constraints(struct perf_event **evts,
        if (!n_ev)
                return 0;
 
-       if (n_ev > perf_max_events)
+       if (n_ev > MAX_HWEVENTS)
                return -1;
 
        msk0 = perf_event_get_msk(events[0]);
@@ -984,23 +1004,27 @@ static int collect_events(struct perf_event *group, int max_count,
        return n;
 }
 
-static int sparc_pmu_enable(struct perf_event *event)
+static int sparc_pmu_add(struct perf_event *event, int ef_flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int n0, ret = -EAGAIN;
        unsigned long flags;
 
        local_irq_save(flags);
-       perf_disable();
+       perf_pmu_disable(event->pmu);
 
        n0 = cpuc->n_events;
-       if (n0 >= perf_max_events)
+       if (n0 >= MAX_HWEVENTS)
                goto out;
 
        cpuc->event[n0] = event;
        cpuc->events[n0] = event->hw.event_base;
        cpuc->current_idx[n0] = PIC_NO_INDEX;
 
+       event->hw.state = PERF_HES_UPTODATE;
+       if (!(ef_flags & PERF_EF_START))
+               event->hw.state |= PERF_HES_STOPPED;
+
        /*
         * If group events scheduling transaction was started,
         * skip the schedulability test here, it will be peformed
@@ -1020,12 +1044,12 @@ nocheck:
 
        ret = 0;
 out:
-       perf_enable();
+       perf_pmu_enable(event->pmu);
        local_irq_restore(flags);
        return ret;
 }
 
-static int __hw_perf_event_init(struct perf_event *event)
+static int sparc_pmu_event_init(struct perf_event *event)
 {
        struct perf_event_attr *attr = &event->attr;
        struct perf_event *evts[MAX_HWEVENTS];
@@ -1038,22 +1062,33 @@ static int __hw_perf_event_init(struct perf_event *event)
        if (atomic_read(&nmi_active) < 0)
                return -ENODEV;
 
-       pmap = NULL;
-       if (attr->type == PERF_TYPE_HARDWARE) {
+       switch (attr->type) {
+       case PERF_TYPE_HARDWARE:
                if (attr->config >= sparc_pmu->max_events)
                        return -EINVAL;
                pmap = sparc_pmu->event_map(attr->config);
-       } else if (attr->type == PERF_TYPE_HW_CACHE) {
+               break;
+
+       case PERF_TYPE_HW_CACHE:
                pmap = sparc_map_cache_event(attr->config);
                if (IS_ERR(pmap))
                        return PTR_ERR(pmap);
-       } else if (attr->type != PERF_TYPE_RAW)
-               return -EOPNOTSUPP;
+               break;
+
+       case PERF_TYPE_RAW:
+               pmap = NULL;
+               break;
+
+       default:
+               return -ENOENT;
+
+       }
 
        if (pmap) {
                hwc->event_base = perf_event_encode(pmap);
        } else {
-               /* User gives us "(encoding << 16) | pic_mask" for
+               /*
+                * User gives us "(encoding << 16) | pic_mask" for
                 * PERF_TYPE_RAW events.
                 */
                hwc->event_base = attr->config;
@@ -1071,7 +1106,7 @@ static int __hw_perf_event_init(struct perf_event *event)
        n = 0;
        if (event->group_leader != event) {
                n = collect_events(event->group_leader,
-                                  perf_max_events - 1,
+                                  MAX_HWEVENTS - 1,
                                   evts, events, current_idx_dmy);
                if (n < 0)
                        return -EINVAL;
@@ -1107,10 +1142,11 @@ static int __hw_perf_event_init(struct perf_event *event)
  * Set the flag to make pmu::enable() not perform the
  * schedulability test, it will be performed at commit time
  */
-static void sparc_pmu_start_txn(const struct pmu *pmu)
+static void sparc_pmu_start_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
+       perf_pmu_disable(pmu);
        cpuhw->group_flag |= PERF_EVENT_TXN;
 }
 
@@ -1119,11 +1155,12 @@ static void sparc_pmu_start_txn(const struct pmu *pmu)
  * Clear the flag and pmu::enable() will perform the
  * schedulability test.
  */
-static void sparc_pmu_cancel_txn(const struct pmu *pmu)
+static void sparc_pmu_cancel_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
        cpuhw->group_flag &= ~PERF_EVENT_TXN;
+       perf_pmu_enable(pmu);
 }
 
 /*
@@ -1131,7 +1168,7 @@ static void sparc_pmu_cancel_txn(const struct pmu *pmu)
  * Perform the group schedulability test as a whole
  * Return 0 if success
  */
-static int sparc_pmu_commit_txn(const struct pmu *pmu)
+static int sparc_pmu_commit_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int n;
@@ -1147,28 +1184,24 @@ static int sparc_pmu_commit_txn(const struct pmu *pmu)
                return -EAGAIN;
 
        cpuc->group_flag &= ~PERF_EVENT_TXN;
+       perf_pmu_enable(pmu);
        return 0;
 }
 
-static const struct pmu pmu = {
-       .enable         = sparc_pmu_enable,
-       .disable        = sparc_pmu_disable,
+static struct pmu pmu = {
+       .pmu_enable     = sparc_pmu_enable,
+       .pmu_disable    = sparc_pmu_disable,
+       .event_init     = sparc_pmu_event_init,
+       .add            = sparc_pmu_add,
+       .del            = sparc_pmu_del,
+       .start          = sparc_pmu_start,
+       .stop           = sparc_pmu_stop,
        .read           = sparc_pmu_read,
-       .unthrottle     = sparc_pmu_unthrottle,
        .start_txn      = sparc_pmu_start_txn,
        .cancel_txn     = sparc_pmu_cancel_txn,
        .commit_txn     = sparc_pmu_commit_txn,
 };
 
-const struct pmu *hw_perf_event_init(struct perf_event *event)
-{
-       int err = __hw_perf_event_init(event);
-
-       if (err)
-               return ERR_PTR(err);
-       return &pmu;
-}
-
 void perf_event_print_debug(void)
 {
        unsigned long flags;
@@ -1244,7 +1277,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
                        continue;
 
                if (perf_event_overflow(event, 1, &data, regs))
-                       sparc_pmu_disable_event(cpuc, hwc, idx);
+                       sparc_pmu_stop(event, 0);
        }
 
        return NOTIFY_STOP;
@@ -1285,28 +1318,21 @@ void __init init_hw_perf_events(void)
 
        pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
 
-       /* All sparc64 PMUs currently have 2 events.  */
-       perf_max_events = 2;
-
+       perf_pmu_register(&pmu);
        register_die_notifier(&perf_event_nmi_notifier);
 }
 
-static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
-{
-       if (entry->nr < PERF_MAX_STACK_DEPTH)
-               entry->ip[entry->nr++] = ip;
-}
-
-static void perf_callchain_kernel(struct pt_regs *regs,
-                                 struct perf_callchain_entry *entry)
+void perf_callchain_kernel(struct perf_callchain_entry *entry,
+                          struct pt_regs *regs)
 {
        unsigned long ksp, fp;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
        int graph = 0;
 #endif
 
-       callchain_store(entry, PERF_CONTEXT_KERNEL);
-       callchain_store(entry, regs->tpc);
+       stack_trace_flush();
+
+       perf_callchain_store(entry, regs->tpc);
 
        ksp = regs->u_regs[UREG_I6];
        fp = ksp + STACK_BIAS;
@@ -1330,13 +1356,13 @@ static void perf_callchain_kernel(struct pt_regs *regs,
                        pc = sf->callers_pc;
                        fp = (unsigned long)sf->fp + STACK_BIAS;
                }
-               callchain_store(entry, pc);
+               perf_callchain_store(entry, pc);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
                if ((pc + 8UL) == (unsigned long) &return_to_handler) {
                        int index = current->curr_ret_stack;
                        if (current->ret_stack && index >= graph) {
                                pc = current->ret_stack[index - graph].ret;
-                               callchain_store(entry, pc);
+                               perf_callchain_store(entry, pc);
                                graph++;
                        }
                }
@@ -1344,13 +1370,12 @@ static void perf_callchain_kernel(struct pt_regs *regs,
        } while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
 
-static void perf_callchain_user_64(struct pt_regs *regs,
-                                  struct perf_callchain_entry *entry)
+static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+                                  struct pt_regs *regs)
 {
        unsigned long ufp;
 
-       callchain_store(entry, PERF_CONTEXT_USER);
-       callchain_store(entry, regs->tpc);
+       perf_callchain_store(entry, regs->tpc);
 
        ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
        do {
@@ -1363,17 +1388,16 @@ static void perf_callchain_user_64(struct pt_regs *regs,
 
                pc = sf.callers_pc;
                ufp = (unsigned long)sf.fp + STACK_BIAS;
-               callchain_store(entry, pc);
+               perf_callchain_store(entry, pc);
        } while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
 
-static void perf_callchain_user_32(struct pt_regs *regs,
-                                  struct perf_callchain_entry *entry)
+static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+                                  struct pt_regs *regs)
 {
        unsigned long ufp;
 
-       callchain_store(entry, PERF_CONTEXT_USER);
-       callchain_store(entry, regs->tpc);
+       perf_callchain_store(entry, regs->tpc);
 
        ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
        do {
@@ -1386,34 +1410,16 @@ static void perf_callchain_user_32(struct pt_regs *regs,
 
                pc = sf.callers_pc;
                ufp = (unsigned long)sf.fp;
-               callchain_store(entry, pc);
+               perf_callchain_store(entry, pc);
        } while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
 
-/* Like powerpc we can't get PMU interrupts within the PMU handler,
- * so no need for separate NMI and IRQ chains as on x86.
- */
-static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
-       struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
-
-       entry->nr = 0;
-       if (!user_mode(regs)) {
-               stack_trace_flush();
-               perf_callchain_kernel(regs, entry);
-               if (current->mm)
-                       regs = task_pt_regs(current);
-               else
-                       regs = NULL;
-       }
-       if (regs) {
-               flushw_user();
-               if (test_thread_flag(TIF_32BIT))
-                       perf_callchain_user_32(regs, entry);
-               else
-                       perf_callchain_user_64(regs, entry);
-       }
-       return entry;
+       flushw_user();
+       if (test_thread_flag(TIF_32BIT))
+               perf_callchain_user_32(entry, regs);
+       else
+               perf_callchain_user_64(entry, regs);
 }
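
The perf_callchain() deleted above has the same shape every arch dropped in this merge; the dispatch now lives once in the generic core, roughly as follows (a sketch, assuming kernel/perf_event.c of this series):

	entry->nr = 0;
	if (!user_mode(regs)) {
		perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(entry, regs);
		regs = current->mm ? task_pt_regs(current) : NULL;
	}
	if (regs) {
		perf_callchain_store(entry, PERF_CONTEXT_USER);
		perf_callchain_user(entry, regs);
	}

Arch-specific details such as stack_trace_flush() and flushw_user() therefore move inside the hooks, as the sparc hunks above show.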
index 84f296c..8f58bdf 100644 (file)
@@ -1506,13 +1506,6 @@ handle_ill:
        }
        STD_ENDPROC(handle_ill)
 
-       .pushsection .rodata, "a"
-       .align  8
-bpt_code:
-       bpt
-