Merge commit 'v3.3-rc6' into android-3.3
Colin Cross [Mon, 5 Mar 2012 22:17:12 +0000 (14:17 -0800)]
398 files changed:
Documentation/android.txt [new file with mode: 0644]
Documentation/cgroups/cgroups.txt
Documentation/cpu-freq/governors.txt
arch/arm/Kconfig
arch/arm/Kconfig.debug
arch/arm/boot/compressed/head.S
arch/arm/common/Kconfig
arch/arm/common/Makefile
arch/arm/common/fiq_debugger.c [new file with mode: 0644]
arch/arm/common/fiq_debugger_ringbuf.h [new file with mode: 0644]
arch/arm/common/fiq_glue.S [new file with mode: 0644]
arch/arm/common/fiq_glue_setup.c [new file with mode: 0644]
arch/arm/include/asm/cacheflush.h
arch/arm/include/asm/fiq_debugger.h [new file with mode: 0644]
arch/arm/include/asm/fiq_glue.h [new file with mode: 0644]
arch/arm/include/asm/hardirq.h
arch/arm/include/asm/hardware/cache-l2x0.h
arch/arm/include/asm/hardware/coresight.h
arch/arm/include/asm/irq.h
arch/arm/include/asm/mach/mmc.h [new file with mode: 0644]
arch/arm/include/asm/rodata.h [new file with mode: 0644]
arch/arm/include/asm/smp.h
arch/arm/kernel/etm.c
arch/arm/kernel/ftrace.c
arch/arm/kernel/leds.c
arch/arm/kernel/process.c
arch/arm/kernel/smp.c
arch/arm/kernel/traps.c
arch/arm/mm/Makefile
arch/arm/mm/cache-l2x0.c
arch/arm/mm/cache-v6.S
arch/arm/mm/mmu.c
arch/arm/mm/rodata.c [new file with mode: 0644]
arch/arm/vfp/entry.S
arch/arm/vfp/vfpmodule.c
arch/x86/include/asm/idle.h
arch/x86/kernel/process_64.c
block/genhd.c
block/partition-generic.c
drivers/Kconfig
drivers/Makefile
drivers/base/power/main.c
drivers/char/Kconfig
drivers/char/Makefile
drivers/char/dcc_tty.c [new file with mode: 0644]
drivers/char/mem.c
drivers/cpufreq/Kconfig
drivers/cpufreq/Makefile
drivers/cpufreq/cpufreq_interactive.c [new file with mode: 0644]
drivers/cpufreq/cpufreq_stats.c
drivers/cpuidle/governors/menu.c
drivers/gpu/Makefile
drivers/gpu/ion/Kconfig [new file with mode: 0644]
drivers/gpu/ion/Makefile [new file with mode: 0644]
drivers/gpu/ion/ion.c [new file with mode: 0644]
drivers/gpu/ion/ion_carveout_heap.c [new file with mode: 0644]
drivers/gpu/ion/ion_heap.c [new file with mode: 0644]
drivers/gpu/ion/ion_priv.h [new file with mode: 0644]
drivers/gpu/ion/ion_system_heap.c [new file with mode: 0644]
drivers/gpu/ion/ion_system_mapper.c [new file with mode: 0644]
drivers/gpu/ion/tegra/Makefile [new file with mode: 0644]
drivers/gpu/ion/tegra/tegra_ion.c [new file with mode: 0644]
drivers/hid/hid-input.c
drivers/hid/hid-magicmouse.c
drivers/hid/hid-multitouch.c
drivers/input/Kconfig
drivers/input/Makefile
drivers/input/evdev.c
drivers/input/keyreset.c [new file with mode: 0644]
drivers/input/misc/Kconfig
drivers/input/misc/Makefile
drivers/input/misc/gpio_axis.c [new file with mode: 0644]
drivers/input/misc/gpio_event.c [new file with mode: 0644]
drivers/input/misc/gpio_input.c [new file with mode: 0644]
drivers/input/misc/gpio_matrix.c [new file with mode: 0644]
drivers/input/misc/gpio_output.c [new file with mode: 0644]
drivers/input/misc/keychord.c [new file with mode: 0644]
drivers/input/touchscreen/Kconfig
drivers/input/touchscreen/Makefile
drivers/input/touchscreen/synaptics_i2c_rmi.c [new file with mode: 0644]
drivers/leds/Kconfig
drivers/leds/Makefile
drivers/leds/ledtrig-sleep.c [new file with mode: 0644]
drivers/misc/Kconfig
drivers/misc/Makefile
drivers/misc/akm8975.c [new file with mode: 0644]
drivers/misc/uid_stat.c [new file with mode: 0644]
drivers/misc/wl127x-rfkill.c [new file with mode: 0644]
drivers/mmc/card/Kconfig
drivers/mmc/card/block.c
drivers/mmc/core/Kconfig
drivers/mmc/core/bus.c
drivers/mmc/core/core.c
drivers/mmc/core/host.c
drivers/mmc/core/sd.c
drivers/mmc/core/sdio.c
drivers/mmc/core/sdio_bus.c
drivers/mmc/core/sdio_io.c [changed mode: 0644->0755]
drivers/mtd/nand/Kconfig
drivers/net/ppp/Kconfig
drivers/net/ppp/Makefile
drivers/net/ppp/pppolac.c [new file with mode: 0644]
drivers/net/ppp/pppopns.c [new file with mode: 0644]
drivers/net/tun.c
drivers/net/wireless/Kconfig
drivers/net/wireless/Makefile
drivers/net/wireless/bcmdhd/Kconfig [new file with mode: 0644]
drivers/net/wireless/bcmdhd/Makefile [new file with mode: 0644]
drivers/net/wireless/bcmdhd/aiutils.c [new file with mode: 0644]
drivers/net/wireless/bcmdhd/bcmevent.c [new file with mode: 0644]
drivers/net/wireless/bcmdhd/bcmsdh.c [new file with mode: 0644]
drivers/net/wireless/bcmdhd/bcmsdh_linux.c [new file with mode: 0644]
drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c [new file with mode: 0644]
drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c [new file with mode: 0644]
drivers/net/wireless/bcmdhd/bcmutils.c [new file with mode: 0644]
drivers/net/wireless/bcmdhd/bcmwifi.c [new file with mode: 0644]
drivers/net/wireless/bcmdhd/dhd.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/dhd_bta.c [new file with mode: 0644]
drivers/net/wireless/bcmdhd/dhd_bta.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/dhd_bus.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/dhd_cdc.c [new file with mode: 0644]
drivers/net/wireless/bcmdhd/dhd_cfg80211.c [new file with mode: 0644]
drivers/net/wireless/bcmdhd/dhd_cfg80211.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/dhd_common.c [new file with mode: 0644]
drivers/net/wireless/bcmdhd/dhd_custom_gpio.c [new file with mode: 0644]
drivers/net/wireless/bcmdhd/dhd_dbg.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/dhd_linux.c [new file with mode: 0644]
drivers/net/wireless/bcmdhd/dhd_linux_sched.c [new file with mode: 0644]
drivers/net/wireless/bcmdhd/dhd_proto.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/dhd_sdio.c [new file with mode: 0644]
drivers/net/wireless/bcmdhd/dhd_wlfc.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/dngl_stats.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/dngl_wlhdr.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/hndpmu.c [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/Makefile [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/aidmp.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/bcmcdc.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/bcmdefs.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/bcmdevs.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/bcmendian.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/bcmpcispi.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/bcmperf.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/bcmsdbus.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/bcmsdh.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/bcmsdpcm.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/bcmsdspi.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/bcmsdstd.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/bcmspi.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/bcmutils.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/bcmwifi.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/dhdioctl.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/epivers.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/hndpmu.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/hndrte_armtrap.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/hndrte_cons.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/hndsoc.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/htsf.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/linux_osl.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/linuxver.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/miniopt.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/msgtrace.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/osl.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/packed_section_end.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/packed_section_start.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/pcicfg.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/proto/802.11.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/proto/802.11e.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/proto/802.1d.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/proto/bcmeth.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/proto/bcmevent.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/proto/bcmip.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/proto/eapol.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/proto/ethernet.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/proto/p2p.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/proto/sdspi.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/proto/vlan.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/proto/wpa.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/sbchipc.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/sbconfig.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/sbhnddma.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/sbpcmcia.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/sbsdio.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/sbsocram.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/sdio.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/sdioh.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/sdiovar.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/siutils.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/trxhdr.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/typedefs.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/wlfc_proto.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/include/wlioctl.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/linux_osl.c [new file with mode: 0644]
drivers/net/wireless/bcmdhd/sbutils.c [new file with mode: 0644]
drivers/net/wireless/bcmdhd/siutils.c [new file with mode: 0644]
drivers/net/wireless/bcmdhd/siutils_priv.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/uamp_api.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/wl_android.c [new file with mode: 0644]
drivers/net/wireless/bcmdhd/wl_android.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/wl_cfg80211.c [new file with mode: 0644]
drivers/net/wireless/bcmdhd/wl_cfg80211.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/wl_cfgp2p.c [new file with mode: 0644]
drivers/net/wireless/bcmdhd/wl_cfgp2p.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/wl_dbg.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/wl_iw.c [new file with mode: 0644]
drivers/net/wireless/bcmdhd/wl_iw.h [new file with mode: 0644]
drivers/net/wireless/bcmdhd/wl_linux_mon.c [new file with mode: 0644]
drivers/net/wireless/bcmdhd/wldev_common.c [new file with mode: 0644]
drivers/net/wireless/bcmdhd/wldev_common.h [new file with mode: 0644]
drivers/power/power_supply_core.c
drivers/staging/android/Kconfig
drivers/staging/android/Makefile
drivers/staging/android/TODO [deleted file]
drivers/staging/android/alarm-dev.c [new file with mode: 0644]
drivers/staging/android/android_alarm.h [new file with mode: 0644]
drivers/staging/android/ashmem.c
drivers/staging/android/binder.c
drivers/staging/android/lowmemorykiller.c
drivers/staging/android/persistent_ram.c [new file with mode: 0644]
drivers/staging/android/ram_console.c
drivers/staging/android/trace_persistent.c [new file with mode: 0644]
drivers/switch/Kconfig [new file with mode: 0644]
drivers/switch/Makefile [new file with mode: 0644]
drivers/switch/switch_class.c [new file with mode: 0644]
drivers/switch/switch_gpio.c [new file with mode: 0644]
drivers/tty/serial/serial_core.c
drivers/usb/gadget/Kconfig
drivers/usb/gadget/Makefile
drivers/usb/gadget/android.c [new file with mode: 0644]
drivers/usb/gadget/composite.c
drivers/usb/gadget/f_accessory.c [new file with mode: 0644]
drivers/usb/gadget/f_adb.c [new file with mode: 0644]
drivers/usb/gadget/f_mtp.c [new file with mode: 0644]
drivers/usb/gadget/f_rndis.c
drivers/usb/gadget/u_ether.c
drivers/usb/gadget/u_ether.h
drivers/usb/gadget/u_serial.c
drivers/usb/otg/Kconfig
drivers/usb/otg/Makefile
drivers/usb/otg/otg-wakelock.c [new file with mode: 0644]
drivers/video/Kconfig
fs/Kconfig
fs/Makefile
fs/fat/dir.c
fs/fat/fat.h
fs/fat/inode.c
fs/fs-writeback.c
fs/fuse/dev.c
fs/proc/base.c
fs/yaffs2/Kconfig [new file with mode: 0644]
fs/yaffs2/Makefile [new file with mode: 0644]
fs/yaffs2/yaffs_allocator.c [new file with mode: 0644]
fs/yaffs2/yaffs_allocator.h [new file with mode: 0644]
fs/yaffs2/yaffs_attribs.c [new file with mode: 0644]
fs/yaffs2/yaffs_attribs.h [new file with mode: 0644]
fs/yaffs2/yaffs_bitmap.c [new file with mode: 0644]
fs/yaffs2/yaffs_bitmap.h [new file with mode: 0644]
fs/yaffs2/yaffs_checkptrw.c [new file with mode: 0644]
fs/yaffs2/yaffs_checkptrw.h [new file with mode: 0644]
fs/yaffs2/yaffs_ecc.c [new file with mode: 0644]
fs/yaffs2/yaffs_ecc.h [new file with mode: 0644]
fs/yaffs2/yaffs_getblockinfo.h [new file with mode: 0644]
fs/yaffs2/yaffs_guts.c [new file with mode: 0644]
fs/yaffs2/yaffs_guts.h [new file with mode: 0644]
fs/yaffs2/yaffs_linux.h [new file with mode: 0644]
fs/yaffs2/yaffs_mtdif.c [new file with mode: 0644]
fs/yaffs2/yaffs_mtdif.h [new file with mode: 0644]
fs/yaffs2/yaffs_mtdif1.c [new file with mode: 0644]
fs/yaffs2/yaffs_mtdif1.h [new file with mode: 0644]
fs/yaffs2/yaffs_mtdif2.c [new file with mode: 0644]
fs/yaffs2/yaffs_mtdif2.h [new file with mode: 0644]
fs/yaffs2/yaffs_nameval.c [new file with mode: 0644]
fs/yaffs2/yaffs_nameval.h [new file with mode: 0644]
fs/yaffs2/yaffs_nand.c [new file with mode: 0644]
fs/yaffs2/yaffs_nand.h [new file with mode: 0644]
fs/yaffs2/yaffs_packedtags1.c [new file with mode: 0644]
fs/yaffs2/yaffs_packedtags1.h [new file with mode: 0644]
fs/yaffs2/yaffs_packedtags2.c [new file with mode: 0644]
fs/yaffs2/yaffs_packedtags2.h [new file with mode: 0644]
fs/yaffs2/yaffs_tagscompat.c [new file with mode: 0644]
fs/yaffs2/yaffs_tagscompat.h [new file with mode: 0644]
fs/yaffs2/yaffs_tagsvalidity.c [new file with mode: 0644]
fs/yaffs2/yaffs_tagsvalidity.h [new file with mode: 0644]
fs/yaffs2/yaffs_trace.h [new file with mode: 0644]
fs/yaffs2/yaffs_verify.c [new file with mode: 0644]
fs/yaffs2/yaffs_verify.h [new file with mode: 0644]
fs/yaffs2/yaffs_vfs.c [new file with mode: 0644]
fs/yaffs2/yaffs_yaffs1.c [new file with mode: 0644]
fs/yaffs2/yaffs_yaffs1.h [new file with mode: 0644]
fs/yaffs2/yaffs_yaffs2.c [new file with mode: 0644]
fs/yaffs2/yaffs_yaffs2.h [new file with mode: 0644]
fs/yaffs2/yportenv.h [new file with mode: 0644]
include/linux/akm8975.h [new file with mode: 0644]
include/linux/alarmtimer.h
include/linux/amba/mmci.h
include/linux/android_aid.h [new file with mode: 0644]
include/linux/cgroup.h
include/linux/cpu.h
include/linux/cpufreq.h
include/linux/gpio_event.h [new file with mode: 0644]
include/linux/hid.h
include/linux/if_pppolac.h [new file with mode: 0644]
include/linux/if_pppopns.h [new file with mode: 0644]
include/linux/if_pppox.h
include/linux/input.h
include/linux/ion.h [new file with mode: 0644]
include/linux/kernel.h
include/linux/keychord.h [new file with mode: 0644]
include/linux/keyreset.h [new file with mode: 0644]
include/linux/mm.h
include/linux/mmc/host.h
include/linux/mmc/pm.h
include/linux/mmc/sdio_func.h [changed mode: 0644->0755]
include/linux/msdos_fs.h
include/linux/netfilter/xt_qtaguid.h [new file with mode: 0644]
include/linux/netfilter/xt_quota2.h [new file with mode: 0644]
include/linux/netfilter/xt_socket.h
include/linux/persistent_ram.h [new file with mode: 0644]
include/linux/power_supply.h
include/linux/sched.h
include/linux/serial_core.h
include/linux/sockios.h
include/linux/switch.h [new file with mode: 0644]
include/linux/synaptics_i2c_rmi.h [new file with mode: 0644]
include/linux/uid_stat.h [new file with mode: 0644]
include/linux/usb/composite.h
include/linux/usb/f_accessory.h [new file with mode: 0644]
include/linux/usb/f_mtp.h [new file with mode: 0644]
include/linux/wakelock.h [new file with mode: 0755]
include/linux/wifi_tiwlan.h [new file with mode: 0644]
include/linux/wl127x-rfkill.h [new file with mode: 0644]
include/linux/wlan_plat.h [new file with mode: 0644]
include/net/activity_stats.h [new file with mode: 0644]
include/net/bluetooth/hci.h
include/net/bluetooth/hci_core.h
include/net/bluetooth/sco.h
include/net/tcp.h
init/Kconfig
kernel/cgroup.c
kernel/cpu.c
kernel/fork.c
kernel/irq/pm.c
kernel/panic.c
kernel/power/Kconfig
kernel/power/Makefile
kernel/power/main.c
kernel/power/power.h
kernel/power/process.c
kernel/power/suspend_time.c [new file with mode: 0644]
kernel/power/userwakelock.c [new file with mode: 0644]
kernel/power/wakelock.c [new file with mode: 0644]
kernel/printk.c
kernel/sched/core.c
kernel/sysctl.c
kernel/time/alarmtimer.c
lib/Kconfig.debug
mm/page_alloc.c
mm/shmem.c
net/Kconfig
net/Makefile
net/activity_stats.c [new file with mode: 0644]
net/bluetooth/af_bluetooth.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_event.c [changed mode: 0644->0755]
net/bluetooth/l2cap_core.c
net/bluetooth/mgmt.c
net/bluetooth/rfcomm/core.c
net/bluetooth/sco.c
net/bluetooth/smp.c
net/bridge/br_device.c
net/ipv4/Makefile
net/ipv4/af_inet.c
net/ipv4/devinet.c
net/ipv4/netfilter/Kconfig
net/ipv4/netfilter/ipt_REJECT.c
net/ipv4/sysfs_net_ipv4.c [new file with mode: 0644]
net/ipv4/tcp.c
net/ipv6/af_inet6.c
net/ipv6/netfilter/Kconfig
net/ipv6/netfilter/ip6_tables.c
net/ipv6/netfilter/ip6t_REJECT.c
net/netfilter/Kconfig
net/netfilter/Makefile
net/netfilter/xt_qtaguid.c [new file with mode: 0644]
net/netfilter/xt_qtaguid_internal.h [new file with mode: 0644]
net/netfilter/xt_qtaguid_print.c [new file with mode: 0644]
net/netfilter/xt_qtaguid_print.h [new file with mode: 0644]
net/netfilter/xt_quota2.c [new file with mode: 0644]
net/netfilter/xt_socket.c
net/rfkill/Kconfig
net/rfkill/core.c
net/wireless/Kconfig
net/wireless/scan.c
net/wireless/sme.c
security/commoncap.c

diff --git a/Documentation/android.txt b/Documentation/android.txt
new file mode 100644 (file)
index 0000000..72a62af
--- /dev/null
@@ -0,0 +1,121 @@
+                               =============
+                               A N D R O I D
+                               =============
+
+Copyright (C) 2009 Google, Inc.
+Written by Mike Chan <mike@android.com>
+
+CONTENTS:
+---------
+
+1. Android
+  1.1 Required enabled config options
+  1.2 Required disabled config options
+  1.3 Recommended enabled config options
+2. Contact
+
+
+1. Android
+==========
+
+Android (www.android.com) is an open source operating system for mobile devices.
+This document describes configurations needed to run the Android framework on
+top of the Linux kernel.
+
+To see a working defconfig, look at msm_defconfig or goldfish_defconfig,
+which can be found at http://android.git.kernel.org in kernel/common.git
+and kernel/msm.git.
+
+
+1.1 Required enabled config options
+-----------------------------------
+After building a standard defconfig, ensure that these options are enabled in
+your .config or defconfig if they are not already; this list is based on the
+msm_defconfig. You should keep the rest of the default options enabled in the
+defconfig unless you know what you are doing.
+
+ANDROID_PARANOID_NETWORK
+ASHMEM
+CONFIG_FB_MODE_HELPERS
+CONFIG_FONT_8x16
+CONFIG_FONT_8x8
+CONFIG_YAFFS_SHORT_NAMES_IN_RAM
+DAB
+EARLYSUSPEND
+FB
+FB_CFB_COPYAREA
+FB_CFB_FILLRECT
+FB_CFB_IMAGEBLIT
+FB_DEFERRED_IO
+FB_TILEBLITTING
+HIGH_RES_TIMERS
+INOTIFY
+INOTIFY_USER
+INPUT_EVDEV
+INPUT_GPIO
+INPUT_MISC
+LEDS_CLASS
+LEDS_GPIO
+LOCK_KERNEL
+LOGGER
+LOW_MEMORY_KILLER
+MISC_DEVICES
+NEW_LEDS
+NO_HZ
+POWER_SUPPLY
+PREEMPT
+RAMFS
+RTC_CLASS
+RTC_LIB
+SWITCH
+SWITCH_GPIO
+TMPFS
+UID_STAT
+UID16
+USB_FUNCTION
+USB_FUNCTION_ADB
+USER_WAKELOCK
+VIDEO_OUTPUT_CONTROL
+WAKELOCK
+YAFFS_AUTO_YAFFS2
+YAFFS_FS
+YAFFS_YAFFS1
+YAFFS_YAFFS2
+
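+(Entries listed without a CONFIG_ prefix appear in a generated .config
+with the prefix and "=y", e.g. ASHMEM becomes CONFIG_ASHMEM=y.)
+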
+
+1.2 Required disabled config options
+------------------------------------
+CONFIG_YAFFS_DISABLE_LAZY_LOAD
+DNOTIFY
+
+
+1.3 Recommended enabled config options
+--------------------------------------
+ANDROID_PMEM
+ANDROID_RAM_CONSOLE
+ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+SCHEDSTATS
+DEBUG_PREEMPT
+DEBUG_MUTEXES
+DEBUG_SPINLOCK_SLEEP
+DEBUG_INFO
+FRAME_POINTER
+CPU_FREQ
+CPU_FREQ_TABLE
+CPU_FREQ_DEFAULT_GOV_ONDEMAND
+CPU_FREQ_GOV_ONDEMAND
+CRC_CCITT
+EMBEDDED
+INPUT_TOUCHSCREEN
+I2C
+I2C_BOARDINFO
+LOG_BUF_SHIFT=17
+SERIAL_CORE
+SERIAL_CORE_CONSOLE
+
+
+2. Contact
+==========
+website: http://android.git.kernel.org
+
+mailing-lists: android-kernel@googlegroups.com
index a7c96ae..5e2063d 100644 (file)
@@ -593,6 +593,15 @@ there are no tasks in the cgroup. If pre_destroy() returns an error code,
 rmdir() will fail with it. From this behavior, pre_destroy() can be
 called multiple times against a cgroup.
 
+int allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+(cgroup_mutex held by caller)
+
+Called prior to moving a task into a cgroup; if the subsystem
+returns an error, this will abort the attach operation.  Used
+to extend the permission checks - if all subsystems in a cgroup
+return 0, the attach will be allowed to proceed, even if the
+default permission check (root or same user) fails.
+
 int can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
               struct cgroup_taskset *tset)
 (cgroup_mutex held by caller)
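
A minimal sketch of a subsystem implementing the allow_attach() hook
documented above. The policy shown (caller moves its own task or holds
CAP_SYS_NICE) is hypothetical, and the sketch assumes this patch adds an
.allow_attach callback to struct cgroup_subsys as the text implies:

    #include <linux/cgroup.h>
    #include <linux/capability.h>
    #include <linux/sched.h>

    static int example_allow_attach(struct cgroup *cgrp,
                                    struct cgroup_taskset *tset)
    {
            struct task_struct *task;

            /* visit every task in the set (NULL: skip no cgroup) */
            cgroup_taskset_for_each(task, NULL, tset) {
                    if (current != task && !capable(CAP_SYS_NICE))
                            return -EACCES;
            }
            return 0;
    }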
index c7a2eb8..d6ef94a 100644 (file)
@@ -28,6 +28,7 @@ Contents:
 2.3  Userspace
 2.4  Ondemand
 2.5  Conservative
+2.6  Interactive
 
 3.   The Governor Interface in the CPUfreq Core
 
@@ -191,6 +192,43 @@ governor but for the opposite direction.  For example when set to its
 default value of '20' it means that the CPU usage needs to be below
 20% between samples for the frequency to be decreased.
 
+
+2.6 Interactive
+---------------
+
+The CPUfreq governor "interactive" is designed for latency-sensitive,
+interactive workloads. This governor sets the CPU speed depending on
+usage, similar to "ondemand" and "conservative" governors.  However,
+the governor is more aggressive about scaling the CPU speed up in
+response to CPU-intensive activity.
+
+Sampling the CPU load every X ms can lead to under-powering the CPU
+for X ms, leading to dropped frames, stuttering UI, etc.  Instead of
+sampling the cpu at a specified rate, the interactive governor will
+check whether to scale the cpu frequency up soon after coming out of
+idle.  When the cpu comes out of idle, a timer is configured to fire
+within 1-2 ticks.  If the cpu is very busy between exiting idle and
+when the timer fires then we assume the cpu is underpowered and ramp
+to MAX speed.
+
+If the cpu was not sufficiently busy to immediately ramp to MAX speed,
+then the governor evaluates the cpu load since the last speed adjustment,
+choosing the higher of that longer-term load and the short-term load
+since idle exit to determine the cpu speed to ramp to.
+
+The tuneable values for this governor are:
+
+min_sample_time: The minimum amount of time to spend at the current
+frequency before ramping down. This is to ensure that the governor has
+seen enough historic cpu load data to determine the appropriate
+workload.  Default is 80000 usec.
+
+go_maxspeed_load: The CPU load at which to ramp to max speed.  Default
+is 85.
+
+timer_rate: Sample rate for reevaluating cpu load when the system is
+not idle.  Default is 30000 usec.
+
 3. The Governor Interface in the CPUfreq Core
 =============================================
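
An illustrative userspace sketch for setting the tunables above; the
sysfs location (/sys/devices/system/cpu/cpufreq/interactive/) is an
assumption, since this document does not spell out the path:

    #include <stdio.h>

    /* Write one interactive-governor tunable; returns 0 on success. */
    static int set_tunable(const char *name, const char *value)
    {
            char path[128];
            FILE *f;

            snprintf(path, sizeof(path),
                     "/sys/devices/system/cpu/cpufreq/interactive/%s",
                     name);
            f = fopen(path, "w");
            if (!f)
                    return -1;
            fprintf(f, "%s\n", value);
            return fclose(f);
    }

    int main(void)
    {
            set_tunable("min_sample_time", "80000");   /* usec, default */
            set_tunable("go_maxspeed_load", "85");     /* percent, default */
            set_tunable("timer_rate", "30000");        /* usec, default */
            return 0;
    }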
 
index a48aecc..9723259 100644 (file)
@@ -1852,6 +1852,15 @@ config DEPRECATED_PARAM_STRUCT
          This was deprecated in 2001 and announced to live on for 5 years.
          Some old boot loaders still use this way.
 
+config ARM_FLUSH_CONSOLE_ON_RESTART
+       bool "Force flush the console on restart"
+       help
+         If the console is locked while the system is rebooted, the messages
+         in the temporary logbuffer would not have propagated to all the
+         console drivers. This option forces the console lock to be
+         released if it failed to be acquired, which will cause all the
+         pending messages to be flushed.
+
 endmenu
 
 menu "Boot options"
index e0d236d..c81df6c 100644 (file)
@@ -63,6 +63,27 @@ config DEBUG_USER
              8 - SIGSEGV faults
             16 - SIGBUS faults
 
+config DEBUG_RODATA
+       bool "Write protect kernel text section"
+       default n
+       depends on DEBUG_KERNEL && MMU
+       ---help---
+         Mark the kernel text section as write-protected in the pagetables,
+         in order to catch accidental (and incorrect) writes to such const
+         data. This will cause the size of the kernel, plus up to 4MB, to
+         be mapped as pages instead of sections, which will increase TLB
+         pressure.
+         If in doubt, say "N".
+
+config DEBUG_RODATA_TEST
+       bool "Testcase for the DEBUG_RODATA feature"
+       depends on DEBUG_RODATA
+       default n
+       ---help---
+         This option enables a testcase for the DEBUG_RODATA
+         feature.
+         If in doubt, say "N".
+
 # These options are only for real kernel hackers who want to get their hands dirty.
 config DEBUG_LL
        bool "Kernel low-level debugging functions (read help!)"
index c5d6025..38342c7 100644 (file)
@@ -766,6 +766,8 @@ proc_types:
 @              b       __arm6_mmu_cache_off
 @              b       __armv3_mmu_cache_flush
 
+#if !defined(CONFIG_CPU_V7)
+               /* This collides with some V7 IDs, preventing correct detection */
                .word   0x00000000              @ old ARM ID
                .word   0x0000f000
                mov     pc, lr
@@ -774,6 +776,7 @@ proc_types:
  THUMB(                nop                             )
                mov     pc, lr
  THUMB(                nop                             )
+#endif
 
                .word   0x41007000              @ ARM7/710
                .word   0xfff8fe00
index 81a933e..b98465a 100644 (file)
@@ -46,3 +46,53 @@ config SHARP_PARAM
 
 config SHARP_SCOOP
        bool
+
+config FIQ_GLUE
+       bool
+       select FIQ
+
+config FIQ_DEBUGGER
+       bool "FIQ Mode Serial Debugger"
+       select FIQ
+       select FIQ_GLUE
+       default n
+       help
+         The FIQ serial debugger can accept commands even when the
+         kernel is unresponsive due to being stuck with interrupts
+         disabled.
+
+
+config FIQ_DEBUGGER_NO_SLEEP
+       bool "Keep serial debugger active"
+       depends on FIQ_DEBUGGER
+       default n
+       help
+         Enables the serial debugger at boot. Passing
+         fiq_debugger.no_sleep on the kernel commandline will
+         override this config option.
+
+config FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON
+       bool "Don't disable wakeup IRQ when debugger is active"
+       depends on FIQ_DEBUGGER
+       default n
+       help
+         Don't disable the wakeup irq when enabling the uart clock.  This will
+         cause extra interrupts, but it makes the serial debugger usable
+         on some MSM radio builds that ignore the uart clock request in power
+         collapse.
+
+config FIQ_DEBUGGER_CONSOLE
+       bool "Console on FIQ Serial Debugger port"
+       depends on FIQ_DEBUGGER
+       default n
+       help
+         Enables a console so that printk messages are displayed on
+         the debugger serial port as they occur.
+
+config FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE
+       bool "Put the FIQ debugger into console mode by default"
+       depends on FIQ_DEBUGGER_CONSOLE
+       default n
+       help
+         If enabled, this puts the fiq debugger into console mode by default.
+         Otherwise, the fiq debugger will start out in debug mode.
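
Taken together, a board that wants the FIQ debugger acting as its console
from boot might set the following (names from the entries above; FIQ and
FIQ_GLUE are selected automatically by FIQ_DEBUGGER):

    CONFIG_FIQ_DEBUGGER=y
    CONFIG_FIQ_DEBUGGER_CONSOLE=y
    CONFIG_FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE=y
    CONFIG_FIQ_DEBUGGER_NO_SLEEP=y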
index 6ea9b6f..3ab5d76 100644 (file)
@@ -17,3 +17,5 @@ obj-$(CONFIG_ARCH_IXP2000)    += uengine.o
 obj-$(CONFIG_ARCH_IXP23XX)     += uengine.o
 obj-$(CONFIG_PCI_HOST_ITE8152)  += it8152.o
 obj-$(CONFIG_ARM_TIMER_SP804)  += timer-sp.o
+obj-$(CONFIG_FIQ_GLUE)         += fiq_glue.o fiq_glue_setup.o
+obj-$(CONFIG_FIQ_DEBUGGER)     += fiq_debugger.o
diff --git a/arch/arm/common/fiq_debugger.c b/arch/arm/common/fiq_debugger.c
new file mode 100644 (file)
index 0000000..3ed18ae
--- /dev/null
@@ -0,0 +1,1196 @@
+/*
+ * arch/arm/common/fiq_debugger.c
+ *
+ * Serial Debugger Interface accessed through an FIQ interrupt.
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <stdarg.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/console.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/kernel_stat.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/timer.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/wakelock.h>
+
+#include <asm/fiq_debugger.h>
+#include <asm/fiq_glue.h>
+#include <asm/stacktrace.h>
+
+#include <mach/system.h>
+
+#include <linux/uaccess.h>
+
+#include "fiq_debugger_ringbuf.h"
+
+#define DEBUG_MAX 64
+#define MAX_UNHANDLED_FIQ_COUNT 1000000
+
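+/* thread_info sits at the base of the THREAD_SIZE-aligned kernel stack,
+ * so masking any stack pointer within that stack recovers it. */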
+#define THREAD_INFO(sp) ((struct thread_info *) \
+               ((unsigned long)(sp) & ~(THREAD_SIZE - 1)))
+
+struct fiq_debugger_state {
+       struct fiq_glue_handler handler;
+
+       int fiq;
+       int uart_irq;
+       int signal_irq;
+       int wakeup_irq;
+       bool wakeup_irq_no_set_wake;
+       struct clk *clk;
+       struct fiq_debugger_pdata *pdata;
+       struct platform_device *pdev;
+
+       char debug_cmd[DEBUG_MAX];
+       int debug_busy;
+       int debug_abort;
+
+       char debug_buf[DEBUG_MAX];
+       int debug_count;
+
+       bool no_sleep;
+       bool debug_enable;
+       bool ignore_next_wakeup_irq;
+       struct timer_list sleep_timer;
+       spinlock_t sleep_timer_lock;
+       bool uart_enabled;
+       struct wake_lock debugger_wake_lock;
+       bool console_enable;
+       int current_cpu;
+       atomic_t unhandled_fiq_count;
+       bool in_fiq;
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+       struct console console;
+       struct tty_driver *tty_driver;
+       struct tty_struct *tty;
+       int tty_open_count;
+       struct fiq_debugger_ringbuf *tty_rbuf;
+       bool syslog_dumping;
+#endif
+
+       unsigned int last_irqs[NR_IRQS];
+       unsigned int last_local_timer_irqs[NR_CPUS];
+};
+
+#ifdef CONFIG_FIQ_DEBUGGER_NO_SLEEP
+static bool initial_no_sleep = true;
+#else
+static bool initial_no_sleep;
+#endif
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE
+static bool initial_debug_enable = true;
+static bool initial_console_enable = true;
+#else
+static bool initial_debug_enable;
+static bool initial_console_enable;
+#endif
+
+module_param_named(no_sleep, initial_no_sleep, bool, 0644);
+module_param_named(debug_enable, initial_debug_enable, bool, 0644);
+module_param_named(console_enable, initial_console_enable, bool, 0644);
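+
+/* These module parameters may also be set on the kernel command line,
+ * e.g. "fiq_debugger.no_sleep=1" or "fiq_debugger.console_enable=1". */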
+
+#ifdef CONFIG_FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON
+static inline void enable_wakeup_irq(struct fiq_debugger_state *state) {}
+static inline void disable_wakeup_irq(struct fiq_debugger_state *state) {}
+#else
+static inline void enable_wakeup_irq(struct fiq_debugger_state *state)
+{
+       if (state->wakeup_irq < 0)
+               return;
+       enable_irq(state->wakeup_irq);
+       if (!state->wakeup_irq_no_set_wake)
+               enable_irq_wake(state->wakeup_irq);
+}
+static inline void disable_wakeup_irq(struct fiq_debugger_state *state)
+{
+       if (state->wakeup_irq < 0)
+               return;
+       disable_irq_nosync(state->wakeup_irq);
+       if (!state->wakeup_irq_no_set_wake)
+               disable_irq_wake(state->wakeup_irq);
+}
+#endif
+
+static inline bool debug_have_fiq(struct fiq_debugger_state *state)
+{
+       return (state->fiq >= 0);
+}
+
+static void debug_force_irq(struct fiq_debugger_state *state)
+{
+       unsigned int irq = state->signal_irq;
+
+       if (WARN_ON(!debug_have_fiq(state)))
+               return;
+       if (state->pdata->force_irq) {
+               state->pdata->force_irq(state->pdev, irq);
+       } else {
+               struct irq_chip *chip = irq_get_chip(irq);
+               if (chip && chip->irq_retrigger)
+                       chip->irq_retrigger(irq_get_irq_data(irq));
+       }
+}
+
+static void debug_uart_enable(struct fiq_debugger_state *state)
+{
+       if (state->clk)
+               clk_enable(state->clk);
+       if (state->pdata->uart_enable)
+               state->pdata->uart_enable(state->pdev);
+}
+
+static void debug_uart_disable(struct fiq_debugger_state *state)
+{
+       if (state->pdata->uart_disable)
+               state->pdata->uart_disable(state->pdev);
+       if (state->clk)
+               clk_disable(state->clk);
+}
+
+static void debug_uart_flush(struct fiq_debugger_state *state)
+{
+       if (state->pdata->uart_flush)
+               state->pdata->uart_flush(state->pdev);
+}
+
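+/* The uart_* and fiq_* operations above come from the platform's
+ * struct fiq_debugger_pdata; apart from uart_putc() and uart_getc()
+ * they are optional, so each call site checks the hook for NULL. */
+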
+static void debug_puts(struct fiq_debugger_state *state, char *s)
+{
+       unsigned c;
+       while ((c = *s++)) {
+               if (c == '\n')
+                       state->pdata->uart_putc(state->pdev, '\r');
+               state->pdata->uart_putc(state->pdev, c);
+       }
+}
+
+static void debug_prompt(struct fiq_debugger_state *state)
+{
+       debug_puts(state, "debug> ");
+}
+
+int log_buf_copy(char *dest, int idx, int len);
+static void dump_kernel_log(struct fiq_debugger_state *state)
+{
+       char buf[1024];
+       int idx = 0;
+       int ret;
+       int saved_oip;
+
+       /* setting oops_in_progress prevents log_buf_copy()
+        * from trying to take a spinlock which will make it
+        * very unhappy in some cases...
+        */
+       saved_oip = oops_in_progress;
+       oops_in_progress = 1;
+       for (;;) {
+               ret = log_buf_copy(buf, idx, 1023);
+               if (ret <= 0)
+                       break;
+               buf[ret] = 0;
+               debug_puts(state, buf);
+               idx += ret;
+       }
+       oops_in_progress = saved_oip;
+}
+
+static char *mode_name(unsigned cpsr)
+{
+       switch (cpsr & MODE_MASK) {
+       case USR_MODE: return "USR";
+       case FIQ_MODE: return "FIQ";
+       case IRQ_MODE: return "IRQ";
+       case SVC_MODE: return "SVC";
+       case ABT_MODE: return "ABT";
+       case UND_MODE: return "UND";
+       case SYSTEM_MODE: return "SYS";
+       default: return "???";
+       }
+}
+
+static int debug_printf(void *cookie, const char *fmt, ...)
+{
+       struct fiq_debugger_state *state = cookie;
+       char buf[256];
+       va_list ap;
+
+       va_start(ap, fmt);
+       vsnprintf(buf, sizeof(buf), fmt, ap);
+       va_end(ap);
+
+       debug_puts(state, buf);
+       return state->debug_abort;
+}
+
+/* Safe outside fiq context */
+static int debug_printf_nfiq(void *cookie, const char *fmt, ...)
+{
+       struct fiq_debugger_state *state = cookie;
+       char buf[256];
+       va_list ap;
+       unsigned long irq_flags;
+
+       va_start(ap, fmt);
+       vsnprintf(buf, sizeof(buf), fmt, ap);
+       va_end(ap);
+
+       local_irq_save(irq_flags);
+       debug_puts(state, buf);
+       debug_uart_flush(state);
+       local_irq_restore(irq_flags);
+       return state->debug_abort;
+}
+
+static void dump_regs(struct fiq_debugger_state *state, unsigned *regs)
+{
+       debug_printf(state, " r0 %08x  r1 %08x  r2 %08x  r3 %08x\n",
+                       regs[0], regs[1], regs[2], regs[3]);
+       debug_printf(state, " r4 %08x  r5 %08x  r6 %08x  r7 %08x\n",
+                       regs[4], regs[5], regs[6], regs[7]);
+       debug_printf(state, " r8 %08x  r9 %08x r10 %08x r11 %08x  mode %s\n",
+                       regs[8], regs[9], regs[10], regs[11],
+                       mode_name(regs[16]));
+       if ((regs[16] & MODE_MASK) == USR_MODE)
+               debug_printf(state, " ip %08x  sp %08x  lr %08x  pc %08x  "
+                               "cpsr %08x\n", regs[12], regs[13], regs[14],
+                               regs[15], regs[16]);
+       else
+               debug_printf(state, " ip %08x  sp %08x  lr %08x  pc %08x  "
+                               "cpsr %08x  spsr %08x\n", regs[12], regs[13],
+                               regs[14], regs[15], regs[16], regs[17]);
+}
+
+struct mode_regs {
+       unsigned long sp_svc;
+       unsigned long lr_svc;
+       unsigned long spsr_svc;
+
+       unsigned long sp_abt;
+       unsigned long lr_abt;
+       unsigned long spsr_abt;
+
+       unsigned long sp_und;
+       unsigned long lr_und;
+       unsigned long spsr_und;
+
+       unsigned long sp_irq;
+       unsigned long lr_irq;
+       unsigned long spsr_irq;
+
+       unsigned long r8_fiq;
+       unsigned long r9_fiq;
+       unsigned long r10_fiq;
+       unsigned long r11_fiq;
+       unsigned long r12_fiq;
+       unsigned long sp_fiq;
+       unsigned long lr_fiq;
+       unsigned long spsr_fiq;
+};
+
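+/* Visit each banked CPU mode in turn (SVC, ABT, UND, IRQ, FIQ) with IRQs
+ * and FIQs masked, storing that mode's sp/lr/spsr (plus r8-r12 for FIQ)
+ * into *regs, then drop back to the caller's original mode. */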
+void __naked get_mode_regs(struct mode_regs *regs)
+{
+       asm volatile (
+       "mrs    r1, cpsr\n"
+       "msr    cpsr_c, #0xd3 @(SVC_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+       "stmia  r0!, {r13 - r14}\n"
+       "mrs    r2, spsr\n"
+       "msr    cpsr_c, #0xd7 @(ABT_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+       "stmia  r0!, {r2, r13 - r14}\n"
+       "mrs    r2, spsr\n"
+       "msr    cpsr_c, #0xdb @(UND_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+       "stmia  r0!, {r2, r13 - r14}\n"
+       "mrs    r2, spsr\n"
+       "msr    cpsr_c, #0xd2 @(IRQ_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+       "stmia  r0!, {r2, r13 - r14}\n"
+       "mrs    r2, spsr\n"
+       "msr    cpsr_c, #0xd1 @(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+       "stmia  r0!, {r2, r8 - r14}\n"
+       "mrs    r2, spsr\n"
+       "stmia  r0!, {r2}\n"
+       "msr    cpsr_c, r1\n"
+       "bx     lr\n");
+}
+
+
+static void dump_allregs(struct fiq_debugger_state *state, unsigned *regs)
+{
+       struct mode_regs mode_regs;
+       dump_regs(state, regs);
+       get_mode_regs(&mode_regs);
+       debug_printf(state, " svc: sp %08x  lr %08x  spsr %08x\n",
+                       mode_regs.sp_svc, mode_regs.lr_svc, mode_regs.spsr_svc);
+       debug_printf(state, " abt: sp %08x  lr %08x  spsr %08x\n",
+                       mode_regs.sp_abt, mode_regs.lr_abt, mode_regs.spsr_abt);
+       debug_printf(state, " und: sp %08x  lr %08x  spsr %08x\n",
+                       mode_regs.sp_und, mode_regs.lr_und, mode_regs.spsr_und);
+       debug_printf(state, " irq: sp %08x  lr %08x  spsr %08x\n",
+                       mode_regs.sp_irq, mode_regs.lr_irq, mode_regs.spsr_irq);
+       debug_printf(state, " fiq: r8 %08x  r9 %08x  r10 %08x  r11 %08x  "
+                       "r12 %08x\n",
+                       mode_regs.r8_fiq, mode_regs.r9_fiq, mode_regs.r10_fiq,
+                       mode_regs.r11_fiq, mode_regs.r12_fiq);
+       debug_printf(state, " fiq: sp %08x  lr %08x  spsr %08x\n",
+                       mode_regs.sp_fiq, mode_regs.lr_fiq, mode_regs.spsr_fiq);
+}
+
+static void dump_irqs(struct fiq_debugger_state *state)
+{
+       int n;
+       unsigned int cpu;
+
+       debug_printf(state, "irqnr       total  since-last   status  name\n");
+       for (n = 0; n < NR_IRQS; n++) {
+               struct irqaction *act = irq_desc[n].action;
+               if (!act && !kstat_irqs(n))
+                       continue;
+               debug_printf(state, "%5d: %10u %11u %8x  %s\n", n,
+                       kstat_irqs(n),
+                       kstat_irqs(n) - state->last_irqs[n],
+                       irq_desc[n].status_use_accessors,
+                       (act && act->name) ? act->name : "???");
+               state->last_irqs[n] = kstat_irqs(n);
+       }
+
+       for (cpu = 0; cpu < NR_CPUS; cpu++) {
+
+               debug_printf(state, "LOC %d: %10u %11u\n", cpu,
+                            __IRQ_STAT(cpu, local_timer_irqs),
+                            __IRQ_STAT(cpu, local_timer_irqs) -
+                            state->last_local_timer_irqs[cpu]);
+               state->last_local_timer_irqs[cpu] =
+                       __IRQ_STAT(cpu, local_timer_irqs);
+       }
+}
+
+struct stacktrace_state {
+       struct fiq_debugger_state *state;
+       unsigned int depth;
+};
+
+static int report_trace(struct stackframe *frame, void *d)
+{
+       struct stacktrace_state *sts = d;
+
+       if (sts->depth) {
+               debug_printf(sts->state,
+                       "  pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n",
+                       frame->pc, frame->pc, frame->lr, frame->lr,
+                       frame->sp, frame->fp);
+               sts->depth--;
+               return 0;
+       }
+       debug_printf(sts->state, "  ...\n");
+
+       return sts->depth == 0;
+}
+
+struct frame_tail {
+       struct frame_tail *fp;
+       unsigned long sp;
+       unsigned long lr;
+} __attribute__((packed));
+
+static struct frame_tail *user_backtrace(struct fiq_debugger_state *state,
+                                       struct frame_tail *tail)
+{
+       struct frame_tail buftail[2];
+
+       /* Also check accessibility of one struct frame_tail beyond */
+       if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) {
+               debug_printf(state, "  invalid frame pointer %p\n", tail);
+               return NULL;
+       }
+       if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail))) {
+               debug_printf(state,
+                       "  failed to copy frame pointer %p\n", tail);
+               return NULL;
+       }
+
+       debug_printf(state, "  %p\n", buftail[0].lr);
+
+       /* frame pointers should strictly progress back up the stack
+        * (towards higher addresses) */
+       if (tail >= buftail[0].fp)
+               return NULL;
+
+       return buftail[0].fp-1;
+}
+
+void dump_stacktrace(struct fiq_debugger_state *state,
+               struct pt_regs * const regs, unsigned int depth, void *ssp)
+{
+       struct frame_tail *tail;
+       struct thread_info *real_thread_info = THREAD_INFO(ssp);
+       struct stacktrace_state sts;
+
+       sts.depth = depth;
+       sts.state = state;
+       *current_thread_info() = *real_thread_info;
+
+       if (!current)
+               debug_printf(state, "current NULL\n");
+       else
+               debug_printf(state, "pid: %d  comm: %s\n",
+                       current->pid, current->comm);
+       dump_regs(state, (unsigned *)regs);
+
+       if (!user_mode(regs)) {
+               struct stackframe frame;
+               frame.fp = regs->ARM_fp;
+               frame.sp = regs->ARM_sp;
+               frame.lr = regs->ARM_lr;
+               frame.pc = regs->ARM_pc;
+               debug_printf(state,
+                       "  pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n",
+                       regs->ARM_pc, regs->ARM_pc, regs->ARM_lr, regs->ARM_lr,
+                       regs->ARM_sp, regs->ARM_fp);
+               walk_stackframe(&frame, report_trace, &sts);
+               return;
+       }
+
+       tail = ((struct frame_tail *) regs->ARM_fp) - 1;
+       while (depth-- && tail && !((unsigned long) tail & 3))
+               tail = user_backtrace(state, tail);
+}
+
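+/* Minimal "ps": print pid, ppid, priority and comm for every thread,
+ * decoding the task state via stat_nam and showing either "running" or
+ * the thread's saved pc. */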
+static void do_ps(struct fiq_debugger_state *state)
+{
+       struct task_struct *g;
+       struct task_struct *p;
+       unsigned task_state;
+       static const char stat_nam[] = "RSDTtZX";
+
+       debug_printf(state, "pid   ppid  prio task            pc\n");
+       read_lock(&tasklist_lock);
+       do_each_thread(g, p) {
+               task_state = p->state ? __ffs(p->state) + 1 : 0;
+               debug_printf(state,
+                            "%5d %5d %4d ", p->pid, p->parent->pid, p->prio);
+               debug_printf(state, "%-13.13s %c", p->comm,
+                            task_state >= sizeof(stat_nam) ? '?' : stat_nam[task_state]);
+               if (task_state == TASK_RUNNING)
+                       debug_printf(state, " running\n");
+               else
+                       debug_printf(state, " %08lx\n", thread_saved_pc(p));
+       } while_each_thread(g, p);
+       read_unlock(&tasklist_lock);
+}
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+static void begin_syslog_dump(struct fiq_debugger_state *state)
+{
+       state->syslog_dumping = true;
+}
+
+static void end_syslog_dump(struct fiq_debugger_state *state)
+{
+       state->syslog_dumping = false;
+}
+#else
+extern int do_syslog(int type, char __user *buf, int count);
+static void begin_syslog_dump(struct fiq_debugger_state *state)
+{
+       do_syslog(5 /* clear */, NULL, 0);
+}
+
+static void end_syslog_dump(struct fiq_debugger_state *state)
+{
+       char buf[128];
+       int ret;
+       int idx = 0;
+
+       while (1) {
+               ret = log_buf_copy(buf, idx, sizeof(buf) - 1);
+               if (ret <= 0)
+                       break;
+               buf[ret] = 0;
+               debug_printf(state, "%s", buf);
+               idx += ret;
+       }
+}
+#endif
+
+static void do_sysrq(struct fiq_debugger_state *state, char rq)
+{
+       begin_syslog_dump(state);
+       handle_sysrq(rq);
+       end_syslog_dump(state);
+}
+
+/* This function CANNOT be called in FIQ context */
+static void debug_irq_exec(struct fiq_debugger_state *state, char *cmd)
+{
+       if (!strcmp(cmd, "ps"))
+               do_ps(state);
+       if (!strcmp(cmd, "sysrq"))
+               do_sysrq(state, 'h');
+       if (!strncmp(cmd, "sysrq ", 6))
+               do_sysrq(state, cmd[6]);
+}
+
+static void debug_help(struct fiq_debugger_state *state)
+{
+       debug_printf(state,     "FIQ Debugger commands:\n"
+                               " pc            PC status\n"
+                               " regs          Register dump\n"
+                               " allregs       Extended Register dump\n"
+                               " bt            Stack trace\n"
+                               " reboot        Reboot\n"
+                               " irqs          Interupt status\n"
+                               " kmsg          Kernel log\n"
+                               " version       Kernel version\n");
+       debug_printf(state,     " sleep         Allow sleep while in FIQ\n"
+                               " nosleep       Disable sleep while in FIQ\n"
+                               " console       Switch terminal to console\n"
+                               " cpu           Current CPU\n"
+                               " cpu <number>  Switch to CPU<number>\n");
+       debug_printf(state,     " ps            Process list\n"
+                               " sysrq         sysrq options\n"
+                               " sysrq <param> Execute sysrq with <param>\n");
+}
+
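+/* Without a real FIQ the debugger runs from the uart IRQ, so switching
+ * cpus simply moves that IRQ's affinity to the requested cpu. */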
+static void take_affinity(void *info)
+{
+       struct fiq_debugger_state *state = info;
+       struct cpumask cpumask;
+
+       cpumask_clear(&cpumask);
+       cpumask_set_cpu(get_cpu(), &cpumask);
+
+       irq_set_affinity(state->uart_irq, &cpumask);
+}
+
+static void switch_cpu(struct fiq_debugger_state *state, int cpu)
+{
+       if (!debug_have_fiq(state))
+               smp_call_function_single(cpu, take_affinity, state, false);
+       state->current_cpu = cpu;
+}
+
+static bool debug_fiq_exec(struct fiq_debugger_state *state,
+                       const char *cmd, unsigned *regs, void *svc_sp)
+{
+       bool signal_helper = false;
+
+       if (!strcmp(cmd, "help") || !strcmp(cmd, "?")) {
+               debug_help(state);
+       } else if (!strcmp(cmd, "pc")) {
+               debug_printf(state, " pc %08x cpsr %08x mode %s\n",
+                       regs[15], regs[16], mode_name(regs[16]));
+       } else if (!strcmp(cmd, "regs")) {
+               dump_regs(state, regs);
+       } else if (!strcmp(cmd, "allregs")) {
+               dump_allregs(state, regs);
+       } else if (!strcmp(cmd, "bt")) {
+               dump_stacktrace(state, (struct pt_regs *)regs, 100, svc_sp);
+       } else if (!strcmp(cmd, "reboot")) {
+               arch_reset(0, 0);
+       } else if (!strcmp(cmd, "irqs")) {
+               dump_irqs(state);
+       } else if (!strcmp(cmd, "kmsg")) {
+               dump_kernel_log(state);
+       } else if (!strcmp(cmd, "version")) {
+               debug_printf(state, "%s\n", linux_banner);
+       } else if (!strcmp(cmd, "sleep")) {
+               state->no_sleep = false;
+               debug_printf(state, "enabling sleep\n");
+       } else if (!strcmp(cmd, "nosleep")) {
+               state->no_sleep = true;
+               debug_printf(state, "disabling sleep\n");
+       } else if (!strcmp(cmd, "console")) {
+               state->console_enable = true;
+               debug_printf(state, "console mode\n");
+       } else if (!strcmp(cmd, "cpu")) {
+               debug_printf(state, "cpu %d\n", state->current_cpu);
+       } else if (!strncmp(cmd, "cpu ", 4)) {
+               unsigned long cpu = 0;
+               if (strict_strtoul(cmd + 4, 10, &cpu) == 0)
+                       switch_cpu(state, cpu);
+               else
+                       debug_printf(state, "invalid cpu\n");
+               debug_printf(state, "cpu %d\n", state->current_cpu);
+       } else {
+               if (state->debug_busy) {
+                       debug_printf(state,
+                               "command processor busy. trying to abort.\n");
+                       state->debug_abort = -1;
+               } else {
+                       strcpy(state->debug_cmd, cmd);
+                       state->debug_busy = 1;
+               }
+
+               return true;
+       }
+       if (!state->console_enable)
+               debug_prompt(state);
+
+       return signal_helper;
+}
+
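+/* Sleep policy: uart activity arms this timer; when it expires with
+ * no_sleep clear, the uart is shut down, the wakelock is released and
+ * the wakeup irq is re-armed so the system may suspend again. */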
+static void sleep_timer_expired(unsigned long data)
+{
+       struct fiq_debugger_state *state = (struct fiq_debugger_state *)data;
+       unsigned long flags;
+
+       spin_lock_irqsave(&state->sleep_timer_lock, flags);
+       if (state->uart_enabled && !state->no_sleep) {
+               if (state->debug_enable && !state->console_enable) {
+                       state->debug_enable = false;
+                       debug_printf_nfiq(state, "suspending fiq debugger\n");
+               }
+               state->ignore_next_wakeup_irq = true;
+               debug_uart_disable(state);
+               state->uart_enabled = false;
+               enable_wakeup_irq(state);
+       }
+       wake_unlock(&state->debugger_wake_lock);
+       spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
+}
+
+static void handle_wakeup(struct fiq_debugger_state *state)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&state->sleep_timer_lock, flags);
+       if (state->wakeup_irq >= 0 && state->ignore_next_wakeup_irq) {
+               state->ignore_next_wakeup_irq = false;
+       } else if (!state->uart_enabled) {
+               wake_lock(&state->debugger_wake_lock);
+               debug_uart_enable(state);
+               state->uart_enabled = true;
+               disable_wakeup_irq(state);
+               mod_timer(&state->sleep_timer, jiffies + HZ / 2);
+       }
+       spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
+}
+
+static irqreturn_t wakeup_irq_handler(int irq, void *dev)
+{
+       struct fiq_debugger_state *state = dev;
+
+       if (!state->no_sleep)
+               debug_puts(state, "WAKEUP\n");
+       handle_wakeup(state);
+
+       return IRQ_HANDLED;
+}
+
+
+static void debug_handle_irq_context(struct fiq_debugger_state *state)
+{
+       if (!state->no_sleep) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&state->sleep_timer_lock, flags);
+               wake_lock(&state->debugger_wake_lock);
+               mod_timer(&state->sleep_timer, jiffies + HZ * 5);
+               spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
+       }
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+       if (state->tty) {
+               int i;
+               int count = fiq_debugger_ringbuf_level(state->tty_rbuf);
+               for (i = 0; i < count; i++) {
+                       int c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0);
+                       tty_insert_flip_char(state->tty, c, TTY_NORMAL);
+                       if (!fiq_debugger_ringbuf_consume(state->tty_rbuf, 1))
+                               pr_warn("fiq tty failed to consume byte\n");
+               }
+               tty_flip_buffer_push(state->tty);
+       }
+#endif
+       if (state->debug_busy) {
+               debug_irq_exec(state, state->debug_cmd);
+               debug_prompt(state);
+               state->debug_busy = 0;
+       }
+}
+
+static int debug_getc(struct fiq_debugger_state *state)
+{
+       return state->pdata->uart_getc(state->pdev);
+}
+
+static bool debug_handle_uart_interrupt(struct fiq_debugger_state *state,
+                       int this_cpu, void *regs, void *svc_sp)
+{
+       int c;
+       static int last_c;
+       int count = 0;
+       bool signal_helper = false;
+
+       if (this_cpu != state->current_cpu) {
+               if (state->in_fiq)
+                       return false;
+
+               if (atomic_inc_return(&state->unhandled_fiq_count) !=
+                                       MAX_UNHANDLED_FIQ_COUNT)
+                       return false;
+
+               debug_printf(state, "fiq_debugger: cpu %d not responding, "
+                       "reverting to cpu %d\n", state->current_cpu,
+                       this_cpu);
+
+               atomic_set(&state->unhandled_fiq_count, 0);
+               switch_cpu(state, this_cpu);
+               return false;
+       }
+
+       state->in_fiq = true;
+
+       while ((c = debug_getc(state)) != FIQ_DEBUGGER_NO_CHAR) {
+               count++;
+               if (!state->debug_enable) {
+                       if ((c == 13) || (c == 10)) {
+                               state->debug_enable = true;
+                               state->debug_count = 0;
+                               debug_prompt(state);
+                       }
+               } else if (c == FIQ_DEBUGGER_BREAK) {
+                       state->console_enable = false;
+                       debug_puts(state, "fiq debugger mode\n");
+                       state->debug_count = 0;
+                       debug_prompt(state);
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+               } else if (state->console_enable && state->tty_rbuf) {
+                       fiq_debugger_ringbuf_push(state->tty_rbuf, c);
+                       signal_helper = true;
+#endif
+               } else if ((c >= ' ') && (c < 127)) {
+                       if (state->debug_count < (DEBUG_MAX - 1)) {
+                               state->debug_buf[state->debug_count++] = c;
+                               state->pdata->uart_putc(state->pdev, c);
+                       }
+               } else if ((c == 8) || (c == 127)) {
+                       if (state->debug_count > 0) {
+                               state->debug_count--;
+                               state->pdata->uart_putc(state->pdev, 8);
+                               state->pdata->uart_putc(state->pdev, ' ');
+                               state->pdata->uart_putc(state->pdev, 8);
+                       }
+               } else if ((c == 13) || (c == 10)) {
+                       if (c == '\r' || (c == '\n' && last_c != '\r')) {
+                               state->pdata->uart_putc(state->pdev, '\r');
+                               state->pdata->uart_putc(state->pdev, '\n');
+                       }
+                       if (state->debug_count) {
+                               state->debug_buf[state->debug_count] = 0;
+                               state->debug_count = 0;
+                               signal_helper |=
+                                       debug_fiq_exec(state, state->debug_buf,
+                                                      regs, svc_sp);
+                       } else {
+                               debug_prompt(state);
+                       }
+               }
+               last_c = c;
+       }
+       debug_uart_flush(state);
+       if (state->pdata->fiq_ack)
+               state->pdata->fiq_ack(state->pdev, state->fiq);
+
+       /* poke sleep timer if necessary */
+       if (state->debug_enable && !state->no_sleep)
+               signal_helper = true;
+
+       atomic_set(&state->unhandled_fiq_count, 0);
+       state->in_fiq = false;
+
+       return signal_helper;
+}
+
+static void debug_fiq(struct fiq_glue_handler *h, void *regs, void *svc_sp)
+{
+       struct fiq_debugger_state *state =
+               container_of(h, struct fiq_debugger_state, handler);
+       unsigned int this_cpu = THREAD_INFO(svc_sp)->cpu;
+       bool need_irq;
+
+       need_irq = debug_handle_uart_interrupt(state, this_cpu, regs, svc_sp);
+       if (need_irq)
+               debug_force_irq(state);
+}
+
+/*
+ * When not using FIQs, this single interrupt is the only entry point: it
+ * effectively takes over the UART interrupt and does all the work in this
+ * context.
+ */
+static irqreturn_t debug_uart_irq(int irq, void *dev)
+{
+       struct fiq_debugger_state *state = dev;
+       bool not_done;
+
+       handle_wakeup(state);
+
+       /* handle the debugger irq in regular context */
+       not_done = debug_handle_uart_interrupt(state, smp_processor_id(),
+                                             get_irq_regs(),
+                                             current_thread_info());
+       if (not_done)
+               debug_handle_irq_context(state);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * If FIQs are used, not everything can happen in fiq context.
+ * The FIQ handler does what it can and then signals this interrupt to
+ * finish the job in irq context.
+ */
+static irqreturn_t debug_signal_irq(int irq, void *dev)
+{
+       struct fiq_debugger_state *state = dev;
+
+       if (state->pdata->force_irq_ack)
+               state->pdata->force_irq_ack(state->pdev, state->signal_irq);
+
+       debug_handle_irq_context(state);
+
+       return IRQ_HANDLED;
+}
+
+static void debug_resume(struct fiq_glue_handler *h)
+{
+       struct fiq_debugger_state *state =
+               container_of(h, struct fiq_debugger_state, handler);
+       if (state->pdata->uart_resume)
+               state->pdata->uart_resume(state->pdev);
+}
+
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+static struct tty_driver *debug_console_device(struct console *co, int *index)
+{
+       struct fiq_debugger_state *state;
+       state = container_of(co, struct fiq_debugger_state, console);
+       *index = 0;
+       return state->tty_driver;
+}
+
+static void debug_console_write(struct console *co,
+                               const char *s, unsigned int count)
+{
+       struct fiq_debugger_state *state;
+
+       state = container_of(co, struct fiq_debugger_state, console);
+
+       if (!state->console_enable && !state->syslog_dumping)
+               return;
+
+       debug_uart_enable(state);
+       while (count--) {
+               if (*s == '\n')
+                       state->pdata->uart_putc(state->pdev, '\r');
+               state->pdata->uart_putc(state->pdev, *s++);
+       }
+       debug_uart_flush(state);
+       debug_uart_disable(state);
+}
+
+static struct console fiq_debugger_console = {
+       .name = "ttyFIQ",
+       .device = debug_console_device,
+       .write = debug_console_write,
+       .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_ENABLED,
+};
+
+static int fiq_tty_open(struct tty_struct *tty, struct file *filp)
+{
+       struct fiq_debugger_state *state = tty->driver->driver_state;
+       if (state->tty_open_count++)
+               return 0;
+
+       tty->driver_data = state;
+       state->tty = tty;
+       return 0;
+}
+
+static void fiq_tty_close(struct tty_struct *tty, struct file *filp)
+{
+       struct fiq_debugger_state *state = tty->driver_data;
+       if (--state->tty_open_count)
+               return;
+       state->tty = NULL;
+}
+
+static int fiq_tty_write(struct tty_struct *tty, const unsigned char *buf,
+                        int count)
+{
+       int i;
+       struct fiq_debugger_state *state = tty->driver_data;
+
+       if (!state->console_enable)
+               return count;
+
+       debug_uart_enable(state);
+       for (i = 0; i < count; i++)
+               state->pdata->uart_putc(state->pdev, *buf++);
+       debug_uart_disable(state);
+
+       return count;
+}
+
+static int fiq_tty_write_room(struct tty_struct *tty)
+{
+       return 1024;
+}
+
+static const struct tty_operations fiq_tty_driver_ops = {
+       .write = fiq_tty_write,
+       .write_room = fiq_tty_write_room,
+       .open = fiq_tty_open,
+       .close = fiq_tty_close,
+};
+
+static int fiq_debugger_tty_init(struct fiq_debugger_state *state)
+{
+       int ret = -EINVAL;
+
+       state->tty_driver = alloc_tty_driver(1);
+       if (!state->tty_driver) {
+               pr_err("Failed to allocate fiq debugger tty\n");
+               return -ENOMEM;
+       }
+
+       state->tty_driver->owner                = THIS_MODULE;
+       state->tty_driver->driver_name  = "fiq-debugger";
+       state->tty_driver->name         = "ttyFIQ";
+       state->tty_driver->type         = TTY_DRIVER_TYPE_SERIAL;
+       state->tty_driver->subtype      = SERIAL_TYPE_NORMAL;
+       state->tty_driver->init_termios = tty_std_termios;
+       state->tty_driver->init_termios.c_cflag =
+                                       B115200 | CS8 | CREAD | HUPCL | CLOCAL;
+       state->tty_driver->init_termios.c_ispeed =
+               state->tty_driver->init_termios.c_ospeed = 115200;
+       state->tty_driver->flags                = TTY_DRIVER_REAL_RAW;
+       tty_set_operations(state->tty_driver, &fiq_tty_driver_ops);
+       state->tty_driver->driver_state = state;
+
+       ret = tty_register_driver(state->tty_driver);
+       if (ret) {
+               pr_err("Failed to register fiq tty: %d\n", ret);
+               goto err;
+       }
+
+       state->tty_rbuf = fiq_debugger_ringbuf_alloc(1024);
+       if (!state->tty_rbuf) {
+               pr_err("Failed to allocate fiq debugger ringbuf\n");
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       pr_info("Registered FIQ tty driver %p\n", state->tty_driver);
+       return 0;
+
+err:
+       fiq_debugger_ringbuf_free(state->tty_rbuf);
+       state->tty_rbuf = NULL;
+       put_tty_driver(state->tty_driver);
+       return ret;
+}
+#endif
+
+static int fiq_debugger_dev_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct fiq_debugger_state *state = platform_get_drvdata(pdev);
+
+       if (state->pdata->uart_dev_suspend)
+               return state->pdata->uart_dev_suspend(pdev);
+       return 0;
+}
+
+static int fiq_debugger_dev_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct fiq_debugger_state *state = platform_get_drvdata(pdev);
+
+       if (state->pdata->uart_dev_resume)
+               return state->pdata->uart_dev_resume(pdev);
+       return 0;
+}
+
+static int fiq_debugger_probe(struct platform_device *pdev)
+{
+       int ret;
+       struct fiq_debugger_pdata *pdata = dev_get_platdata(&pdev->dev);
+       struct fiq_debugger_state *state;
+       int fiq;
+       int uart_irq;
+
+       if (!pdata->uart_getc || !pdata->uart_putc)
+               return -EINVAL;
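+       /* uart_enable and uart_disable must be provided as a pair */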
+       if ((pdata->uart_enable && !pdata->uart_disable) ||
+           (!pdata->uart_enable && pdata->uart_disable))
+               return -EINVAL;
+
+       fiq = platform_get_irq_byname(pdev, "fiq");
+       uart_irq = platform_get_irq_byname(pdev, "uart_irq");
+
+       /* uart_irq mode and fiq mode are mutually exclusive, but one of them
+        * is required */
+       if ((uart_irq < 0 && fiq < 0) || (uart_irq >= 0 && fiq >= 0))
+               return -EINVAL;
+       if (fiq >= 0 && !pdata->fiq_enable)
+               return -EINVAL;
+
+       state = kzalloc(sizeof(*state), GFP_KERNEL);
+       if (!state)
+               return -ENOMEM;
+       setup_timer(&state->sleep_timer, sleep_timer_expired,
+                   (unsigned long)state);
+       state->pdata = pdata;
+       state->pdev = pdev;
+       state->no_sleep = initial_no_sleep;
+       state->debug_enable = initial_debug_enable;
+       state->console_enable = initial_console_enable;
+
+       state->fiq = fiq;
+       state->uart_irq = uart_irq;
+       state->signal_irq = platform_get_irq_byname(pdev, "signal");
+       state->wakeup_irq = platform_get_irq_byname(pdev, "wakeup");
+
+       platform_set_drvdata(pdev, state);
+
+       spin_lock_init(&state->sleep_timer_lock);
+
+       if (state->wakeup_irq < 0 && debug_have_fiq(state))
+               state->no_sleep = true;
+       state->ignore_next_wakeup_irq = !state->no_sleep;
+
+       wake_lock_init(&state->debugger_wake_lock,
+                       WAKE_LOCK_SUSPEND, "serial-debug");
+
+       state->clk = clk_get(&pdev->dev, NULL);
+       if (IS_ERR(state->clk))
+               state->clk = NULL;
+
+       /* do not call pdata->uart_enable here since uart_init may still
+        * need to do some initialization before uart_enable can work.
+        * So, only try to manage the clock during init.
+        */
+       if (state->clk)
+               clk_enable(state->clk);
+
+       if (pdata->uart_init) {
+               ret = pdata->uart_init(pdev);
+               if (ret)
+                       goto err_uart_init;
+       }
+
+       debug_printf_nfiq(state, "<hit enter %sto activate fiq debugger>\n",
+                               state->no_sleep ? "" : "twice ");
+
+       if (debug_have_fiq(state)) {
+               state->handler.fiq = debug_fiq;
+               state->handler.resume = debug_resume;
+               ret = fiq_glue_register_handler(&state->handler);
+               if (ret) {
+                       pr_err("%s: could not install fiq handler\n", __func__);
+                       goto err_register_fiq;
+               }
+
+               pdata->fiq_enable(pdev, state->fiq, 1);
+       } else {
+               ret = request_irq(state->uart_irq, debug_uart_irq,
+                                 IRQF_NO_SUSPEND, "debug", state);
+               if (ret) {
+                       pr_err("%s: could not install irq handler\n", __func__);
+                       goto err_register_irq;
+               }
+
+               /* for irq-only mode, we want this irq to wake us up, if it
+                * can.
+                */
+               enable_irq_wake(state->uart_irq);
+       }
+
+       if (state->clk)
+               clk_disable(state->clk);
+
+       if (state->signal_irq >= 0) {
+               ret = request_irq(state->signal_irq, debug_signal_irq,
+                         IRQF_TRIGGER_RISING, "debug-signal", state);
+               if (ret)
+                       pr_err("serial_debugger: could not install signal_irq");
+       }
+
+       if (state->wakeup_irq >= 0) {
+               ret = request_irq(state->wakeup_irq, wakeup_irq_handler,
+                                 IRQF_TRIGGER_FALLING | IRQF_DISABLED,
+                                 "debug-wakeup", state);
+               if (ret) {
+                       pr_err("serial_debugger: "
+                               "could not install wakeup irq\n");
+                       state->wakeup_irq = -1;
+               } else {
+                       ret = enable_irq_wake(state->wakeup_irq);
+                       if (ret) {
+                               pr_err("serial_debugger: "
+                                       "could not enable wakeup\n");
+                               state->wakeup_irq_no_set_wake = true;
+                       }
+               }
+       }
+       if (state->no_sleep)
+               handle_wakeup(state);
+
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+       state->console = fiq_debugger_console;
+       register_console(&state->console);
+       fiq_debugger_tty_init(state);
+#endif
+       return 0;
+
+err_register_irq:
+err_register_fiq:
+       if (pdata->uart_free)
+               pdata->uart_free(pdev);
+err_uart_init:
+       if (state->clk) {
+               clk_disable(state->clk);
+               clk_put(state->clk);
+       }
+       wake_lock_destroy(&state->debugger_wake_lock);
+       platform_set_drvdata(pdev, NULL);
+       kfree(state);
+       return ret;
+}
+
+static const struct dev_pm_ops fiq_debugger_dev_pm_ops = {
+       .suspend        = fiq_debugger_dev_suspend,
+       .resume         = fiq_debugger_dev_resume,
+};
+
+static struct platform_driver fiq_debugger_driver = {
+       .probe  = fiq_debugger_probe,
+       .driver = {
+               .name   = "fiq_debugger",
+               .pm     = &fiq_debugger_dev_pm_ops,
+       },
+};
+
+static int __init fiq_debugger_init(void)
+{
+       return platform_driver_register(&fiq_debugger_driver);
+}
+
+postcore_initcall(fiq_debugger_init);
diff --git a/arch/arm/common/fiq_debugger_ringbuf.h b/arch/arm/common/fiq_debugger_ringbuf.h
new file mode 100644
index 0000000..2649b55
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+ * arch/arm/common/fiq_debugger_ringbuf.h
+ *
+ * simple lockless ringbuffer
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
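+/*
+ * Single-producer/single-consumer ring buffer: the FIQ handler pushes bytes
+ * at head, the irq-context helper consumes them at tail. One slot is always
+ * left empty so that head == tail unambiguously means "empty", and smp_mb()
+ * orders each data write against the matching index update.
+ */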
+struct fiq_debugger_ringbuf {
+       int len;
+       int head;
+       int tail;
+       u8 buf[];
+};
+
+
+static inline struct fiq_debugger_ringbuf *fiq_debugger_ringbuf_alloc(int len)
+{
+       struct fiq_debugger_ringbuf *rbuf;
+
+       rbuf = kzalloc(sizeof(*rbuf) + len, GFP_KERNEL);
+       if (rbuf == NULL)
+               return NULL;
+
+       rbuf->len = len;
+       rbuf->head = 0;
+       rbuf->tail = 0;
+       smp_mb();
+
+       return rbuf;
+}
+
+static inline void fiq_debugger_ringbuf_free(struct fiq_debugger_ringbuf *rbuf)
+{
+       kfree(rbuf);
+}
+
+static inline int fiq_debugger_ringbuf_level(struct fiq_debugger_ringbuf *rbuf)
+{
+       int level = rbuf->head - rbuf->tail;
+
+       if (level < 0)
+               level = rbuf->len + level;
+
+       return level;
+}
+
+static inline int fiq_debugger_ringbuf_room(struct fiq_debugger_ringbuf *rbuf)
+{
+       return rbuf->len - fiq_debugger_ringbuf_level(rbuf) - 1;
+}
+
+static inline u8
+fiq_debugger_ringbuf_peek(struct fiq_debugger_ringbuf *rbuf, int i)
+{
+       return rbuf->buf[(rbuf->tail + i) % rbuf->len];
+}
+
+static inline int
+fiq_debugger_ringbuf_consume(struct fiq_debugger_ringbuf *rbuf, int count)
+{
+       count = min(count, fiq_debugger_ringbuf_level(rbuf));
+
+       rbuf->tail = (rbuf->tail + count) % rbuf->len;
+       smp_mb();
+
+       return count;
+}
+
+static inline int
+fiq_debugger_ringbuf_push(struct fiq_debugger_ringbuf *rbuf, u8 datum)
+{
+       if (fiq_debugger_ringbuf_room(rbuf) == 0)
+               return 0;
+
+       rbuf->buf[rbuf->head] = datum;
+       smp_mb();
+       rbuf->head = (rbuf->head + 1) % rbuf->len;
+       smp_mb();
+
+       return 1;
+}
diff --git a/arch/arm/common/fiq_glue.S b/arch/arm/common/fiq_glue.S
new file mode 100644
index 0000000..9e3455a
--- /dev/null
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+               .text
+
+               .global fiq_glue_end
+
+               /* fiq stack: r0-r15,cpsr,spsr of interrupted mode */
+
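+               /*
+                * FIQ-banked registers, loaded by fiq_glue_setup below:
+                *   r8  - handler function
+                *   r9  - handler data (first argument)
+                *   r10 - nesting guard: 1 when armed, 0 while the handler
+                *         runs; a nested FIQ only masks F and returns
+                */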
+ENTRY(fiq_glue)
+               /* store pc, cpsr from previous mode */
+               mrs     r12, spsr
+               sub     r11, lr, #4
+               subs    r10, #1
+               bne     nested_fiq
+
+               stmfd   sp!, {r11-r12, lr}
+
+               /* store r8-r14 from previous mode */
+               sub     sp, sp, #(7 * 4)
+               stmia   sp, {r8-r14}^
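+               /* the ^ modifier stores the user-mode copies of r8-r14; the
+                * nop keeps the next instruction from touching banked
+                * registers, a restriction on some ARM cores */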
+               nop
+
+               /* store r0-r7 from previous mode */
+               stmfd   sp!, {r0-r7}
+
+               /* setup func(data,regs) arguments */
+               mov     r0, r9
+               mov     r1, sp
+               mov     r3, r8
+
+               mov     r7, sp
+
+               /* Get sp and lr from non-user modes */
+               and     r4, r12, #MODE_MASK
+               cmp     r4, #USR_MODE
+               beq     fiq_from_usr_mode
+
+               mov     r7, sp
+               orr     r4, r4, #(PSR_I_BIT | PSR_F_BIT)
+               msr     cpsr_c, r4
+               str     sp, [r7, #(4 * 13)]
+               str     lr, [r7, #(4 * 14)]
+               mrs     r5, spsr
+               str     r5, [r7, #(4 * 17)]
+
+               cmp     r4, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
+               /* use fiq stack if we reenter this mode */
+               subne   sp, r7, #(4 * 3)
+
+fiq_from_usr_mode:
+               msr     cpsr_c, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
+               mov     r2, sp
+               sub     sp, r7, #12
+               stmfd   sp!, {r2, ip, lr}
+               /* call func(data,regs) */
+               blx     r3
+               ldmfd   sp, {r2, ip, lr}
+               mov     sp, r2
+
+               /* restore/discard saved state */
+               cmp     r4, #USR_MODE
+               beq     fiq_from_usr_mode_exit
+
+               msr     cpsr_c, r4
+               ldr     sp, [r7, #(4 * 13)]
+               ldr     lr, [r7, #(4 * 14)]
+               msr     spsr_cxsf, r5
+
+fiq_from_usr_mode_exit:
+               msr     cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
+
+               ldmfd   sp!, {r0-r7}
+               add     sp, sp, #(7 * 4)
+               ldmfd   sp!, {r11-r12, lr}
+exit_fiq:
+               msr     spsr_cxsf, r12
+               add     r10, #1
+               movs    pc, r11
+
+nested_fiq:
+               orr     r12, r12, #(PSR_F_BIT)
+               b       exit_fiq
+
+fiq_glue_end:
+
+ENTRY(fiq_glue_setup) /* func, data, sp */
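+               /* briefly switch to FIQ mode so the banked r8-r10 and sp
+                * can be loaded, then restore the caller's mode */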
+               mrs             r3, cpsr
+               msr             cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
+               movs            r8, r0
+               mov             r9, r1
+               mov             sp, r2
+               moveq           r10, #0
+               movne           r10, #1
+               msr             cpsr_c, r3
+               bx              lr
+
diff --git a/arch/arm/common/fiq_glue_setup.c b/arch/arm/common/fiq_glue_setup.c
new file mode 100644
index 0000000..4044c7d
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <asm/fiq.h>
+#include <asm/fiq_glue.h>
+
+extern unsigned char fiq_glue, fiq_glue_end;
+extern void fiq_glue_setup(void *func, void *data, void *sp);
+
+static struct fiq_handler fiq_debugger_fiq_handler = {
+       .name = "fiq_glue",
+};
+DEFINE_PER_CPU(void *, fiq_stack);
+static struct fiq_glue_handler *current_handler;
+static DEFINE_MUTEX(fiq_glue_lock);
+
+static void fiq_glue_setup_helper(void *info)
+{
+       struct fiq_glue_handler *handler = info;
+       fiq_glue_setup(handler->fiq, handler,
+               __get_cpu_var(fiq_stack) + THREAD_START_SP);
+}
+
+int fiq_glue_register_handler(struct fiq_glue_handler *handler)
+{
+       int ret;
+       int cpu;
+
+       if (!handler || !handler->fiq)
+               return -EINVAL;
+
+       mutex_lock(&fiq_glue_lock);
+       if (fiq_stack) {
+               ret = -EBUSY;
+               goto err_busy;
+       }
+
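+       /* give every possible CPU its own FIQ-mode stack */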
+       for_each_possible_cpu(cpu) {
+               void *stack;
+               stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
+               if (WARN_ON(!stack)) {
+                       ret = -ENOMEM;
+                       goto err_alloc_fiq_stack;
+               }
+               per_cpu(fiq_stack, cpu) = stack;
+       }
+
+       ret = claim_fiq(&fiq_debugger_fiq_handler);
+       if (WARN_ON(ret))
+               goto err_claim_fiq;
+
+       current_handler = handler;
+       on_each_cpu(fiq_glue_setup_helper, handler, true);
+       set_fiq_handler(&fiq_glue, &fiq_glue_end - &fiq_glue);
+
+       mutex_unlock(&fiq_glue_lock);
+       return 0;
+
+err_claim_fiq:
+err_alloc_fiq_stack:
+       for_each_possible_cpu(cpu) {
+               free_pages((unsigned long)per_cpu(fiq_stack, cpu),
+                          THREAD_SIZE_ORDER);
+               per_cpu(fiq_stack, cpu) = NULL;
+       }
+err_busy:
+       mutex_unlock(&fiq_glue_lock);
+       return ret;
+}
+
+/**
+ * fiq_glue_resume - Restore fiqs after suspend or low power idle states
+ *
+ * This must be called before calling local_fiq_enable after returning from a
+ * power state where the fiq mode registers were lost. If a driver provided
+ * a resume hook when it registered the handler, it will be called.
+ */
+void fiq_glue_resume(void)
+{
+       if (!current_handler)
+               return;
+       fiq_glue_setup(current_handler->fiq, current_handler,
+               __get_cpu_var(fiq_stack) + THREAD_START_SP);
+       if (current_handler->resume)
+               current_handler->resume(current_handler);
+}
+
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index d5d8d5c..5684cbc 100644
@@ -16,6 +16,7 @@
 #include <asm/shmparam.h>
 #include <asm/cachetype.h>
 #include <asm/outercache.h>
+#include <asm/rodata.h>
 
 #define CACHE_COLOUR(vaddr)    ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
 
@@ -249,7 +250,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
  * Harvard caches are synchronised for the user space address range.
  * This is used for the ARM private sys_cacheflush system call.
  */
-#define flush_cache_user_range(vma,start,end) \
+#define flush_cache_user_range(start,end) \
        __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
 
 /*
diff --git a/arch/arm/include/asm/fiq_debugger.h b/arch/arm/include/asm/fiq_debugger.h
new file mode 100644
index 0000000..4d27488
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * arch/arm/include/asm/fiq_debugger.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ARCH_ARM_MACH_TEGRA_FIQ_DEBUGGER_H_
+#define _ARCH_ARM_MACH_TEGRA_FIQ_DEBUGGER_H_
+
+#include <linux/serial_core.h>
+
+#define FIQ_DEBUGGER_NO_CHAR NO_POLL_CHAR
+#define FIQ_DEBUGGER_BREAK 0x00ff0100
+
+#define FIQ_DEBUGGER_FIQ_IRQ_NAME      "fiq"
+#define FIQ_DEBUGGER_SIGNAL_IRQ_NAME   "signal"
+#define FIQ_DEBUGGER_WAKEUP_IRQ_NAME   "wakeup"
+
+/**
+ * struct fiq_debugger_pdata - fiq debugger platform data
+ * @uart_resume:       used to restore uart state right before enabling
+ *                     the fiq.
+ * @uart_enable:       Do the work necessary to communicate with the uart
+ *                     hw (enable clocks, etc.). This must be ref-counted.
+ * @uart_disable:      Do the work necessary to disable the uart hw
+ *                     (disable clocks, etc.). This must be ref-counted.
+ * @uart_dev_suspend:  called during PM suspend, generally not needed
+ *                     for real fiq mode debugger.
+ * @uart_dev_resume:   called during PM resume, generally not needed
+ *                     for real fiq mode debugger.
+ */
+struct fiq_debugger_pdata {
+       int (*uart_init)(struct platform_device *pdev);
+       void (*uart_free)(struct platform_device *pdev);
+       int (*uart_resume)(struct platform_device *pdev);
+       int (*uart_getc)(struct platform_device *pdev);
+       void (*uart_putc)(struct platform_device *pdev, unsigned int c);
+       void (*uart_flush)(struct platform_device *pdev);
+       void (*uart_enable)(struct platform_device *pdev);
+       void (*uart_disable)(struct platform_device *pdev);
+
+       int (*uart_dev_suspend)(struct platform_device *pdev);
+       int (*uart_dev_resume)(struct platform_device *pdev);
+
+       void (*fiq_enable)(struct platform_device *pdev, unsigned int fiq,
+                                                               bool enable);
+       void (*fiq_ack)(struct platform_device *pdev, unsigned int fiq);
+
+       void (*force_irq)(struct platform_device *pdev, unsigned int irq);
+       void (*force_irq_ack)(struct platform_device *pdev, unsigned int irq);
+};
+
+#endif
diff --git a/arch/arm/include/asm/fiq_glue.h b/arch/arm/include/asm/fiq_glue.h
new file mode 100644
index 0000000..d54c29d
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_FIQ_GLUE_H
+#define __ASM_FIQ_GLUE_H
+
+struct fiq_glue_handler {
+       void (*fiq)(struct fiq_glue_handler *h, void *regs, void *svc_sp);
+       void (*resume)(struct fiq_glue_handler *h);
+};
+
+int fiq_glue_register_handler(struct fiq_glue_handler *handler);
+
+#ifdef CONFIG_FIQ_GLUE
+void fiq_glue_resume(void);
+#else
+static inline void fiq_glue_resume(void) {}
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 436e60b..2740c2a 100644
@@ -5,7 +5,7 @@
 #include <linux/threads.h>
 #include <asm/irq.h>
 
-#define NR_IPI 5
+#define NR_IPI 6
 
 typedef struct {
        unsigned int __softirq_pending;
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 7df239b..5e69437 100644
@@ -66,6 +66,7 @@
 #define   L2X0_STNDBY_MODE_EN          (1 << 0)
 
 /* Registers shifts and masks */
+#define L2X0_CACHE_ID_REV_MASK         (0x3f)
 #define L2X0_CACHE_ID_PART_MASK                (0xf << 6)
 #define L2X0_CACHE_ID_PART_L210                (1 << 6)
 #define L2X0_CACHE_ID_PART_L310                (3 << 6)
 
 #define L2X0_ADDR_FILTER_EN            1
 
+#define REV_PL310_R2P0                         4
+
 #ifndef __ASSEMBLY__
 extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask);
 #if defined(CONFIG_CACHE_L2X0) && defined(CONFIG_OF)
diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h
index 7ecd793..6643d6c 100644
 #define TRACER_ACCESSED_BIT    0
 #define TRACER_RUNNING_BIT     1
 #define TRACER_CYCLE_ACC_BIT   2
+#define TRACER_TRACE_DATA_BIT  3
 #define TRACER_ACCESSED                BIT(TRACER_ACCESSED_BIT)
 #define TRACER_RUNNING         BIT(TRACER_RUNNING_BIT)
 #define TRACER_CYCLE_ACC       BIT(TRACER_CYCLE_ACC_BIT)
+#define TRACER_TRACE_DATA      BIT(TRACER_TRACE_DATA_BIT)
 
 #define TRACER_TIMEOUT 10000
 
-#define etm_writel(t, v, x) \
-       (__raw_writel((v), (t)->etm_regs + (x)))
-#define etm_readl(t, x) (__raw_readl((t)->etm_regs + (x)))
+#define etm_writel(t, id, v, x) \
+       (__raw_writel((v), (t)->etm_regs[(id)] + (x)))
+#define etm_readl(t, id, x) (__raw_readl((t)->etm_regs[(id)] + (x)))
 
 /* CoreSight Management Registers */
 #define CSMR_LOCKACCESS 0xfb0
 #define ETMR_TRACEENCTRL       0x24
 #define ETMTE_INCLEXCL         BIT(24)
 #define ETMR_TRACEENEVT                0x20
+
+#define ETMR_VIEWDATAEVT       0x30
+#define ETMR_VIEWDATACTRL1     0x34
+#define ETMR_VIEWDATACTRL2     0x38
+#define ETMR_VIEWDATACTRL3     0x3c
+#define ETMVDC3_EXCLONLY       BIT(16)
+
 #define ETMCTRL_OPTS           (ETMCTRL_DO_CPRT | \
-                               ETMCTRL_DATA_DO_ADDR | \
                                ETMCTRL_BRANCH_OUTPUT | \
                                ETMCTRL_DO_CONTEXTID)
 
+#define ETMR_TRACEIDR          0x200
+
 /* ETM management registers, "ETM Architecture", 3.5.24 */
 #define ETMMR_OSLAR    0x300
 #define ETMMR_OSLSR    0x304
 #define ETBFF_TRIGIN           BIT(8)
 #define ETBFF_TRIGEVT          BIT(9)
 #define ETBFF_TRIGFL           BIT(10)
+#define ETBFF_STOPFL           BIT(12)
 
 #define etb_writel(t, v, x) \
        (__raw_writel((v), (t)->etb_regs + (x)))
 #define etb_readl(t, x) (__raw_readl((t)->etb_regs + (x)))
 
-#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
-#define etm_unlock(t) \
-       do { etm_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0)
+#define etm_lock(t, id) \
+       do { etm_writel((t), (id), 0, CSMR_LOCKACCESS); } while (0)
+#define etm_unlock(t, id) \
+       do { etm_writel((t), (id), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0)
 
 #define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0)
 #define etb_unlock(t) \
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 5a526af..a565633 100644
@@ -26,6 +26,9 @@ extern void asm_do_IRQ(unsigned int, struct pt_regs *);
 void handle_IRQ(unsigned int, struct pt_regs *);
 void init_IRQ(void);
 
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+
 #endif
 
 #endif
diff --git a/arch/arm/include/asm/mach/mmc.h b/arch/arm/include/asm/mach/mmc.h
new file mode 100644
index 0000000..bca864a
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ *  arch/arm/include/asm/mach/mmc.h
+ */
+#ifndef ASMARM_MACH_MMC_H
+#define ASMARM_MACH_MMC_H
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+
+struct embedded_sdio_data {
+        struct sdio_cis cis;
+        struct sdio_cccr cccr;
+        struct sdio_embedded_func *funcs;
+        int num_funcs;
+};
+
+struct mmc_platform_data {
+       unsigned int ocr_mask;                  /* available voltages */
+       int built_in;                           /* built-in device flag */
+       int card_present;                       /* card detect state */
+       u32 (*translate_vdd)(struct device *, unsigned int);
+       unsigned int (*status)(struct device *);
+       struct embedded_sdio_data *embedded_sdio;
+       int (*register_status_notify)(void (*callback)(int card_present, void *dev_id), void *dev_id);
+};
+
+#endif
diff --git a/arch/arm/include/asm/rodata.h b/arch/arm/include/asm/rodata.h
new file mode 100644
index 0000000..8c8add8
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ *  arch/arm/include/asm/rodata.h
+ *
+ *  Copyright (C) 2011 Google, Inc.
+ *
+ *  Author: Colin Cross <ccross@android.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASMARM_RODATA_H
+#define _ASMARM_RODATA_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_DEBUG_RODATA
+
+int set_memory_rw(unsigned long virt, int numpages);
+int set_memory_ro(unsigned long virt, int numpages);
+
+void mark_rodata_ro(void);
+void set_kernel_text_rw(void);
+void set_kernel_text_ro(void);
+#else
+static inline void set_kernel_text_rw(void) { }
+static inline void set_kernel_text_ro(void) { }
+#endif
+
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index ae29293..7f74b59 100644
@@ -93,4 +93,6 @@ extern void platform_cpu_enable(unsigned int cpu);
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
+extern void smp_send_all_cpu_backtrace(void);
+
 #endif /* ifndef __ASM_ARM_SMP_H */
diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c
index 36d20bd..66bf592 100644
@@ -15,6 +15,7 @@
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/io.h>
+#include <linux/slab.h>
 #include <linux/sysrq.h>
 #include <linux/device.h>
 #include <linux/clk.h>
@@ -37,26 +38,36 @@ MODULE_AUTHOR("Alexander Shishkin");
 struct tracectx {
        unsigned int    etb_bufsz;
        void __iomem    *etb_regs;
-       void __iomem    *etm_regs;
+       void __iomem    **etm_regs;
+       int             etm_regs_count;
        unsigned long   flags;
        int             ncmppairs;
        int             etm_portsz;
+       u32             etb_fc;
+       unsigned long   range_start;
+       unsigned long   range_end;
+       unsigned long   data_range_start;
+       unsigned long   data_range_end;
+       bool            dump_initial_etb;
        struct device   *dev;
        struct clk      *emu_clk;
        struct mutex    mutex;
 };
 
-static struct tracectx tracer;
+static struct tracectx tracer = {
+       .range_start = (unsigned long)_stext,
+       .range_end = (unsigned long)_etext,
+};
 
 static inline bool trace_isrunning(struct tracectx *t)
 {
        return !!(t->flags & TRACER_RUNNING);
 }
 
-static int etm_setup_address_range(struct tracectx *t, int n,
+static int etm_setup_address_range(struct tracectx *t, int id, int n,
                unsigned long start, unsigned long end, int exclude, int data)
 {
-       u32 flags = ETMAAT_ARM | ETMAAT_IGNCONTEXTID | ETMAAT_NSONLY | \
+       u32 flags = ETMAAT_ARM | ETMAAT_IGNCONTEXTID | ETMAAT_IGNSECURITY |
                    ETMAAT_NOVALCMP;
 
        if (n < 1 || n > t->ncmppairs)
@@ -72,95 +83,155 @@ static int etm_setup_address_range(struct tracectx *t, int n,
                flags |= ETMAAT_IEXEC;
 
        /* first comparator for the range */
-       etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2));
-       etm_writel(t, start, ETMR_COMP_VAL(n * 2));
+       etm_writel(t, id, flags, ETMR_COMP_ACC_TYPE(n * 2));
+       etm_writel(t, id, start, ETMR_COMP_VAL(n * 2));
 
        /* second comparator is right next to it */
-       etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2 + 1));
-       etm_writel(t, end, ETMR_COMP_VAL(n * 2 + 1));
-
-       flags = exclude ? ETMTE_INCLEXCL : 0;
-       etm_writel(t, flags | (1 << n), ETMR_TRACEENCTRL);
+       etm_writel(t, id, flags, ETMR_COMP_ACC_TYPE(n * 2 + 1));
+       etm_writel(t, id, end, ETMR_COMP_VAL(n * 2 + 1));
+
+       if (data) {
+               flags = exclude ? ETMVDC3_EXCLONLY : 0;
+               if (exclude)
+                       n += 8;
+               etm_writel(t, id, flags | BIT(n), ETMR_VIEWDATACTRL3);
+       } else {
+               flags = exclude ? ETMTE_INCLEXCL : 0;
+               etm_writel(t, id, flags | (1 << n), ETMR_TRACEENCTRL);
+       }
 
        return 0;
 }
 
-static int trace_start(struct tracectx *t)
+static int trace_start_etm(struct tracectx *t, int id)
 {
        u32 v;
        unsigned long timeout = TRACER_TIMEOUT;
 
-       etb_unlock(t);
-
-       etb_writel(t, 0, ETBR_FORMATTERCTRL);
-       etb_writel(t, 1, ETBR_CTRL);
-
-       etb_lock(t);
-
-       /* configure etm */
        v = ETMCTRL_OPTS | ETMCTRL_PROGRAM | ETMCTRL_PORTSIZE(t->etm_portsz);
 
        if (t->flags & TRACER_CYCLE_ACC)
                v |= ETMCTRL_CYCLEACCURATE;
 
-       etm_unlock(t);
+       if (t->flags & TRACER_TRACE_DATA)
+               v |= ETMCTRL_DATA_DO_ADDR;
+
+       etm_unlock(t, id);
 
-       etm_writel(t, v, ETMR_CTRL);
+       etm_writel(t, id, v, ETMR_CTRL);
 
-       while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
+       while (!(etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
                ;
        if (!timeout) {
                dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
-               etm_lock(t);
+               etm_lock(t, id);
                return -EFAULT;
        }
 
-       etm_setup_address_range(t, 1, (unsigned long)_stext,
-                       (unsigned long)_etext, 0, 0);
-       etm_writel(t, 0, ETMR_TRACEENCTRL2);
-       etm_writel(t, 0, ETMR_TRACESSCTRL);
-       etm_writel(t, 0x6f, ETMR_TRACEENEVT);
+       if (t->range_start || t->range_end)
+               etm_setup_address_range(t, id, 1,
+                                       t->range_start, t->range_end, 0, 0);
+       else
+               etm_writel(t, id, ETMTE_INCLEXCL, ETMR_TRACEENCTRL);
+
+       etm_writel(t, id, 0, ETMR_TRACEENCTRL2);
+       etm_writel(t, id, 0, ETMR_TRACESSCTRL);
+       etm_writel(t, id, 0x6f, ETMR_TRACEENEVT);
+
+       etm_writel(t, id, 0, ETMR_VIEWDATACTRL1);
+       etm_writel(t, id, 0, ETMR_VIEWDATACTRL2);
+
+       if (t->data_range_start || t->data_range_end)
+               etm_setup_address_range(t, id, 2, t->data_range_start,
+                                       t->data_range_end, 0, 1);
+       else
+               etm_writel(t, id, ETMVDC3_EXCLONLY, ETMR_VIEWDATACTRL3);
+
+       etm_writel(t, id, 0x6f, ETMR_VIEWDATAEVT);
 
        v &= ~ETMCTRL_PROGRAM;
        v |= ETMCTRL_PORTSEL;
 
-       etm_writel(t, v, ETMR_CTRL);
+       etm_writel(t, id, v, ETMR_CTRL);
 
        timeout = TRACER_TIMEOUT;
-       while (etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout)
+       while (etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout)
                ;
        if (!timeout) {
                dev_dbg(t->dev, "Waiting for progbit to deassert timed out\n");
-               etm_lock(t);
+               etm_lock(t, id);
                return -EFAULT;
        }
 
-       etm_lock(t);
+       etm_lock(t, id);
+       return 0;
+}
+
+static int trace_start(struct tracectx *t)
+{
+       int ret;
+       int id;
+       u32 etb_fc = t->etb_fc;
+
+       etb_unlock(t);
+
+       t->dump_initial_etb = false;
+       etb_writel(t, 0, ETBR_WRITEADDR);
+       etb_writel(t, etb_fc, ETBR_FORMATTERCTRL);
+       etb_writel(t, 1, ETBR_CTRL);
+
+       etb_lock(t);
+
+       /* configure etm(s) */
+       for (id = 0; id < t->etm_regs_count; id++) {
+               ret = trace_start_etm(t, id);
+               if (ret)
+                       return ret;
+       }
 
        t->flags |= TRACER_RUNNING;
 
        return 0;
 }
 
-static int trace_stop(struct tracectx *t)
+static int trace_stop_etm(struct tracectx *t, int id)
 {
        unsigned long timeout = TRACER_TIMEOUT;
 
-       etm_unlock(t);
+       etm_unlock(t, id);
 
-       etm_writel(t, 0x440, ETMR_CTRL);
-       while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
+       etm_writel(t, id, 0x441, ETMR_CTRL);
+       while (!(etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
                ;
        if (!timeout) {
                dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
-               etm_lock(t);
+               etm_lock(t, id);
                return -EFAULT;
        }
 
-       etm_lock(t);
+       etm_lock(t, id);
+       return 0;
+}
+
+static int trace_stop(struct tracectx *t)
+{
+       int id;
+       int ret;
+       unsigned long timeout = TRACER_TIMEOUT;
+       u32 etb_fc = t->etb_fc;
+
+       for (id = 0; id < t->etm_regs_count; id++) {
+               ret = trace_stop_etm(t, id);
+               if (ret)
+                       return ret;
+       }
 
        etb_unlock(t);
-       etb_writel(t, ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL);
+       if (etb_fc) {
+               etb_fc |= ETBFF_STOPFL;
+               etb_writel(t, t->etb_fc, ETBR_FORMATTERCTRL);
+       }
+       etb_writel(t, etb_fc | ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL);
 
        timeout = TRACER_TIMEOUT;
        while (etb_readl(t, ETBR_FORMATTERCTRL) &
@@ -185,24 +256,15 @@ static int trace_stop(struct tracectx *t)
 static int etb_getdatalen(struct tracectx *t)
 {
        u32 v;
-       int rp, wp;
+       int wp;
 
        v = etb_readl(t, ETBR_STATUS);
 
        if (v & 1)
                return t->etb_bufsz;
 
-       rp = etb_readl(t, ETBR_READADDR);
        wp = etb_readl(t, ETBR_WRITEADDR);
-
-       if (rp > wp) {
-               etb_writel(t, 0, ETBR_READADDR);
-               etb_writel(t, 0, ETBR_WRITEADDR);
-
-               return 0;
-       }
-
-       return wp - rp;
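+       /* trace_start() zeroes the write pointer, so on a partial buffer it
+        * directly gives the number of valid words */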
+       return wp;
 }
 
 /* sysrq+v will always stop the running trace and leave it at that */
@@ -235,21 +297,18 @@ static void etm_dump(void)
                printk("%08x", cpu_to_be32(etb_readl(t, ETBR_READMEM)));
        printk(KERN_INFO "\n--- ETB buffer end ---\n");
 
-       /* deassert the overflow bit */
-       etb_writel(t, 1, ETBR_CTRL);
-       etb_writel(t, 0, ETBR_CTRL);
-
-       etb_writel(t, 0, ETBR_TRIGGERCOUNT);
-       etb_writel(t, 0, ETBR_READADDR);
-       etb_writel(t, 0, ETBR_WRITEADDR);
-
        etb_lock(t);
 }
 
 static void sysrq_etm_dump(int key)
 {
+       if (!mutex_trylock(&tracer.mutex)) {
+               printk(KERN_INFO "Tracing hardware busy\n");
+               return;
+       }
        dev_dbg(tracer.dev, "Dumping ETB buffer\n");
        etm_dump();
+       mutex_unlock(&tracer.mutex);
 }
 
 static struct sysrq_key_op sysrq_etm_op = {
@@ -276,6 +335,10 @@ static ssize_t etb_read(struct file *file, char __user *data,
        struct tracectx *t = file->private_data;
        u32 first = 0;
        u32 *buf;
+       int wpos;
+       int skip;
+       long wlength;
+       loff_t pos = *ppos;
 
        mutex_lock(&t->mutex);
 
@@ -287,31 +350,39 @@ static ssize_t etb_read(struct file *file, char __user *data,
        etb_unlock(t);
 
        total = etb_getdatalen(t);
+       if (total == 0 && t->dump_initial_etb)
+               total = t->etb_bufsz;
        if (total == t->etb_bufsz)
                first = etb_readl(t, ETBR_WRITEADDR);
 
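+       /* ETB data is read one 32-bit word at a time: split the file offset
+        * into a word index (wpos) and a byte offset within that word (skip)
+        * so reads can resume at an arbitrary *ppos */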
+       if (pos > total * 4) {
+               skip = 0;
+               wpos = total;
+       } else {
+               skip = (int)pos % 4;
+               wpos = (int)pos / 4;
+       }
+       total -= wpos;
+       first = (first + wpos) % t->etb_bufsz;
+
        etb_writel(t, first, ETBR_READADDR);
 
-       length = min(total * 4, (int)len);
-       buf = vmalloc(length);
+       wlength = min(total, DIV_ROUND_UP(skip + (int)len, 4));
+       length = min(total * 4 - skip, (int)len);
+       buf = vmalloc(wlength * 4);
 
-       dev_dbg(t->dev, "ETB buffer length: %d\n", total);
+       dev_dbg(t->dev, "ETB read %ld bytes to %lld from %ld words at %d\n",
+               length, pos, wlength, first);
+       dev_dbg(t->dev, "ETB buffer length: %d\n", total + wpos);
        dev_dbg(t->dev, "ETB status reg: %x\n", etb_readl(t, ETBR_STATUS));
-       for (i = 0; i < length / 4; i++)
+       for (i = 0; i < wlength; i++)
                buf[i] = etb_readl(t, ETBR_READMEM);
 
-       /* the only way to deassert overflow bit in ETB status is this */
-       etb_writel(t, 1, ETBR_CTRL);
-       etb_writel(t, 0, ETBR_CTRL);
-
-       etb_writel(t, 0, ETBR_WRITEADDR);
-       etb_writel(t, 0, ETBR_READADDR);
-       etb_writel(t, 0, ETBR_TRIGGERCOUNT);
-
        etb_lock(t);
 
-       length -= copy_to_user(data, buf, length);
+       length -= copy_to_user(data, (u8 *)buf + skip, length);
        vfree(buf);
+       *ppos = pos + length;
 
 out:
        mutex_unlock(&t->mutex);
@@ -348,28 +419,17 @@ static int __devinit etb_probe(struct amba_device *dev, const struct amba_id *id
        if (ret)
                goto out;
 
+       mutex_lock(&t->mutex);
        t->etb_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res));
        if (!t->etb_regs) {
                ret = -ENOMEM;
                goto out_release;
        }
 
+       t->dev = &dev->dev;
+       t->dump_initial_etb = true;
        amba_set_drvdata(dev, t);
 
-       etb_miscdev.parent = &dev->dev;
-
-       ret = misc_register(&etb_miscdev);
-       if (ret)
-               goto out_unmap;
-
-       t->emu_clk = clk_get(&dev->dev, "emu_src_ck");
-       if (IS_ERR(t->emu_clk)) {
-               dev_dbg(&dev->dev, "Failed to obtain emu_src_ck.\n");
-               return -EFAULT;
-       }
-
-       clk_enable(t->emu_clk);
-
        etb_unlock(t);
        t->etb_bufsz = etb_readl(t, ETBR_DEPTH);
        dev_dbg(&dev->dev, "Size: %x\n", t->etb_bufsz);
@@ -378,6 +438,20 @@ static int __devinit etb_probe(struct amba_device *dev, const struct amba_id *id
        etb_writel(t, 0, ETBR_CTRL);
        etb_writel(t, 0x1000, ETBR_FORMATTERCTRL);
        etb_lock(t);
+       mutex_unlock(&t->mutex);
+
+       etb_miscdev.parent = &dev->dev;
+
+       ret = misc_register(&etb_miscdev);
+       if (ret)
+               goto out_unmap;
+
+       /* Get optional clock. Currently used to select clock source on omap3 */
+       t->emu_clk = clk_get(&dev->dev, "emu_src_ck");
+       if (IS_ERR(t->emu_clk))
+               dev_dbg(&dev->dev, "Failed to obtain emu_src_ck.\n");
+       else
+               clk_enable(t->emu_clk);
 
        dev_dbg(&dev->dev, "ETB AMBA driver initialized.\n");
 
@@ -385,10 +459,13 @@ out:
        return ret;
 
 out_unmap:
+       mutex_lock(&t->mutex);
        amba_set_drvdata(dev, NULL);
        iounmap(t->etb_regs);
+       t->etb_regs = NULL;
 
 out_release:
+       mutex_unlock(&t->mutex);
        amba_release_regions(dev);
 
        return ret;
@@ -403,8 +480,10 @@ static int etb_remove(struct amba_device *dev)
        iounmap(t->etb_regs);
        t->etb_regs = NULL;
 
-       clk_disable(t->emu_clk);
-       clk_put(t->emu_clk);
+       if (!IS_ERR(t->emu_clk)) {
+               clk_disable(t->emu_clk);
+               clk_put(t->emu_clk);
+       }
 
        amba_release_regions(dev);
 
@@ -448,7 +527,10 @@ static ssize_t trace_running_store(struct kobject *kobj,
                return -EINVAL;
 
        mutex_lock(&tracer.mutex);
-       ret = value ? trace_start(&tracer) : trace_stop(&tracer);
+       if (!tracer.etb_regs)
+               ret = -ENODEV;
+       else
+               ret = value ? trace_start(&tracer) : trace_stop(&tracer);
        mutex_unlock(&tracer.mutex);
 
        return ret ? : n;
@@ -463,36 +545,50 @@ static ssize_t trace_info_show(struct kobject *kobj,
 {
        u32 etb_wa, etb_ra, etb_st, etb_fc, etm_ctrl, etm_st;
        int datalen;
+       int id;
+       int ret;
 
-       etb_unlock(&tracer);
-       datalen = etb_getdatalen(&tracer);
-       etb_wa = etb_readl(&tracer, ETBR_WRITEADDR);
-       etb_ra = etb_readl(&tracer, ETBR_READADDR);
-       etb_st = etb_readl(&tracer, ETBR_STATUS);
-       etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL);
-       etb_lock(&tracer);
-
-       etm_unlock(&tracer);
-       etm_ctrl = etm_readl(&tracer, ETMR_CTRL);
-       etm_st = etm_readl(&tracer, ETMR_STATUS);
-       etm_lock(&tracer);
+       mutex_lock(&tracer.mutex);
+       if (tracer.etb_regs) {
+               etb_unlock(&tracer);
+               datalen = etb_getdatalen(&tracer);
+               etb_wa = etb_readl(&tracer, ETBR_WRITEADDR);
+               etb_ra = etb_readl(&tracer, ETBR_READADDR);
+               etb_st = etb_readl(&tracer, ETBR_STATUS);
+               etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL);
+               etb_lock(&tracer);
+       } else {
+               etb_wa = etb_ra = etb_st = etb_fc = ~0;
+               datalen = -1;
+       }
 
-       return sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n"
+       ret = sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n"
                        "ETBR_WRITEADDR:\t%08x\n"
                        "ETBR_READADDR:\t%08x\n"
                        "ETBR_STATUS:\t%08x\n"
-                       "ETBR_FORMATTERCTRL:\t%08x\n"
-                       "ETMR_CTRL:\t%08x\n"
-                       "ETMR_STATUS:\t%08x\n",
+                       "ETBR_FORMATTERCTRL:\t%08x\n",
                        datalen,
                        tracer.ncmppairs,
                        etb_wa,
                        etb_ra,
                        etb_st,
-                       etb_fc,
+                       etb_fc
+                       );
+
+       for (id = 0; id < tracer.etm_regs_count; id++) {
+               etm_unlock(&tracer, id);
+               etm_ctrl = etm_readl(&tracer, id, ETMR_CTRL);
+               etm_st = etm_readl(&tracer, id, ETMR_STATUS);
+               etm_lock(&tracer, id);
+               ret += sprintf(buf + ret, "ETMR_CTRL:\t%08x\n"
+                       "ETMR_STATUS:\t%08x\n",
                        etm_ctrl,
                        etm_st
                        );
+       }
+       mutex_unlock(&tracer.mutex);
+
+       return ret;
 }
 
 static struct kobj_attribute trace_info_attr =
@@ -531,42 +627,121 @@ static ssize_t trace_mode_store(struct kobject *kobj,
 static struct kobj_attribute trace_mode_attr =
        __ATTR(trace_mode, 0644, trace_mode_show, trace_mode_store);
 
+static ssize_t trace_range_show(struct kobject *kobj,
+                                 struct kobj_attribute *attr,
+                                 char *buf)
+{
+       return sprintf(buf, "%08lx %08lx\n",
+                       tracer.range_start, tracer.range_end);
+}
+
+static ssize_t trace_range_store(struct kobject *kobj,
+                                  struct kobj_attribute *attr,
+                                  const char *buf, size_t n)
+{
+       unsigned long range_start, range_end;
+
+       if (sscanf(buf, "%lx %lx", &range_start, &range_end) != 2)
+               return -EINVAL;
+
+       mutex_lock(&tracer.mutex);
+       tracer.range_start = range_start;
+       tracer.range_end = range_end;
+       mutex_unlock(&tracer.mutex);
+
+       return n;
+}
+
+static struct kobj_attribute trace_range_attr =
+       __ATTR(trace_range, 0644, trace_range_show, trace_range_store);
+
+static ssize_t trace_data_range_show(struct kobject *kobj,
+                                 struct kobj_attribute *attr,
+                                 char *buf)
+{
+       unsigned long range_start;
+       u64 range_end;
+       mutex_lock(&tracer.mutex);
+       range_start = tracer.data_range_start;
+       range_end = tracer.data_range_end;
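+       /* an empty range with data tracing enabled means the whole 4GB
+        * address space */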
+       if (!range_end && (tracer.flags & TRACER_TRACE_DATA))
+               range_end = 0x100000000ULL;
+       mutex_unlock(&tracer.mutex);
+       return sprintf(buf, "%08lx %08llx\n", range_start, range_end);
+}
+
+static ssize_t trace_data_range_store(struct kobject *kobj,
+                                  struct kobj_attribute *attr,
+                                  const char *buf, size_t n)
+{
+       unsigned long range_start;
+       u64 range_end;
+
+       if (sscanf(buf, "%lx %llx", &range_start, &range_end) != 2)
+               return -EINVAL;
+
+       mutex_lock(&tracer.mutex);
+       tracer.data_range_start = range_start;
+       tracer.data_range_end = (unsigned long)range_end;
+       if (range_end)
+               tracer.flags |= TRACER_TRACE_DATA;
+       else
+               tracer.flags &= ~TRACER_TRACE_DATA;
+       mutex_unlock(&tracer.mutex);
+
+       return n;
+}
+
+static struct kobj_attribute trace_data_range_attr =
+       __ATTR(trace_data_range, 0644,
+               trace_data_range_show, trace_data_range_store);
+
 static int __devinit etm_probe(struct amba_device *dev, const struct amba_id *id)
 {
        struct tracectx *t = &tracer;
        int ret = 0;
+       void __iomem **new_regs;
+       int new_count;
 
-       if (t->etm_regs) {
-               dev_dbg(&dev->dev, "ETM already initialized\n");
-               ret = -EBUSY;
+       mutex_lock(&t->mutex);
+       new_count = t->etm_regs_count + 1;
+       new_regs = krealloc(t->etm_regs,
+                               sizeof(t->etm_regs[0]) * new_count, GFP_KERNEL);
+
+       if (!new_regs) {
+               dev_dbg(&dev->dev, "Failed to allocate ETM register array\n");
+               ret = -ENOMEM;
                goto out;
        }
+       t->etm_regs = new_regs;
 
        ret = amba_request_regions(dev, NULL);
        if (ret)
                goto out;
 
-       t->etm_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res));
-       if (!t->etm_regs) {
+       t->etm_regs[t->etm_regs_count] =
+               ioremap_nocache(dev->res.start, resource_size(&dev->res));
+       if (!t->etm_regs[t->etm_regs_count]) {
                ret = -ENOMEM;
                goto out_release;
        }
 
-       amba_set_drvdata(dev, t);
+       amba_set_drvdata(dev, t->etm_regs[t->etm_regs_count]);
 
-       mutex_init(&t->mutex);
-       t->dev = &dev->dev;
-       t->flags = TRACER_CYCLE_ACC;
+       t->flags = TRACER_CYCLE_ACC | TRACER_TRACE_DATA;
        t->etm_portsz = 1;
 
-       etm_unlock(t);
-       (void)etm_readl(t, ETMMR_PDSR);
+       etm_unlock(t, t->etm_regs_count);
+       (void)etm_readl(t, t->etm_regs_count, ETMMR_PDSR);
        /* dummy first read */
-       (void)etm_readl(&tracer, ETMMR_OSSRR);
+       (void)etm_readl(&tracer, t->etm_regs_count, ETMMR_OSSRR);
 
-       t->ncmppairs = etm_readl(t, ETMR_CONFCODE) & 0xf;
-       etm_writel(t, 0x440, ETMR_CTRL);
-       etm_lock(t);
+       t->ncmppairs = etm_readl(t, t->etm_regs_count, ETMR_CONFCODE) & 0xf;
+       etm_writel(t, t->etm_regs_count, 0x441, ETMR_CTRL);
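+       /* give each ETM a distinct trace source id so the ETB formatter can
+        * demultiplex the interleaved streams */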
+       etm_writel(t, t->etm_regs_count, new_count, ETMR_TRACEIDR);
+       etm_lock(t, t->etm_regs_count);
 
        ret = sysfs_create_file(&dev->dev.kobj,
                        &trace_running_attr.attr);
@@ -582,36 +757,68 @@ static int __devinit etm_probe(struct amba_device *dev, const struct amba_id *id
        if (ret)
                dev_dbg(&dev->dev, "Failed to create trace_mode in sysfs\n");
 
-       dev_dbg(t->dev, "ETM AMBA driver initialized.\n");
+       ret = sysfs_create_file(&dev->dev.kobj, &trace_range_attr.attr);
+       if (ret)
+               dev_dbg(&dev->dev, "Failed to create trace_range in sysfs\n");
+
+       ret = sysfs_create_file(&dev->dev.kobj, &trace_data_range_attr.attr);
+       if (ret)
+               dev_dbg(&dev->dev,
+                       "Failed to create trace_data_range in sysfs\n");
+
+       dev_dbg(&dev->dev, "ETM AMBA driver initialized.\n");
+
+       /* Enable formatter if there are multiple trace sources */
+       if (new_count > 1)
+               t->etb_fc = ETBFF_ENFCONT | ETBFF_ENFTC;
+
+       t->etm_regs_count = new_count;
 
 out:
+       mutex_unlock(&t->mutex);
        return ret;
 
 out_unmap:
        amba_set_drvdata(dev, NULL);
-       iounmap(t->etm_regs);
+       iounmap(t->etm_regs[t->etm_regs_count]);
 
 out_release:
        amba_release_regions(dev);
 
+       mutex_unlock(&t->mutex);
        return ret;
 }
 
 static int etm_remove(struct amba_device *dev)
 {
-       struct tracectx *t = amba_get_drvdata(dev);
+       int i;
+       struct tracectx *t = &tracer;
+       void __iomem    *etm_regs = amba_get_drvdata(dev);
+
+       sysfs_remove_file(&dev->dev.kobj, &trace_running_attr.attr);
+       sysfs_remove_file(&dev->dev.kobj, &trace_info_attr.attr);
+       sysfs_remove_file(&dev->dev.kobj, &trace_mode_attr.attr);
+       sysfs_remove_file(&dev->dev.kobj, &trace_range_attr.attr);
+       sysfs_remove_file(&dev->dev.kobj, &trace_data_range_attr.attr);
 
        amba_set_drvdata(dev, NULL);
 
-       iounmap(t->etm_regs);
-       t->etm_regs = NULL;
+       mutex_lock(&t->mutex);
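+       /* find this ETM's slot and close the gap in the register array */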
+       for (i = 0; i < t->etm_regs_count; i++)
+               if (t->etm_regs[i] == etm_regs)
+                       break;
+       for (; i < t->etm_regs_count - 1; i++)
+               t->etm_regs[i] = t->etm_regs[i + 1];
+       t->etm_regs_count--;
+       if (!t->etm_regs_count) {
+               kfree(t->etm_regs);
+               t->etm_regs = NULL;
+       }
+       mutex_unlock(&t->mutex);
 
+       iounmap(etm_regs);
        amba_release_regions(dev);
 
-       sysfs_remove_file(&dev->dev.kobj, &trace_running_attr.attr);
-       sysfs_remove_file(&dev->dev.kobj, &trace_info_attr.attr);
-       sysfs_remove_file(&dev->dev.kobj, &trace_mode_attr.attr);
-
        return 0;
 }
 
@@ -620,6 +827,10 @@ static struct amba_id etm_ids[] = {
                .id     = 0x0003b921,
                .mask   = 0x0007ffff,
        },
+       {
+               .id     = 0x0003b950,
+               .mask   = 0x0007ffff,
+       },
        { 0, 0 },
 };
 
@@ -637,6 +848,8 @@ static int __init etm_init(void)
 {
        int retval;
 
+       mutex_init(&tracer.mutex);
+
        retval = amba_driver_register(&etb_driver);
        if (retval) {
                printk(KERN_ERR "Failed to register etb\n");
index c0062ad..5722abf 100644 (file)
@@ -13,6 +13,7 @@
  */
 
 #include <linux/ftrace.h>
+#include <linux/module.h>
 #include <linux/uaccess.h>
 
 #include <asm/cacheflush.h>
@@ -115,6 +116,20 @@ static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
 }
 #endif
 
+int ftrace_arch_code_modify_prepare(void)
+{
+       set_kernel_text_rw();
+       set_all_modules_text_rw();
+       return 0;
+}
+
+int ftrace_arch_code_modify_post_process(void)
+{
+       set_all_modules_text_ro();
+       set_kernel_text_ro();
+       return 0;
+}
+
 static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
 {
        return ftrace_gen_branch(pc, addr, true);
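
The two hooks above pair with the CONFIG_DEBUG_RODATA support added later in this merge (arch/arm/mm/rodata.c): the generic ftrace core brackets every code update with them, so kernel and module text are writable only for the patching window. A minimal sketch of that ordering, assuming the core drives it as in kernel/trace/ftrace.c:

	ftrace_arch_code_modify_prepare();	/* text -> RW */
	/* ... mcount call sites rewritten here ... */
	ftrace_arch_code_modify_post_process();	/* text -> RO */
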
index 1911dae..2050399 100644 (file)
@@ -10,6 +10,8 @@
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
 #include <linux/syscore_ops.h>
 #include <linux/string.h>
 
@@ -103,6 +105,25 @@ static struct syscore_ops leds_syscore_ops = {
        .resume         = leds_resume,
 };
 
+static int leds_idle_notifier(struct notifier_block *nb, unsigned long val,
+                                void *data)
+{
+       switch (val) {
+       case IDLE_START:
+               leds_event(led_idle_start);
+               break;
+       case IDLE_END:
+               leds_event(led_idle_end);
+               break;
+       }
+
+       return 0;
+}
+
+static struct notifier_block leds_idle_nb = {
+       .notifier_call = leds_idle_notifier,
+};
+
 static int __init leds_init(void)
 {
        int ret;
@@ -111,8 +132,11 @@ static int __init leds_init(void)
                ret = device_register(&leds_device);
        if (ret == 0)
                ret = device_create_file(&leds_device, &dev_attr_event);
-       if (ret == 0)
+       if (ret == 0) {
                register_syscore_ops(&leds_syscore_ops);
+               idle_notifier_register(&leds_idle_nb);
+       }
+
        return ret;
 }
 
index 971d65c..c26f414 100644 (file)
@@ -31,9 +31,9 @@
 #include <linux/random.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/cpuidle.h>
+#include <linux/console.h>
 
 #include <asm/cacheflush.h>
-#include <asm/leds.h>
 #include <asm/processor.h>
 #include <asm/system.h>
 #include <asm/thread_notify.h>
@@ -63,6 +63,18 @@ static volatile int hlt_counter;
 
 #include <mach/system.h>
 
+#ifdef CONFIG_SMP
+void arch_trigger_all_cpu_backtrace(void)
+{
+       smp_send_all_cpu_backtrace();
+}
+#else
+void arch_trigger_all_cpu_backtrace(void)
+{
+       dump_stack();
+}
+#endif
+
 void disable_hlt(void)
 {
        hlt_counter++;
@@ -95,6 +107,31 @@ __setup("hlt", hlt_setup);
 extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
 typedef void (*phys_reset_t)(unsigned long);
 
+#ifdef CONFIG_ARM_FLUSH_CONSOLE_ON_RESTART
+void arm_machine_flush_console(void)
+{
+       printk("\n");
+       pr_emerg("Restarting %s\n", linux_banner);
+       if (console_trylock()) {
+               console_unlock();
+               return;
+       }
+
+       mdelay(50);
+
+       local_irq_disable();
+       if (!console_trylock())
+               pr_emerg("arm_restart: Console was locked! Busting\n");
+       else
+               pr_emerg("arm_restart: Console was locked!\n");
+       console_unlock();
+}
+#else
+void arm_machine_flush_console(void)
+{
+}
+#endif
+
 /*
  * A temporary stack to use for CPU reset. This is static so that we
  * don't clobber it with the identity mapping. When running with this
@@ -206,9 +243,9 @@ void cpu_idle(void)
 
        /* endless idle loop with no priority at all */
        while (1) {
+               idle_notifier_call_chain(IDLE_START);
                tick_nohz_idle_enter();
                rcu_idle_enter();
-               leds_event(led_idle_start);
                while (!need_resched()) {
 #ifdef CONFIG_HOTPLUG_CPU
                        if (cpu_is_offline(smp_processor_id()))
@@ -236,9 +273,9 @@ void cpu_idle(void)
                                local_irq_enable();
                        }
                }
-               leds_event(led_idle_end);
                rcu_idle_exit();
                tick_nohz_idle_exit();
+               idle_notifier_call_chain(IDLE_END);
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
@@ -279,6 +316,10 @@ void machine_restart(char *cmd)
 {
        machine_shutdown();
 
+       /* Flush the console to make sure all the relevant messages make it
+        * out to the console drivers */
+       arm_machine_flush_console();
+
        arm_pm_restart(reboot_mode, cmd);
 
        /* Give a grace period for failure to restart of 1s */
@@ -289,6 +330,77 @@ void machine_restart(char *cmd)
        while (1);
 }
 
+/*
+ * dump a block of kernel memory from around the given address
+ */
+static void show_data(unsigned long addr, int nbytes, const char *name)
+{
+       int     i, j;
+       int     nlines;
+       u32     *p;
+
+       /*
+        * don't attempt to dump non-kernel addresses or
+        * values that are probably just small negative numbers
+        */
+       if (addr < PAGE_OFFSET || addr > -256UL)
+               return;
+
+       printk("\n%s: %#lx:\n", name, addr);
+
+       /*
+        * round address down to a 32 bit boundary
+        * and always dump a multiple of 32 bytes
+        */
+       p = (u32 *)(addr & ~(sizeof(u32) - 1));
+       nbytes += (addr & (sizeof(u32) - 1));
+       nlines = (nbytes + 31) / 32;
+
+       for (i = 0; i < nlines; i++) {
+               /*
+                * just display low 16 bits of address to keep
+                * each line of the dump < 80 characters
+                */
+               printk("%04lx ", (unsigned long)p & 0xffff);
+               for (j = 0; j < 8; j++) {
+                       u32     data;
+                       if (probe_kernel_address(p, data)) {
+                               printk(" ********");
+                       } else {
+                               printk(" %08x", data);
+                       }
+                       ++p;
+               }
+               printk("\n");
+       }
+}
+
+static void show_extra_register_data(struct pt_regs *regs, int nbytes)
+{
+       mm_segment_t fs;
+
+       fs = get_fs();
+       set_fs(KERNEL_DS);
+       show_data(regs->ARM_pc - nbytes, nbytes * 2, "PC");
+       show_data(regs->ARM_lr - nbytes, nbytes * 2, "LR");
+       show_data(regs->ARM_sp - nbytes, nbytes * 2, "SP");
+       show_data(regs->ARM_ip - nbytes, nbytes * 2, "IP");
+       show_data(regs->ARM_fp - nbytes, nbytes * 2, "FP");
+       show_data(regs->ARM_r0 - nbytes, nbytes * 2, "R0");
+       show_data(regs->ARM_r1 - nbytes, nbytes * 2, "R1");
+       show_data(regs->ARM_r2 - nbytes, nbytes * 2, "R2");
+       show_data(regs->ARM_r3 - nbytes, nbytes * 2, "R3");
+       show_data(regs->ARM_r4 - nbytes, nbytes * 2, "R4");
+       show_data(regs->ARM_r5 - nbytes, nbytes * 2, "R5");
+       show_data(regs->ARM_r6 - nbytes, nbytes * 2, "R6");
+       show_data(regs->ARM_r7 - nbytes, nbytes * 2, "R7");
+       show_data(regs->ARM_r8 - nbytes, nbytes * 2, "R8");
+       show_data(regs->ARM_r9 - nbytes, nbytes * 2, "R9");
+       show_data(regs->ARM_r10 - nbytes, nbytes * 2, "R10");
+       set_fs(fs);
+}
+
 void __show_regs(struct pt_regs *regs)
 {
        unsigned long flags;
@@ -348,6 +460,8 @@ void __show_regs(struct pt_regs *regs)
                printk("Control: %08x%s\n", ctrl, buf);
        }
 #endif
+
+       show_extra_register_data(regs, 128);
 }
 
 void show_regs(struct pt_regs * regs)
index cdeb727..bfb93dc 100644 (file)
@@ -56,6 +56,7 @@ enum ipi_msg_type {
        IPI_CALL_FUNC,
        IPI_CALL_FUNC_SINGLE,
        IPI_CPU_STOP,
+       IPI_CPU_BACKTRACE,
 };
 
 int __cpuinit __cpu_up(unsigned int cpu)
@@ -393,6 +394,7 @@ static const char *ipi_types[NR_IPI] = {
        S(IPI_CALL_FUNC, "Function call interrupts"),
        S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
        S(IPI_CPU_STOP, "CPU stop interrupts"),
+       S(IPI_CPU_BACKTRACE, "CPU backtrace"),
 };
 
 void show_ipi_list(struct seq_file *p, int prec)
@@ -514,6 +516,58 @@ static void ipi_cpu_stop(unsigned int cpu)
                cpu_relax();
 }
 
+static cpumask_t backtrace_mask;
+static DEFINE_RAW_SPINLOCK(backtrace_lock);
+
+/* "in progress" flag of arch_trigger_all_cpu_backtrace */
+static unsigned long backtrace_flag;
+
+void smp_send_all_cpu_backtrace(void)
+{
+       unsigned int this_cpu = smp_processor_id();
+       int i;
+
+       if (test_and_set_bit(0, &backtrace_flag))
+               /*
+                * If there is already a trigger_all_cpu_backtrace() in progress
+                * (backtrace_flag == 1), don't output a duplicate set of CPU dumps.
+                */
+               return;
+
+       cpumask_copy(&backtrace_mask, cpu_online_mask);
+       cpu_clear(this_cpu, backtrace_mask);
+
+       pr_info("Backtrace for cpu %d (current):\n", this_cpu);
+       dump_stack();
+
+       pr_info("\nsending IPI to all other CPUs:\n");
+       smp_cross_call(&backtrace_mask, IPI_CPU_BACKTRACE);
+
+       /* Wait for up to 10 seconds for all other CPUs to do the backtrace */
+       for (i = 0; i < 10 * 1000; i++) {
+               if (cpumask_empty(&backtrace_mask))
+                       break;
+               mdelay(1);
+       }
+
+       clear_bit(0, &backtrace_flag);
+       smp_mb__after_clear_bit();
+}
+
+/*
+ * ipi_cpu_backtrace - handle IPI from smp_send_all_cpu_backtrace()
+ */
+static void ipi_cpu_backtrace(unsigned int cpu, struct pt_regs *regs)
+{
+       if (cpu_isset(cpu, backtrace_mask)) {
+               raw_spin_lock(&backtrace_lock);
+               pr_warning("IPI backtrace for cpu %d\n", cpu);
+               show_regs(regs);
+               raw_spin_unlock(&backtrace_lock);
+               cpu_clear(cpu, backtrace_mask);
+       }
+}
+
 /*
  * Main handler for inter-processor interrupts
  */
@@ -559,6 +613,10 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
                irq_exit();
                break;
 
+       case IPI_CPU_BACKTRACE:
+               ipi_cpu_backtrace(cpu, regs);
+               break;
+
        default:
                printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
                       cpu, ipinr);
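
A hedged usage sketch for the new IPI: arch_trigger_all_cpu_backtrace(), wired up in the process.c hunk earlier in this merge, routes to smp_send_all_cpu_backtrace() on SMP, so any stall or watchdog path can request a dump of every online CPU. The caller name here is hypothetical:

	/* Hypothetical stall detector -- illustrative only. */
	if (watchdog_detected_stall())
		arch_trigger_all_cpu_backtrace();
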
index f84dfe6..504b28a 100644 (file)
@@ -491,7 +491,9 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
                if (end > vma->vm_end)
                        end = vma->vm_end;
 
-               flush_cache_user_range(vma, start, end);
+               up_read(&mm->mmap_sem);
+               flush_cache_user_range(start, end);
+               return;
        }
        up_read(&mm->mmap_sem);
 }
index bca7e61..37da2cc 100644 (file)
@@ -7,6 +7,7 @@ obj-y                           := dma-mapping.o extable.o fault.o init.o \
 
 obj-$(CONFIG_MMU)              += fault-armv.o flush.o idmap.o ioremap.o \
                                   mmap.o pgd.o mmu.o vmregion.o
+obj-$(CONFIG_DEBUG_RODATA)     += rodata.o
 
 ifneq ($(CONFIG_MMU),y)
 obj-y                          += nommu.o
index b1e192b..a6a84ff 100644 (file)
@@ -32,6 +32,16 @@ static void __iomem *l2x0_base;
 static DEFINE_RAW_SPINLOCK(l2x0_lock);
 static uint32_t l2x0_way_mask; /* Bitmask of active ways */
 static uint32_t l2x0_size;
+static u32 l2x0_cache_id;
+static unsigned int l2x0_sets;
+static unsigned int l2x0_ways;
+
+static inline bool is_pl310_rev(int rev)
+{
+       return (l2x0_cache_id &
+               (L2X0_CACHE_ID_PART_MASK | L2X0_CACHE_ID_REV_MASK)) ==
+                       (L2X0_CACHE_ID_PART_L310 | rev);
+}
 
 struct l2x0_regs l2x0_saved_regs;
 
@@ -131,6 +141,23 @@ static void l2x0_cache_sync(void)
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
+#ifdef CONFIG_PL310_ERRATA_727915
+static void l2x0_for_each_set_way(void __iomem *reg)
+{
+       int set;
+       int way;
+       unsigned long flags;
+
+       for (way = 0; way < l2x0_ways; way++) {
+               raw_spin_lock_irqsave(&l2x0_lock, flags);
+               for (set = 0; set < l2x0_sets; set++)
+                       writel_relaxed((way << 28) | (set << 5), reg);
+               cache_sync();
+               raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+       }
+}
+#endif
+
 static void __l2x0_flush_all(void)
 {
        debug_writel(0x03);
@@ -144,6 +171,13 @@ static void l2x0_flush_all(void)
 {
        unsigned long flags;
 
+#ifdef CONFIG_PL310_ERRATA_727915
+       if (is_pl310_rev(REV_PL310_R2P0)) {
+               l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_INV_LINE_IDX);
+               return;
+       }
+#endif
+
        /* clean all ways */
        raw_spin_lock_irqsave(&l2x0_lock, flags);
        __l2x0_flush_all();
@@ -154,11 +188,20 @@ static void l2x0_clean_all(void)
 {
        unsigned long flags;
 
+#ifdef CONFIG_PL310_ERRATA_727915
+       if (is_pl310_rev(REV_PL310_R2P0)) {
+               l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_LINE_IDX);
+               return;
+       }
+#endif
+
        /* clean all ways */
        raw_spin_lock_irqsave(&l2x0_lock, flags);
+       debug_writel(0x03);
        writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
        cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
        cache_sync();
+       debug_writel(0x00);
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
@@ -310,47 +353,46 @@ static void l2x0_unlock(__u32 cache_id)
 void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 {
        __u32 aux;
-       __u32 cache_id;
        __u32 way_size = 0;
-       int ways;
        const char *type;
 
        l2x0_base = base;
 
-       cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
+       l2x0_cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
        aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
 
        aux &= aux_mask;
        aux |= aux_val;
 
        /* Determine the number of ways */
-       switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
+       switch (l2x0_cache_id & L2X0_CACHE_ID_PART_MASK) {
        case L2X0_CACHE_ID_PART_L310:
                if (aux & (1 << 16))
-                       ways = 16;
+                       l2x0_ways = 16;
                else
-                       ways = 8;
+                       l2x0_ways = 8;
                type = "L310";
                break;
        case L2X0_CACHE_ID_PART_L210:
-               ways = (aux >> 13) & 0xf;
+               l2x0_ways = (aux >> 13) & 0xf;
                type = "L210";
                break;
        default:
                /* Assume unknown chips have 8 ways */
-               ways = 8;
+               l2x0_ways = 8;
                type = "L2x0 series";
                break;
        }
 
-       l2x0_way_mask = (1 << ways) - 1;
+       l2x0_way_mask = (1 << l2x0_ways) - 1;
 
        /*
         * L2 cache Size =  Way size * Number of ways
         */
        way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
-       way_size = 1 << (way_size + 3);
-       l2x0_size = ways * way_size * SZ_1K;
+       way_size = SZ_1K << (way_size + 3);
+       l2x0_size = l2x0_ways * way_size;
+       l2x0_sets = way_size / CACHE_LINE_SIZE;
 
        /*
         * Check if l2x0 controller is already enabled.
@@ -359,7 +401,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
         */
        if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
                /* Make sure that I&D is not locked down when starting */
-               l2x0_unlock(cache_id);
+               l2x0_unlock(l2x0_cache_id);
 
                /* l2x0 controller is disabled */
                writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
@@ -383,7 +425,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 
        printk(KERN_INFO "%s cache controller enabled\n", type);
        printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
-                       ways, cache_id, aux, l2x0_size);
+                       l2x0_ways, l2x0_cache_id, aux, l2x0_size);
 }
 
 #ifdef CONFIG_OF
index 74c2e5a..2edb6f6 100644 (file)
@@ -272,6 +272,11 @@ v6_dma_clean_range:
  *     - end     - virtual end address of region
  */
 ENTRY(v6_dma_flush_range)
+#ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT
+       sub     r2, r1, r0
+       cmp     r2, #CONFIG_CACHE_FLUSH_RANGE_LIMIT
+       bhi     v6_dma_flush_dcache_all
+#endif
 #ifdef CONFIG_DMA_CACHE_RWFO
        ldrb    r2, [r0]                @ read for ownership
        strb    r2, [r0]                @ write for ownership
@@ -294,6 +299,18 @@ ENTRY(v6_dma_flush_range)
        mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer
        mov     pc, lr
 
+#ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT
+v6_dma_flush_dcache_all:
+       mov     r0, #0
+#ifdef HARVARD_CACHE
+       mcr     p15, 0, r0, c7, c14, 0          @ D cache clean+invalidate
+#else
+       mcr     p15, 0, r0, c7, c15, 0          @ Cache clean+invalidate
+#endif
+       mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer
+       mov     pc, lr
+#endif
+
 /*
  *     dma_map_area(start, size, dir)
  *     - start - kernel virtual start address
index 94c5a0c..d2b36b6 100644 (file)
@@ -560,11 +560,25 @@ static void __init *early_alloc(unsigned long sz)
        return early_alloc_aligned(sz, sz);
 }
 
-static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
+static pte_t * __init early_pte_alloc(pmd_t *pmd)
+{
+       if (pmd_none(*pmd) || pmd_bad(*pmd))
+               return early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
+       return pmd_page_vaddr(*pmd);
+}
+
+static void __init early_pte_install(pmd_t *pmd, pte_t *pte, unsigned long prot)
+{
+       __pmd_populate(pmd, __pa(pte), prot);
+       BUG_ON(pmd_bad(*pmd));
+}
+
+static pte_t * __init early_pte_alloc_and_install(pmd_t *pmd,
+       unsigned long addr, unsigned long prot)
 {
        if (pmd_none(*pmd)) {
-               pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
-               __pmd_populate(pmd, __pa(pte), prot);
+               pte_t *pte = early_pte_alloc(pmd);
+               early_pte_install(pmd, pte, prot);
        }
        BUG_ON(pmd_bad(*pmd));
        return pte_offset_kernel(pmd, addr);
@@ -574,16 +588,23 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
                                  unsigned long end, unsigned long pfn,
                                  const struct mem_type *type)
 {
-       pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
+       pte_t *start_pte = early_pte_alloc(pmd);
+       pte_t *pte = start_pte + pte_index(addr);
+
+       /* If replacing a section mapping, the whole section must be replaced */
+       BUG_ON(pmd_bad(*pmd) && ((addr | end) & ~PMD_MASK));
+
        do {
                set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
+       early_pte_install(pmd, start_pte, type->prot_l1);
 }
 
 static void __init alloc_init_section(pud_t *pud, unsigned long addr,
                                      unsigned long end, phys_addr_t phys,
-                                     const struct mem_type *type)
+                                     const struct mem_type *type,
+                                     bool force_pages)
 {
        pmd_t *pmd = pmd_offset(pud, addr);
 
@@ -593,7 +614,7 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
         * L1 entries, whereas PGDs refer to a group of L1 entries making
         * up one logical pointer to an L2 table.
         */
-       if (((addr | end | phys) & ~SECTION_MASK) == 0) {
+       if (((addr | end | phys) & ~SECTION_MASK) == 0 && !force_pages) {
                pmd_t *p = pmd;
 
 #ifndef CONFIG_ARM_LPAE
@@ -617,14 +638,14 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
 }
 
 static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
-       unsigned long phys, const struct mem_type *type)
+       unsigned long phys, const struct mem_type *type, bool force_pages)
 {
        pud_t *pud = pud_offset(pgd, addr);
        unsigned long next;
 
        do {
                next = pud_addr_end(addr, end);
-               alloc_init_section(pud, addr, next, phys, type);
+               alloc_init_section(pud, addr, next, phys, type, force_pages);
                phys += next - addr;
        } while (pud++, addr = next, addr != end);
 }
@@ -698,7 +719,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
  * offsets, and we take full advantage of sections and
  * supersections.
  */
-static void __init create_mapping(struct map_desc *md)
+static void __init create_mapping(struct map_desc *md, bool force_pages)
 {
        unsigned long addr, length, end;
        phys_addr_t phys;
@@ -748,7 +769,7 @@ static void __init create_mapping(struct map_desc *md)
        do {
                unsigned long next = pgd_addr_end(addr, end);
 
-               alloc_init_pud(pgd, addr, next, phys, type);
+               alloc_init_pud(pgd, addr, next, phys, type, force_pages);
 
                phys += next - addr;
                addr = next;
@@ -769,7 +790,7 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
        vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
 
        for (md = io_desc; nr; md++, nr--) {
-               create_mapping(md);
+               create_mapping(md, false);
                vm->addr = (void *)(md->virtual & PAGE_MASK);
                vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
                vm->phys_addr = __pfn_to_phys(md->pfn); 
@@ -1045,12 +1066,12 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
        map.virtual = 0xffff0000;
        map.length = PAGE_SIZE;
        map.type = MT_HIGH_VECTORS;
-       create_mapping(&map);
+       create_mapping(&map, false);
 
        if (!vectors_high()) {
                map.virtual = 0;
                map.type = MT_LOW_VECTORS;
-               create_mapping(&map);
+               create_mapping(&map, false);
        }
 
        /*
@@ -1072,20 +1093,23 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 static void __init kmap_init(void)
 {
 #ifdef CONFIG_HIGHMEM
-       pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
+       pkmap_page_table = early_pte_alloc_and_install(pmd_off_k(PKMAP_BASE),
                PKMAP_BASE, _PAGE_KERNEL_TABLE);
 #endif
 }
 
+
 static void __init map_lowmem(void)
 {
        struct memblock_region *reg;
+       phys_addr_t start;
+       phys_addr_t end;
+       struct map_desc map;
 
        /* Map all the lowmem memory banks. */
        for_each_memblock(memory, reg) {
-               phys_addr_t start = reg->base;
-               phys_addr_t end = start + reg->size;
-               struct map_desc map;
+               start = reg->base;
+               end = start + reg->size;
 
                if (end > lowmem_limit)
                        end = lowmem_limit;
@@ -1097,8 +1121,20 @@ static void __init map_lowmem(void)
                map.length = end - start;
                map.type = MT_MEMORY;
 
-               create_mapping(&map);
+               create_mapping(&map, false);
        }
+
+#ifdef CONFIG_DEBUG_RODATA
+       start = __pa(_stext) & PMD_MASK;
+       end = ALIGN(__pa(__end_rodata), PMD_SIZE);
+
+       map.pfn = __phys_to_pfn(start);
+       map.virtual = __phys_to_virt(start);
+       map.length = end - start;
+       map.type = MT_MEMORY;
+
+       create_mapping(&map, true);
+#endif
 }
 
 /*
diff --git a/arch/arm/mm/rodata.c b/arch/arm/mm/rodata.c
new file mode 100644 (file)
index 0000000..9a8eb84
--- /dev/null
@@ -0,0 +1,159 @@
+/*
+ *  linux/arch/arm/mm/rodata.c
+ *
+ *  Copyright (C) 2011 Google, Inc.
+ *
+ *  Author: Colin Cross <ccross@android.com>
+ *
+ *  Based on x86 implementation in arch/x86/mm/init_32.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+
+#include <asm/cache.h>
+#include <asm/pgtable.h>
+#include <asm/rodata.h>
+#include <asm/sections.h>
+#include <asm/tlbflush.h>
+
+#include "mm.h"
+
+static int kernel_set_to_readonly __read_mostly;
+
+#ifdef CONFIG_DEBUG_RODATA_TEST
+static const int rodata_test_data = 0xC3;
+
+static noinline void rodata_test(void)
+{
+       int result;
+
+       pr_info("%s: attempting to write to read-only section:\n", __func__);
+
+       if (*(volatile int *)&rodata_test_data != 0xC3) {
+               pr_err("read only data changed before test\n");
+               return;
+       }
+
+       /*
+        * Attempt to write to rodata_test_data, trapping the expected
+        * data abort.  If the trap executed, result will be 1.  If it didn't,
+        * result will be 0xFF.
+        */
+       asm volatile(
+               "0:     str     %[zero], [%[rodata_test_data]]\n"
+               "       mov     %[result], #0xFF\n"
+               "       b       2f\n"
+               "1:     mov     %[result], #1\n"
+               "2:\n"
+
+               /* Exception fixup - if store at label 0 faults, jumps to 1 */
+               ".pushsection __ex_table, \"a\"\n"
+               "       .long   0b, 1b\n"
+               ".popsection\n"
+
+               : [result] "=r" (result)
+               : [rodata_test_data] "r" (&rodata_test_data), [zero] "r" (0)
+               : "memory"
+       );
+
+       if (result == 1)
+               pr_info("write to read-only section trapped, success\n");
+       else
+               pr_err("write to read-only section NOT trapped, test failed\n");
+
+       if (*(volatile int *)&rodata_test_data != 0xC3)
+               pr_err("read only data changed during write\n");
+}
+#else
+static inline void rodata_test(void) { }
+#endif
+
+static int set_page_attributes(unsigned long virt, int numpages,
+       pte_t (*f)(pte_t))
+{
+       pmd_t *pmd;
+       pte_t *pte;
+       unsigned long start = virt;
+       unsigned long end = virt + (numpages << PAGE_SHIFT);
+       unsigned long pmd_end;
+
+       while (virt < end) {
+               pmd = pmd_off_k(virt);
+               pmd_end = min(ALIGN(virt + 1, PMD_SIZE), end);
+
+               if ((pmd_val(*pmd) & PMD_TYPE_MASK) != PMD_TYPE_TABLE) {
+                       pr_err("%s: pmd %p=%08lx for %08lx not page table\n",
+                               __func__, pmd, pmd_val(*pmd), virt);
+                       virt = pmd_end;
+                       continue;
+               }
+
+               while (virt < pmd_end) {
+                       pte = pte_offset_kernel(pmd, virt);
+                       set_pte_ext(pte, f(*pte), 0);
+                       virt += PAGE_SIZE;
+               }
+       }
+
+       flush_tlb_kernel_range(start, end);
+
+       return 0;
+}
+
+int set_memory_ro(unsigned long virt, int numpages)
+{
+       return set_page_attributes(virt, numpages, pte_wrprotect);
+}
+EXPORT_SYMBOL(set_memory_ro);
+
+int set_memory_rw(unsigned long virt, int numpages)
+{
+       return set_page_attributes(virt, numpages, pte_mkwrite);
+}
+EXPORT_SYMBOL(set_memory_rw);
+
+void set_kernel_text_rw(void)
+{
+       unsigned long start = PAGE_ALIGN((unsigned long)_text);
+       unsigned long size = PAGE_ALIGN((unsigned long)__end_rodata) - start;
+
+       if (!kernel_set_to_readonly)
+               return;
+
+       pr_debug("Set kernel text: %lx - %lx to read-write\n",
+                start, start + size);
+
+       set_memory_rw(start, size >> PAGE_SHIFT);
+}
+
+void set_kernel_text_ro(void)
+{
+       unsigned long start = PAGE_ALIGN((unsigned long)_text);
+       unsigned long size = PAGE_ALIGN((unsigned long)__end_rodata) - start;
+
+       if (!kernel_set_to_readonly)
+               return;
+
+       pr_info_once("Write protecting the kernel text section %lx - %lx\n",
+               start, start + size);
+
+       pr_debug("Set kernel text: %lx - %lx to read only\n",
+                start, start + size);
+
+       set_memory_ro(start, size >> PAGE_SHIFT);
+}
+
+void mark_rodata_ro(void)
+{
+       kernel_set_to_readonly = 1;
+
+       set_kernel_text_ro();
+
+       rodata_test();
+}
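
A minimal sketch of the exported helpers above, assuming a page-aligned kernel buffer; the buffer and function names are hypothetical:

	static char demo_page[PAGE_SIZE] __aligned(PAGE_SIZE);

	static void rodata_helper_demo(void)
	{
		/* Write-protect one page; stores to it now fault. */
		set_memory_ro((unsigned long)demo_page, 1);
		/* ... */
		set_memory_rw((unsigned long)demo_page, 1);
	}
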
index 4fa9903..c1a9784 100644 (file)
@@ -10,7 +10,7 @@
  *
  * Basic entry code, called from the kernel's undefined instruction trap.
  *  r0  = faulted instruction
- *  r5  = faulted PC+4
+ *  r2  = faulted PC+4
  *  r9  = successful return
  *  r10 = thread_info structure
  *  lr  = failure return
@@ -26,6 +26,7 @@ ENTRY(do_vfp)
        str     r11, [r10, #TI_PREEMPT]
 #endif
        enable_irq
+       str     r2, [sp, #S_PC]         @ update regs->ARM_pc for Thumb 2 case
        ldr     r4, .LCvfp
        ldr     r11, [r10, #TI_CPU]     @ CPU number
        add     r10, r10, #TI_VFPSTATE  @ r10 = workspace
index 8f3ccdd..4942007 100644 (file)
@@ -449,6 +449,10 @@ static int vfp_pm_suspend(void)
 
                /* disable, just in case */
                fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+       } else if (vfp_current_hw_state[ti->cpu]) {
+               fmxr(FPEXC, fpexc | FPEXC_EN);
+               vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
+               fmxr(FPEXC, fpexc);
        }
 
        /* clear any information we had about last context state */
index f49253d..f1e4268 100644 (file)
@@ -1,13 +1,6 @@
 #ifndef _ASM_X86_IDLE_H
 #define _ASM_X86_IDLE_H
 
-#define IDLE_START 1
-#define IDLE_END 2
-
-struct notifier_block;
-void idle_notifier_register(struct notifier_block *n);
-void idle_notifier_unregister(struct notifier_block *n);
-
 #ifdef CONFIG_X86_64
 void enter_idle(void);
 void exit_idle(void);
index cfa5c90..7a91eea 100644 (file)
@@ -58,31 +58,17 @@ asmlinkage extern void ret_from_fork(void);
 DEFINE_PER_CPU(unsigned long, old_rsp);
 static DEFINE_PER_CPU(unsigned char, is_idle);
 
-static ATOMIC_NOTIFIER_HEAD(idle_notifier);
-
-void idle_notifier_register(struct notifier_block *n)
-{
-       atomic_notifier_chain_register(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_register);
-
-void idle_notifier_unregister(struct notifier_block *n)
-{
-       atomic_notifier_chain_unregister(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_unregister);
-
 void enter_idle(void)
 {
        percpu_write(is_idle, 1);
-       atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
+       idle_notifier_call_chain(IDLE_START);
 }
 
 static void __exit_idle(void)
 {
        if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
                return;
-       atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
+       idle_notifier_call_chain(IDLE_END);
 }
 
 /* Called from interrupts to signify idle end */
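
With the x86-private chain replaced by the generic one, a consumer on any architecture follows the pattern of the ARM LED hook earlier in this merge. A sketch, assuming the declarations now come from <linux/cpu.h> as the leds.c includes suggest:

	static int my_idle_cb(struct notifier_block *nb,
			      unsigned long val, void *data)
	{
		if (val == IDLE_START)
			;	/* quiesce something */
		else if (val == IDLE_END)
			;	/* and bring it back */
		return NOTIFY_OK;
	}

	static struct notifier_block my_idle_nb = {
		.notifier_call = my_idle_cb,
	};

	/* during init: idle_notifier_register(&my_idle_nb); */
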
index 23b4f70..16f8891 100644 (file)
@@ -1104,6 +1104,22 @@ static void disk_release(struct device *dev)
                blk_put_queue(disk->queue);
        kfree(disk);
 }
+
+static int disk_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       struct gendisk *disk = dev_to_disk(dev);
+       struct disk_part_iter piter;
+       struct hd_struct *part;
+       int cnt = 0;
+
+       disk_part_iter_init(&piter, disk, 0);
+       while ((part = disk_part_iter_next(&piter)))
+               cnt++;
+       disk_part_iter_exit(&piter);
+       add_uevent_var(env, "NPARTS=%u", cnt);
+       return 0;
+}
+
 struct class block_class = {
        .name           = "block",
 };
@@ -1122,6 +1138,7 @@ static struct device_type disk_type = {
        .groups         = disk_attr_groups,
        .release        = disk_release,
        .devnode        = block_devnode,
+       .uevent         = disk_uevent,
 };
 
 #ifdef CONFIG_PROC_FS
index d06ec1c..a1643da 100644 (file)
@@ -216,10 +216,21 @@ static void part_release(struct device *dev)
        kfree(p);
 }
 
+static int part_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       struct hd_struct *part = dev_to_part(dev);
+
+       add_uevent_var(env, "PARTN=%u", part->partno);
+       if (part->info && part->info->volname[0])
+               add_uevent_var(env, "PARTNAME=%s", part->info->volname);
+       return 0;
+}
+
 struct device_type part_type = {
        .name           = "partition",
        .groups         = part_attr_groups,
        .release        = part_release,
+       .uevent         = part_uevent,
 };
 
 static void delete_partition_rcu_cb(struct rcu_head *head)
index 5afe5d1..86d65b1 100644 (file)
@@ -96,6 +96,8 @@ source "drivers/memstick/Kconfig"
 
 source "drivers/leds/Kconfig"
 
+source "drivers/switch/Kconfig"
+
 source "drivers/accessibility/Kconfig"
 
 source "drivers/infiniband/Kconfig"
index c07be02..5f8c75d 100644 (file)
@@ -100,6 +100,7 @@ obj-$(CONFIG_CPU_IDLE)              += cpuidle/
 obj-y                          += mmc/
 obj-$(CONFIG_MEMSTICK)         += memstick/
 obj-y                          += leds/
+obj-$(CONFIG_SWITCH)           += switch/
 obj-$(CONFIG_INFINIBAND)       += infiniband/
 obj-$(CONFIG_SGI_SN)           += sn/
 obj-y                          += firmware/
index e2cc3d2..070ef58 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/sched.h>
 #include <linux/async.h>
 #include <linux/suspend.h>
+#include <linux/timer.h>
 
 #include "../base.h"
 #include "power.h"
@@ -53,6 +54,12 @@ struct suspend_stats suspend_stats;
 static DEFINE_MUTEX(dpm_list_mtx);
 static pm_message_t pm_transition;
 
+static void dpm_drv_timeout(unsigned long data);
+struct dpm_drv_wd_data {
+       struct device *dev;
+       struct task_struct *tsk;
+};
+
 static int async_error;
 
 /**
@@ -541,6 +548,30 @@ static bool is_async(struct device *dev)
 }
 
 /**
+ *     dpm_drv_timeout - Driver suspend / resume watchdog handler
+ *     @data: struct device which timed out
+ *
+ *     Called when a driver has timed out suspending or resuming.
+ *     There's not much we can do here to recover so
+ *     BUG() out for a crash-dump
+ *
+ */
+static void dpm_drv_timeout(unsigned long data)
+{
+       struct dpm_drv_wd_data *wd_data = (void *)data;
+       struct device *dev = wd_data->dev;
+       struct task_struct *tsk = wd_data->tsk;
+
+       printk(KERN_EMERG "**** DPM device timeout: %s (%s)\n", dev_name(dev),
+              (dev->driver ? dev->driver->name : "no driver"));
+
+       printk(KERN_EMERG "dpm suspend stack:\n");
+       show_stack(tsk, NULL);
+
+       BUG();
+}
+
+/**
  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
  * @state: PM transition of the system being carried out.
  *
@@ -814,9 +845,19 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
+       struct timer_list timer;
+       struct dpm_drv_wd_data data;
 
        dpm_wait_for_children(dev, async);
 
+       data.dev = dev;
+       data.tsk = get_current();
+       init_timer_on_stack(&timer);
+       timer.expires = jiffies + HZ * 12;
+       timer.function = dpm_drv_timeout;
+       timer.data = (unsigned long)&data;
+       add_timer(&timer);
+
        if (async_error)
                return 0;
 
@@ -884,6 +925,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        }
 
        device_unlock(dev);
+
+       del_timer_sync(&timer);
+       destroy_timer_on_stack(&timer);
+
        complete_all(&dev->power.completion);
 
        if (error) {
index 4364303..ce18820 100644 (file)
@@ -6,6 +6,19 @@ menu "Character devices"
 
 source "drivers/tty/Kconfig"
 
+config DEVMEM
+       bool "Memory device driver"
+       default y
+       help
+         The memory driver provides two character devices, mem and kmem, which
+         provide access to the system's memory. The mem device is a view of
+         physical memory, and each byte in the device corresponds to the
+         matching physical address. The kmem device is the same as mem, but
+         the addresses correspond to the kernel's virtual address space rather
+         than physical memory. These devices are standard parts of a Linux
+         system and most users should say Y here. You might say N if you
+         are very security conscious or memory is tight.
+
 config DEVKMEM
        bool "/dev/kmem virtual device support"
        default y
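
A hedged userspace sketch of what the new DEVMEM help text describes -- each offset into /dev/mem is a physical address; the address below is a placeholder, not a real register:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		const off_t phys = 0x10000000;	/* hypothetical address */
		int fd = open("/dev/mem", O_RDONLY);
		void *m;

		if (fd < 0)
			return 1;
		m = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, phys);
		if (m != MAP_FAILED) {
			printf("word: 0x%x\n", *(volatile unsigned int *)m);
			munmap(m, 4096);
		}
		close(fd);
		return 0;
	}
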
@@ -598,6 +611,10 @@ config DEVPORT
        depends on ISA || PCI
        default y
 
+config DCC_TTY
+       tristate "DCC tty driver"
+       depends on ARM
+
 source "drivers/s390/char/Kconfig"
 
 config RAMOOPS
index 32762ba..5e2fd70 100644 (file)
@@ -58,6 +58,7 @@ obj-$(CONFIG_IPMI_HANDLER)    += ipmi/
 obj-$(CONFIG_HANGCHECK_TIMER)  += hangcheck-timer.o
 obj-$(CONFIG_TCG_TPM)          += tpm/
 
+obj-$(CONFIG_DCC_TTY)          += dcc_tty.o
 obj-$(CONFIG_PS3_FLASH)                += ps3flash.o
 obj-$(CONFIG_RAMOOPS)          += ramoops.o
 
diff --git a/drivers/char/dcc_tty.c b/drivers/char/dcc_tty.c
new file mode 100644 (file)
index 0000000..a787acc
--- /dev/null
@@ -0,0 +1,326 @@
+/* drivers/char/dcc_tty.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/console.h>
+#include <linux/hrtimer.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+
+MODULE_DESCRIPTION("DCC TTY Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+
+static spinlock_t g_dcc_tty_lock = SPIN_LOCK_UNLOCKED;
+static struct hrtimer g_dcc_timer;
+static char g_dcc_buffer[16];
+static int g_dcc_buffer_head;
+static int g_dcc_buffer_count;
+static unsigned g_dcc_write_delay_usecs = 1;
+static struct tty_driver *g_dcc_tty_driver;
+static struct tty_struct *g_dcc_tty;
+static int g_dcc_tty_open_count;
+
+static void dcc_poll_locked(void)
+{
+       char ch;
+       int rch;
+       int written;
+
+       while (g_dcc_buffer_count) {
+               ch = g_dcc_buffer[g_dcc_buffer_head];
+               /*
+                * Read the DCC status into the flags; if the transmit
+                * buffer is free (carry clear) send one byte and set
+                * "written", otherwise report it as busy.
+                */
+               asm(
+                       "mrc 14, 0, r15, c0, c1, 0\n"
+                       "mcrcc 14, 0, %1, c0, c5, 0\n"
+                       "movcc %0, #1\n"
+                       "movcs %0, #0\n"
+                       : "=r" (written)
+                       : "r" (ch)
+               );
+               if (written) {
+                       if (ch == '\n')
+                               g_dcc_buffer[g_dcc_buffer_head] = '\r';
+                       else {
+                               g_dcc_buffer_head = (g_dcc_buffer_head + 1) % ARRAY_SIZE(g_dcc_buffer);
+                               g_dcc_buffer_count--;
+                               if (g_dcc_tty)
+                                       tty_wakeup(g_dcc_tty);
+                       }
+                       g_dcc_write_delay_usecs = 1;
+               } else {
+                       if (g_dcc_write_delay_usecs > 0x100)
+                               break;
+                       g_dcc_write_delay_usecs <<= 1;
+                       udelay(g_dcc_write_delay_usecs);
+               }
+       }
+
+       if (g_dcc_tty && !test_bit(TTY_THROTTLED, &g_dcc_tty->flags)) {
+               /*
+                * Poll the DCC status word; if receive data is ready
+                * (bit 30 set) read one byte, otherwise return -1.
+                */
+               asm(
+                       "mrc 14, 0, %0, c0, c1, 0\n"
+                       "tst %0, #(1 << 30)\n"
+                       "moveq %0, #-1\n"
+                       "mrcne 14, 0, %0, c0, c5, 0\n"
+                       : "=r" (rch)
+               );
+               if (rch >= 0) {
+                       ch = rch;
+                       tty_insert_flip_string(g_dcc_tty, &ch, 1);
+                       tty_flip_buffer_push(g_dcc_tty);
+               }
+       }
+
+       if (g_dcc_buffer_count)
+               hrtimer_start(&g_dcc_timer, ktime_set(0, g_dcc_write_delay_usecs * NSEC_PER_USEC), HRTIMER_MODE_REL);
+       else
+               hrtimer_start(&g_dcc_timer, ktime_set(0, 20 * NSEC_PER_MSEC), HRTIMER_MODE_REL);
+}
+
+static int dcc_tty_open(struct tty_struct * tty, struct file * filp)
+{
+       int ret;
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+       if (g_dcc_tty == NULL || g_dcc_tty == tty) {
+               g_dcc_tty = tty;
+               g_dcc_tty_open_count++;
+               ret = 0;
+       } else
+               ret = -EBUSY;
+       spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+
+       printk("dcc_tty_open, tty %p, f_flags %x, returned %d\n", tty, filp->f_flags, ret);
+
+       return ret;
+}
+
+static void dcc_tty_close(struct tty_struct * tty, struct file * filp)
+{
+       printk("dcc_tty_close, tty %p, f_flags %x\n", tty, filp->f_flags);
+       if (g_dcc_tty == tty) {
+               if (--g_dcc_tty_open_count == 0)
+                       g_dcc_tty = NULL;
+       }
+}
+
+static int dcc_write(const unsigned char *buf_start, int count)
+{
+       const unsigned char *buf = buf_start;
+       unsigned long irq_flags;
+       int copy_len;
+       int space_left;
+       int tail;
+
+       if (count < 1)
+               return 0;
+
+       spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+       do {
+               tail = (g_dcc_buffer_head + g_dcc_buffer_count) % ARRAY_SIZE(g_dcc_buffer);
+               copy_len = ARRAY_SIZE(g_dcc_buffer) - tail;
+               space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count;
+               if (copy_len > space_left)
+                       copy_len = space_left;
+               if (copy_len > count)
+                       copy_len = count;
+               memcpy(&g_dcc_buffer[tail], buf, copy_len);
+               g_dcc_buffer_count += copy_len;
+               buf += copy_len;
+               count -= copy_len;
+               if (copy_len < count && copy_len < space_left) {
+                       space_left -= copy_len;
+                       copy_len = count;
+                       if (copy_len > space_left) {
+                               copy_len = space_left;
+                       }
+                       memcpy(g_dcc_buffer, buf, copy_len);
+                       buf += copy_len;
+                       count -= copy_len;
+                       g_dcc_buffer_count += copy_len;
+               }
+               dcc_poll_locked();
+               space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count;
+       } while (count && space_left);
+       spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+       return buf - buf_start;
+}
+
+static int dcc_tty_write(struct tty_struct * tty, const unsigned char *buf, int count)
+{
+       int ret;
+       /* printk("dcc_tty_write %p, %d\n", buf, count); */
+       ret = dcc_write(buf, count);
+       if (ret != count)
+               printk("dcc_tty_write %p, %d, returned %d\n", buf, count, ret);
+       return ret;
+}
+
+static int dcc_tty_write_room(struct tty_struct *tty)
+{
+       int space_left;
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+       space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count;
+       spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+       return space_left;
+}
+
+static int dcc_tty_chars_in_buffer(struct tty_struct *tty)
+{
+       int ret;
+       /* Report the DCC receive-data-ready flag (bit 30 of the status word). */
+       asm(
+               "mrc 14, 0, %0, c0, c1, 0\n"
+               "mov %0, %0, LSR #30\n"
+               "and %0, %0, #1\n"
+               : "=r" (ret)
+       );
+       return ret;
+}
+
+static void dcc_tty_unthrottle(struct tty_struct * tty)
+{
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+       dcc_poll_locked();
+       spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+}
+
+static enum hrtimer_restart dcc_tty_timer_func(struct hrtimer *timer)
+{
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+       dcc_poll_locked();
+       spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+       return HRTIMER_NORESTART;
+}
+
+void dcc_console_write(struct console *co, const char *b, unsigned count)
+{
+#if 1
+       dcc_write(b, count);
+#else
+       /* blocking printk */
+       while (count > 0) {
+               int written;
+               written = dcc_write(b, count);
+               if (written) {
+                       b += written;
+                       count -= written;
+               }
+       }
+#endif
+}
+
+static struct tty_driver *dcc_console_device(struct console *c, int *index)
+{
+       *index = 0;
+       return g_dcc_tty_driver;
+}
+
+static int __init dcc_console_setup(struct console *co, char *options)
+{
+       if (co->index != 0)
+               return -ENODEV;
+       return 0;
+}
+
+
+static struct console dcc_console =
+{
+       .name           = "ttyDCC",
+       .write          = dcc_console_write,
+       .device         = dcc_console_device,
+       .setup          = dcc_console_setup,
+       .flags          = CON_PRINTBUFFER,
+       .index          = -1,
+};
+
+static struct tty_operations dcc_tty_ops = {
+       .open = dcc_tty_open,
+       .close = dcc_tty_close,
+       .write = dcc_tty_write,
+       .write_room = dcc_tty_write_room,
+       .chars_in_buffer = dcc_tty_chars_in_buffer,
+       .unthrottle = dcc_tty_unthrottle,
+};
+
+static int __init dcc_tty_init(void)
+{
+       int ret;
+
+       hrtimer_init(&g_dcc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       g_dcc_timer.function = dcc_tty_timer_func;
+
+       g_dcc_tty_driver = alloc_tty_driver(1);
+       if (!g_dcc_tty_driver) {
+               printk(KERN_ERR "dcc_tty_probe: alloc_tty_driver failed\n");
+               ret = -ENOMEM;
+               goto err_alloc_tty_driver_failed;
+       }
+       g_dcc_tty_driver->owner = THIS_MODULE;
+       g_dcc_tty_driver->driver_name = "dcc";
+       g_dcc_tty_driver->name = "ttyDCC";
+       g_dcc_tty_driver->major = 0; /* auto assign */
+       g_dcc_tty_driver->minor_start = 0;
+       g_dcc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+       g_dcc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+       g_dcc_tty_driver->init_termios = tty_std_termios;
+       g_dcc_tty_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+       tty_set_operations(g_dcc_tty_driver, &dcc_tty_ops);
+       ret = tty_register_driver(g_dcc_tty_driver);
+       if (ret) {
+               printk(KERN_ERR "dcc_tty_probe: tty_register_driver failed, %d\n", ret);
+               goto err_tty_register_driver_failed;
+       }
+       tty_register_device(g_dcc_tty_driver, 0, NULL);
+
+       register_console(&dcc_console);
+       hrtimer_start(&g_dcc_timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+
+       return 0;
+
+err_tty_register_driver_failed:
+       put_tty_driver(g_dcc_tty_driver);
+       g_dcc_tty_driver = NULL;
+err_alloc_tty_driver_failed:
+       return ret;
+}
+
+static void  __exit dcc_tty_exit(void)
+{
+       int ret;
+
+       tty_unregister_device(g_dcc_tty_driver, 0);
+       ret = tty_unregister_driver(g_dcc_tty_driver);
+       if (ret < 0) {
+               printk(KERN_ERR "dcc_tty_remove: tty_unregister_driver failed, %d\n", ret);
+       } else {
+               put_tty_driver(g_dcc_tty_driver);
+       }
+       g_dcc_tty_driver = NULL;
+}
+
+module_init(dcc_tty_init);
+module_exit(dcc_tty_exit);
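
Since the driver registers a console named "ttyDCC" (and dcc_console_setup() only accepts index 0), selecting it for boot messages should follow the standard kernel command-line convention; this is an assumption for this driver, not something the patch states:

	console=ttyDCC
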
index d6e9d08..67e19b6 100644 (file)
@@ -57,6 +57,7 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
 }
 #endif
 
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
 #ifdef CONFIG_STRICT_DEVMEM
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
@@ -82,7 +83,9 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
        return 1;
 }
 #endif
+#endif
 
+#ifdef CONFIG_DEVMEM
 void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
 {
 }
@@ -209,6 +212,9 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
        *ppos += written;
        return written;
 }
+#endif /* CONFIG_DEVMEM */
+
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
 
 int __weak phys_mem_access_prot_allowed(struct file *file,
        unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
@@ -330,6 +336,7 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
        }
        return 0;
 }
+#endif /* CONFIG_DEVMEM || CONFIG_DEVKMEM */
 
 #ifdef CONFIG_DEVKMEM
 static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
@@ -694,6 +701,8 @@ static loff_t null_lseek(struct file *file, loff_t offset, int orig)
        return file->f_pos = 0;
 }
 
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
+
 /*
  * The memory devices use the full 32/64 bits of the offset, and so we cannot
  * check against negative addresses: they are ok. The return value is weird,
@@ -727,10 +736,14 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
        return ret;
 }
 
+#endif
+
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
 static int open_port(struct inode * inode, struct file * filp)
 {
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
 }
+#endif
 
 #define zero_lseek     null_lseek
 #define full_lseek      null_lseek
@@ -740,6 +753,7 @@ static int open_port(struct inode * inode, struct file * filp)
 #define open_kmem      open_mem
 #define open_oldmem    open_mem
 
+#ifdef CONFIG_DEVMEM
 static const struct file_operations mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
@@ -748,6 +762,7 @@ static const struct file_operations mem_fops = {
        .open           = open_mem,
        .get_unmapped_area = get_unmapped_area_mem,
 };
+#endif
 
 #ifdef CONFIG_DEVKMEM
 static const struct file_operations kmem_fops = {
@@ -851,7 +866,9 @@ static const struct memdev {
        const struct file_operations *fops;
        struct backing_dev_info *dev_info;
 } devlist[] = {
+#ifdef CONFIG_DEVMEM
         [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
+#endif
 #ifdef CONFIG_DEVKMEM
         [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
 #endif
index e24a2a1..57f96eb 100644 (file)
@@ -99,6 +99,16 @@ config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
          Be aware that not all cpufreq drivers support the conservative
          governor. If unsure have a look at the help section of the
          driver. Fallback governor will be the performance governor.
+
+config CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+       bool "interactive"
+       select CPU_FREQ_GOV_INTERACTIVE
+       help
+         Use the CPUFreq governor 'interactive' as default. This allows
+         you to get a fully dynamic cpu-frequency-capable system by
+         simply loading your low-level cpufreq hardware driver, with the
+         'interactive' governor handling latency-sensitive workloads.
+
 endchoice
 
 config CPU_FREQ_GOV_PERFORMANCE
@@ -156,6 +166,23 @@ config CPU_FREQ_GOV_ONDEMAND
 
          If in doubt, say N.
 
+config CPU_FREQ_GOV_INTERACTIVE
+       tristate "'interactive' cpufreq policy governor"
+       help
+         'interactive' - This driver adds a dynamic cpufreq policy governor
+         designed for latency-sensitive workloads.
+
+         This governor attempts to reduce the latency of clock
+         increases so that the system is more responsive to
+         interactive workloads.
+
+         To compile this driver as a module, choose M here: the
+         module will be called cpufreq_interactive.
+
+         For details, take a look at linux/Documentation/cpu-freq.
+
+         If in doubt, say N.
+
 config CPU_FREQ_GOV_CONSERVATIVE
        tristate "'conservative' cpufreq governor"
        depends on CPU_FREQ
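
Once the governor is built in, it is selected per-CPU through the standard cpufreq sysfs interface; a small userspace sketch (cpu0 as an example, path per Documentation/cpu-freq):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/"
				"scaling_governor", "w");

		if (!f)
			return 1;
		fputs("interactive\n", f);
		return fclose(f) != 0;
	}
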
index ac000fa..f84c99b 100644 (file)
@@ -9,6 +9,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE)    += cpufreq_powersave.o
 obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE)   += cpufreq_userspace.o
 obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND)    += cpufreq_ondemand.o
 obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE)        += cpufreq_conservative.o
+obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o
 
 # CPUfreq cross-arch helpers
 obj-$(CONFIG_CPU_FREQ_TABLE)           += freq_table.o
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
new file mode 100644 (file)
index 0000000..8a5cd15
--- /dev/null
@@ -0,0 +1,705 @@
+/*
+ * drivers/cpufreq/cpufreq_interactive.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Author: Mike Chan (mike@android.com)
+ *
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+
+#include <asm/cputime.h>
+
+static atomic_t active_count = ATOMIC_INIT(0);
+
+struct cpufreq_interactive_cpuinfo {
+       struct timer_list cpu_timer;
+       int timer_idlecancel;
+       u64 time_in_idle;
+       u64 idle_exit_time;
+       u64 timer_run_time;
+       int idling;
+       u64 freq_change_time;
+       u64 freq_change_time_in_idle;
+       struct cpufreq_policy *policy;
+       struct cpufreq_frequency_table *freq_table;
+       unsigned int target_freq;
+       int governor_enabled;
+};
+
+static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
+
+/* A kthread handles frequency ramp-up; a workqueue handles ramp-down */
+static struct task_struct *up_task;
+static struct workqueue_struct *down_wq;
+static struct work_struct freq_scale_down_work;
+static cpumask_t up_cpumask;
+static spinlock_t up_cpumask_lock;
+static cpumask_t down_cpumask;
+static spinlock_t down_cpumask_lock;
+static struct mutex set_speed_lock;
+
+/* High speed to bump to from low speed on a load burst (defaults to max) */
+static u64 hispeed_freq;
+
+/* Go to hi speed when CPU load at or above this value. */
+#define DEFAULT_GO_HISPEED_LOAD 95
+static unsigned long go_hispeed_load;
+
+/*
+ * The minimum amount of time to spend at a frequency before we can ramp down.
+ */
+#define DEFAULT_MIN_SAMPLE_TIME (20 * USEC_PER_MSEC)
+static unsigned long min_sample_time;
+
+/*
+ * The sampling period of the timer used to re-evaluate load and raise
+ * the frequency.
+ */
+#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
+static unsigned long timer_rate;
+
+static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
+               unsigned int event);
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+static
+#endif
+struct cpufreq_governor cpufreq_gov_interactive = {
+       .name = "interactive",
+       .governor = cpufreq_governor_interactive,
+       .max_transition_latency = 10000000,
+       .owner = THIS_MODULE,
+};
+
+static void cpufreq_interactive_timer(unsigned long data)
+{
+       unsigned int delta_idle;
+       unsigned int delta_time;
+       int cpu_load;
+       int load_since_change;
+       u64 time_in_idle;
+       u64 idle_exit_time;
+       struct cpufreq_interactive_cpuinfo *pcpu =
+               &per_cpu(cpuinfo, data);
+       u64 now_idle;
+       unsigned int new_freq;
+       unsigned int index;
+       unsigned long flags;
+
+       smp_rmb();
+
+       if (!pcpu->governor_enabled)
+               goto exit;
+
+       /*
+        * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
+        * this lets idle exit know the current idle time sample has
+        * been processed, and idle exit can generate a new sample and
+        * re-arm the timer.  This prevents a concurrent idle
+        * exit on that CPU from writing a new set of info at the same time
+        * the timer function runs (the timer function can't use that info
+        * until more time passes).
+        */
+       time_in_idle = pcpu->time_in_idle;
+       idle_exit_time = pcpu->idle_exit_time;
+       now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time);
+       smp_wmb();
+
+       /* If we raced with cancelling a timer, skip. */
+       if (!idle_exit_time)
+               goto exit;
+
+       delta_idle = (unsigned int)(now_idle - time_in_idle);
+       delta_time = (unsigned int)(pcpu->timer_run_time - idle_exit_time);
+
+       /*
+        * If timer ran less than 1ms after short-term sample started, retry.
+        */
+       if (delta_time < 1000)
+               goto rearm;
+
+       if (delta_idle > delta_time)
+               cpu_load = 0;
+       else
+               cpu_load = 100 * (delta_time - delta_idle) / delta_time;
+
+       delta_idle = (unsigned int)(now_idle - pcpu->freq_change_time_in_idle);
+       delta_time = (unsigned int)(pcpu->timer_run_time - pcpu->freq_change_time);
+
+       if ((delta_time == 0) || (delta_idle > delta_time))
+               load_since_change = 0;
+       else
+               load_since_change =
+                       100 * (delta_time - delta_idle) / delta_time;
+
+       /*
+        * Choose greater of short-term load (since last idle timer
+        * started or timer function re-armed itself) or long-term load
+        * (since last frequency change).
+        */
+       if (load_since_change > cpu_load)
+               cpu_load = load_since_change;
+
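+       /*
+        * On a load burst at the minimum speed, jump straight to
+        * hispeed_freq; above the minimum, scale the max frequency by the
+        * load.  Below go_hispeed_load, scale the current frequency by
+        * the load instead.
+        */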
+       if (cpu_load >= go_hispeed_load) {
+               if (pcpu->policy->cur == pcpu->policy->min)
+                       new_freq = hispeed_freq;
+               else
+                       new_freq = pcpu->policy->max * cpu_load / 100;
+       } else {
+               new_freq = pcpu->policy->cur * cpu_load / 100;
+       }
+
+       if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
+                                          new_freq, CPUFREQ_RELATION_H,
+                                          &index)) {
+               pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
+                            (int) data);
+               goto rearm;
+       }
+
+       new_freq = pcpu->freq_table[index].frequency;
+
+       if (pcpu->target_freq == new_freq)
+               goto rearm_if_notmax;
+
+       /*
+        * Do not scale down unless we have been at this frequency for the
+        * minimum sample time.
+        */
+       if (new_freq < pcpu->target_freq) {
+               if (pcpu->timer_run_time - pcpu->freq_change_time
+                   < min_sample_time)
+                       goto rearm;
+
+               pcpu->target_freq = new_freq;
+               spin_lock_irqsave(&down_cpumask_lock, flags);
+               cpumask_set_cpu(data, &down_cpumask);
+               spin_unlock_irqrestore(&down_cpumask_lock, flags);
+               queue_work(down_wq, &freq_scale_down_work);
+       } else {
+               pcpu->target_freq = new_freq;
+               spin_lock_irqsave(&up_cpumask_lock, flags);
+               cpumask_set_cpu(data, &up_cpumask);
+               spin_unlock_irqrestore(&up_cpumask_lock, flags);
+               wake_up_process(up_task);
+       }
+
+rearm_if_notmax:
+       /*
+        * Already at max speed with no need to change it; wait until the
+        * next idle to re-evaluate.  No timer needed.
+        */
+       if (pcpu->target_freq == pcpu->policy->max)
+               goto exit;
+
+rearm:
+       if (!timer_pending(&pcpu->cpu_timer)) {
+               /*
+                * If already at min: if that CPU is idle, don't set timer.
+                * Else cancel the timer if that CPU goes idle.  We don't
+                * need to re-evaluate speed until the next idle exit.
+                */
+               if (pcpu->target_freq == pcpu->policy->min) {
+                       smp_rmb();
+
+                       if (pcpu->idling)
+                               goto exit;
+
+                       pcpu->timer_idlecancel = 1;
+               }
+
+               pcpu->time_in_idle = get_cpu_idle_time_us(
+                       data, &pcpu->idle_exit_time);
+               mod_timer(&pcpu->cpu_timer,
+                         jiffies + usecs_to_jiffies(timer_rate));
+       }
+
+exit:
+       return;
+}
+
+static void cpufreq_interactive_idle_start(void)
+{
+       struct cpufreq_interactive_cpuinfo *pcpu =
+               &per_cpu(cpuinfo, smp_processor_id());
+       int pending;
+
+       if (!pcpu->governor_enabled)
+               return;
+
+       pcpu->idling = 1;
+       smp_wmb();
+       pending = timer_pending(&pcpu->cpu_timer);
+
+       if (pcpu->target_freq != pcpu->policy->min) {
+#ifdef CONFIG_SMP
+               /*
+                * Entering idle while not at lowest speed.  On some
+                * platforms this can hold the other CPU(s) at that speed
+                * even though the CPU is idle. Set a timer to re-evaluate
+                * speed so this idle CPU doesn't hold the other CPUs above
+                * min indefinitely.  This should probably be a quirk of
+                * the CPUFreq driver.
+                */
+               if (!pending) {
+                       pcpu->time_in_idle = get_cpu_idle_time_us(
+                               smp_processor_id(), &pcpu->idle_exit_time);
+                       pcpu->timer_idlecancel = 0;
+                       mod_timer(&pcpu->cpu_timer,
+                                 jiffies + usecs_to_jiffies(timer_rate));
+               }
+#endif
+       } else {
+               /*
+                * If at min speed and entering idle after load has
+                * already been evaluated, and a timer has been set just in
+                * case the CPU suddenly goes busy, cancel that timer.  The
+                * CPU didn't go busy; we'll recheck things upon idle exit.
+                */
+               if (pending && pcpu->timer_idlecancel) {
+                       del_timer(&pcpu->cpu_timer);
+                       /*
+                        * Ensure last timer run time is after current idle
+                        * sample start time, so next idle exit will always
+                        * start a new idle sampling period.
+                        */
+                       pcpu->idle_exit_time = 0;
+                       pcpu->timer_idlecancel = 0;
+               }
+       }
+}
+
+static void cpufreq_interactive_idle_end(void)
+{
+       struct cpufreq_interactive_cpuinfo *pcpu =
+               &per_cpu(cpuinfo, smp_processor_id());
+
+       pcpu->idling = 0;
+       smp_wmb();
+
+       /*
+        * Arm the timer for 1-2 ticks later if not already, and if the timer
+        * function has already processed the previous load sampling
+        * interval.  (If the timer is not pending but has not processed
+        * the previous interval, it is probably racing with us on another
+        * CPU.  Let it compute load based on the previous sample and then
+        * re-arm the timer for another interval when it's done, rather
+        * than updating the interval start time to be "now", which doesn't
+        * give the timer function enough time to make a decision on this
+        * run.)
+        */
+       if (timer_pending(&pcpu->cpu_timer) == 0 &&
+           pcpu->timer_run_time >= pcpu->idle_exit_time &&
+           pcpu->governor_enabled) {
+               pcpu->time_in_idle =
+                       get_cpu_idle_time_us(smp_processor_id(),
+                                            &pcpu->idle_exit_time);
+               pcpu->timer_idlecancel = 0;
+               mod_timer(&pcpu->cpu_timer,
+                         jiffies + usecs_to_jiffies(timer_rate));
+       }
+}
+
+static int cpufreq_interactive_up_task(void *data)
+{
+       unsigned int cpu;
+       cpumask_t tmp_mask;
+       unsigned long flags;
+       struct cpufreq_interactive_cpuinfo *pcpu;
+
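+       /*
+        * Sleep until the sampling timer flags CPUs in up_cpumask, then
+        * raise each flagged CPU's policy to the highest target_freq
+        * requested by any CPU that policy covers.
+        */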
+       while (1) {
+               set_current_state(TASK_INTERRUPTIBLE);
+               spin_lock_irqsave(&up_cpumask_lock, flags);
+
+               if (cpumask_empty(&up_cpumask)) {
+                       spin_unlock_irqrestore(&up_cpumask_lock, flags);
+                       schedule();
+
+                       if (kthread_should_stop())
+                               break;
+
+                       spin_lock_irqsave(&up_cpumask_lock, flags);
+               }
+
+               set_current_state(TASK_RUNNING);
+               tmp_mask = up_cpumask;
+               cpumask_clear(&up_cpumask);
+               spin_unlock_irqrestore(&up_cpumask_lock, flags);
+
+               for_each_cpu(cpu, &tmp_mask) {
+                       unsigned int j;
+                       unsigned int max_freq = 0;
+
+                       pcpu = &per_cpu(cpuinfo, cpu);
+                       smp_rmb();
+
+                       if (!pcpu->governor_enabled)
+                               continue;
+
+                       mutex_lock(&set_speed_lock);
+
+                       for_each_cpu(j, pcpu->policy->cpus) {
+                               struct cpufreq_interactive_cpuinfo *pjcpu =
+                                       &per_cpu(cpuinfo, j);
+
+                               if (pjcpu->target_freq > max_freq)
+                                       max_freq = pjcpu->target_freq;
+                       }
+
+                       if (max_freq != pcpu->policy->cur)
+                               __cpufreq_driver_target(pcpu->policy,
+                                                       max_freq,
+                                                       CPUFREQ_RELATION_H);
+                       mutex_unlock(&set_speed_lock);
+
+                       pcpu->freq_change_time_in_idle =
+                               get_cpu_idle_time_us(cpu,
+                                                    &pcpu->freq_change_time);
+               }
+       }
+
+       return 0;
+}
+
+static void cpufreq_interactive_freq_down(struct work_struct *work)
+{
+       unsigned int cpu;
+       cpumask_t tmp_mask;
+       unsigned long flags;
+       struct cpufreq_interactive_cpuinfo *pcpu;
+
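+       /*
+        * Down-path counterpart of the up task, run from the workqueue:
+        * snapshot and clear down_cpumask, then drive each flagged CPU's
+        * policy to the highest target_freq among the CPUs it covers.
+        */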
+       spin_lock_irqsave(&down_cpumask_lock, flags);
+       tmp_mask = down_cpumask;
+       cpumask_clear(&down_cpumask);
+       spin_unlock_irqrestore(&down_cpumask_lock, flags);
+
+       for_each_cpu(cpu, &tmp_mask) {
+               unsigned int j;
+               unsigned int max_freq = 0;
+
+               pcpu = &per_cpu(cpuinfo, cpu);
+               smp_rmb();
+
+               if (!pcpu->governor_enabled)
+                       continue;
+
+               mutex_lock(&set_speed_lock);
+
+               for_each_cpu(j, pcpu->policy->cpus) {
+                       struct cpufreq_interactive_cpuinfo *pjcpu =
+                               &per_cpu(cpuinfo, j);
+
+                       if (pjcpu->target_freq > max_freq)
+                               max_freq = pjcpu->target_freq;
+               }
+
+               if (max_freq != pcpu->policy->cur)
+                       __cpufreq_driver_target(pcpu->policy, max_freq,
+                                               CPUFREQ_RELATION_H);
+
+               mutex_unlock(&set_speed_lock);
+               pcpu->freq_change_time_in_idle =
+                       get_cpu_idle_time_us(cpu,
+                                            &pcpu->freq_change_time);
+       }
+}
+
+static ssize_t show_hispeed_freq(struct kobject *kobj,
+                                struct attribute *attr, char *buf)
+{
+       return sprintf(buf, "%llu\n", hispeed_freq);
+}
+
+static ssize_t store_hispeed_freq(struct kobject *kobj,
+                                 struct attribute *attr, const char *buf,
+                                 size_t count)
+{
+       int ret;
+       u64 val;
+
+       ret = strict_strtoull(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+       hispeed_freq = val;
+       return count;
+}
+
+static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
+               show_hispeed_freq, store_hispeed_freq);
+
+static ssize_t show_go_hispeed_load(struct kobject *kobj,
+                                    struct attribute *attr, char *buf)
+{
+       return sprintf(buf, "%lu\n", go_hispeed_load);
+}
+
+static ssize_t store_go_hispeed_load(struct kobject *kobj,
+                       struct attribute *attr, const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = strict_strtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+       go_hispeed_load = val;
+       return count;
+}
+
+static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
+               show_go_hispeed_load, store_go_hispeed_load);
+
+static ssize_t show_min_sample_time(struct kobject *kobj,
+                               struct attribute *attr, char *buf)
+{
+       return sprintf(buf, "%lu\n", min_sample_time);
+}
+
+static ssize_t store_min_sample_time(struct kobject *kobj,
+                       struct attribute *attr, const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = strict_strtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+       min_sample_time = val;
+       return count;
+}
+
+static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
+               show_min_sample_time, store_min_sample_time);
+
+static ssize_t show_timer_rate(struct kobject *kobj,
+                       struct attribute *attr, char *buf)
+{
+       return sprintf(buf, "%lu\n", timer_rate);
+}
+
+static ssize_t store_timer_rate(struct kobject *kobj,
+                       struct attribute *attr, const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = strict_strtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+       timer_rate = val;
+       return count;
+}
+
+static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
+               show_timer_rate, store_timer_rate);
+
+static struct attribute *interactive_attributes[] = {
+       &hispeed_freq_attr.attr,
+       &go_hispeed_load_attr.attr,
+       &min_sample_time_attr.attr,
+       &timer_rate_attr.attr,
+       NULL,
+};
+
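+/* Registered on cpufreq_global_kobject: the tunables appear under
+ * /sys/devices/system/cpu/cpufreq/interactive/.
+ */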
+static struct attribute_group interactive_attr_group = {
+       .attrs = interactive_attributes,
+       .name = "interactive",
+};
+
+static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
+               unsigned int event)
+{
+       int rc;
+       unsigned int j;
+       struct cpufreq_interactive_cpuinfo *pcpu;
+       struct cpufreq_frequency_table *freq_table;
+
+       switch (event) {
+       case CPUFREQ_GOV_START:
+               if (!cpu_online(policy->cpu))
+                       return -EINVAL;
+
+               freq_table =
+                       cpufreq_frequency_get_table(policy->cpu);
+
+               for_each_cpu(j, policy->cpus) {
+                       pcpu = &per_cpu(cpuinfo, j);
+                       pcpu->policy = policy;
+                       pcpu->target_freq = policy->cur;
+                       pcpu->freq_table = freq_table;
+                       pcpu->freq_change_time_in_idle =
+                               get_cpu_idle_time_us(j,
+                                            &pcpu->freq_change_time);
+                       pcpu->governor_enabled = 1;
+                       smp_wmb();
+               }
+
+               if (!hispeed_freq)
+                       hispeed_freq = policy->max;
+
+               /*
+                * Do not register the idle hook and create sysfs
+                * entries if we have already done so.
+                */
+               if (atomic_inc_return(&active_count) > 1)
+                       return 0;
+
+               rc = sysfs_create_group(cpufreq_global_kobject,
+                               &interactive_attr_group);
+               if (rc)
+                       return rc;
+
+               break;
+
+       case CPUFREQ_GOV_STOP:
+               for_each_cpu(j, policy->cpus) {
+                       pcpu = &per_cpu(cpuinfo, j);
+                       pcpu->governor_enabled = 0;
+                       smp_wmb();
+                       del_timer_sync(&pcpu->cpu_timer);
+
+                       /*
+                        * Reset idle exit time since we may cancel the timer
+                        * before it can run after the last idle exit time,
+                        * to avoid tripping the check in idle exit for a timer
+                        * that is trying to run.
+                        */
+                       pcpu->idle_exit_time = 0;
+               }
+
+               flush_work(&freq_scale_down_work);
+               if (atomic_dec_return(&active_count) > 0)
+                       return 0;
+
+               sysfs_remove_group(cpufreq_global_kobject,
+                               &interactive_attr_group);
+
+               break;
+
+       case CPUFREQ_GOV_LIMITS:
+               if (policy->max < policy->cur)
+                       __cpufreq_driver_target(policy,
+                                       policy->max, CPUFREQ_RELATION_H);
+               else if (policy->min > policy->cur)
+                       __cpufreq_driver_target(policy,
+                                       policy->min, CPUFREQ_RELATION_L);
+               break;
+       }
+       return 0;
+}
+
+static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
+                                            unsigned long val,
+                                            void *data)
+{
+       switch (val) {
+       case IDLE_START:
+               cpufreq_interactive_idle_start();
+               break;
+       case IDLE_END:
+               cpufreq_interactive_idle_end();
+               break;
+       }
+
+       return 0;
+}
+
+static struct notifier_block cpufreq_interactive_idle_nb = {
+       .notifier_call = cpufreq_interactive_idle_notifier,
+};
+
+static int __init cpufreq_interactive_init(void)
+{
+       unsigned int i;
+       struct cpufreq_interactive_cpuinfo *pcpu;
+       struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+
+       go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
+       min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
+       timer_rate = DEFAULT_TIMER_RATE;
+
+       /* Initialize per-CPU timers */
+       for_each_possible_cpu(i) {
+               pcpu = &per_cpu(cpuinfo, i);
+               init_timer(&pcpu->cpu_timer);
+               pcpu->cpu_timer.function = cpufreq_interactive_timer;
+               pcpu->cpu_timer.data = i;
+       }
+
+       up_task = kthread_create(cpufreq_interactive_up_task, NULL,
+                                "kinteractiveup");
+       if (IS_ERR(up_task))
+               return PTR_ERR(up_task);
+
+       sched_setscheduler_nocheck(up_task, SCHED_FIFO, &param);
+       get_task_struct(up_task);
+
+       /*
+        * No rescuer thread: bind work to the CPU that queued it, for a
+        * possibly warm cache (probably doesn't matter much).
+        */
+       down_wq = alloc_workqueue("kinteractive_down", 0, 1);
+
+       if (!down_wq)
+               goto err_freeuptask;
+
+       INIT_WORK(&freq_scale_down_work,
+                 cpufreq_interactive_freq_down);
+
+       spin_lock_init(&up_cpumask_lock);
+       spin_lock_init(&down_cpumask_lock);
+       mutex_init(&set_speed_lock);
+
+       idle_notifier_register(&cpufreq_interactive_idle_nb);
+
+       return cpufreq_register_governor(&cpufreq_gov_interactive);
+
+err_freeuptask:
+       put_task_struct(up_task);
+       return -ENOMEM;
+}
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+fs_initcall(cpufreq_interactive_init);
+#else
+module_init(cpufreq_interactive_init);
+#endif
+
+static void __exit cpufreq_interactive_exit(void)
+{
+       cpufreq_unregister_governor(&cpufreq_gov_interactive);
+       kthread_stop(up_task);
+       put_task_struct(up_task);
+       destroy_workqueue(down_wq);
+}
+
+module_exit(cpufreq_interactive_exit);
+
+MODULE_AUTHOR("Mike Chan <mike@android.com>");
+MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
+       "latency-sensitive workloads");
+MODULE_LICENSE("GPL");
index b40ee14..72f0093 100644 (file)
@@ -316,6 +316,27 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
        return 0;
 }
 
+static int cpufreq_stats_create_table_cpu(unsigned int cpu)
+{
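+       /*
+        * Used when a CPU offline attempt fails: the CPU's stats table may
+        * already have been freed while the offline was being prepared, so
+        * rebuild it from the CPU's policy and frequency table.
+        */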
+       struct cpufreq_policy *policy;
+       struct cpufreq_frequency_table *table;
+       int ret = -ENODEV;
+
+       policy = cpufreq_cpu_get(cpu);
+       if (!policy)
+               return -ENODEV;
+
+       table = cpufreq_frequency_get_table(cpu);
+       if (!table)
+               goto out;
+
+       ret = cpufreq_stats_create_table(policy, table);
+
+out:
+       cpufreq_cpu_put(policy);
+       return ret;
+}
+
 static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
                                               unsigned long action,
                                               void *hcpu)
@@ -334,6 +355,10 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
        case CPU_DEAD_FROZEN:
                cpufreq_stats_free_table(cpu);
                break;
+       case CPU_DOWN_FAILED:
+       case CPU_DOWN_FAILED_FROZEN:
+               cpufreq_stats_create_table_cpu(cpu);
+               break;
        }
        return NOTIFY_OK;
 }
index ad09526..3d6c2ae 100644 (file)
@@ -173,7 +173,12 @@ static inline int performance_multiplier(void)
 
        /* for higher loadavg, we are more reluctant */
 
-       mult += 2 * get_loadavg();
+       /*
+        * This doesn't work as intended: it is almost always 0, but can
+        * sometimes, depending on workload, spike very high into the hundreds
+        * even when the average CPU load is under 10%.
+        */
+       /* mult += 2 * get_loadavg(); */
 
        /* for IO wait tasks (per cpu!) we add 5x each */
        mult += 10 * nr_iowait_cpu(smp_processor_id());
index cc92778..ca2d3b3 100644 (file)
@@ -1 +1 @@
-obj-y                  += drm/ vga/ stub/
+obj-y                  += drm/ vga/ stub/ ion/
diff --git a/drivers/gpu/ion/Kconfig b/drivers/gpu/ion/Kconfig
new file mode 100644 (file)
index 0000000..5b48b4e
--- /dev/null
@@ -0,0 +1,12 @@
+menuconfig ION
+       tristate "Ion Memory Manager"
+       select GENERIC_ALLOCATOR
+       help
+         Choose this option to enable the ION Memory Manager.
+
+config ION_TEGRA
+       tristate "Ion for Tegra"
+       depends on ARCH_TEGRA && ION
+       help
+         Choose this option if you wish to use ION on an NVIDIA Tegra.
+
diff --git a/drivers/gpu/ion/Makefile b/drivers/gpu/ion/Makefile
new file mode 100644 (file)
index 0000000..73fe3fa
--- /dev/null
@@ -0,0 +1,2 @@
+obj-$(CONFIG_ION) += ion.o ion_heap.o ion_system_heap.o ion_carveout_heap.o
+obj-$(CONFIG_ION_TEGRA) += tegra/
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
new file mode 100644 (file)
index 0000000..1002ec0
--- /dev/null
@@ -0,0 +1,1187 @@
+/*
+ * drivers/gpu/ion/ion.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* Define DEBUG before any includes so pr_debug() output is compiled in. */
+#define DEBUG
+
+#include <linux/device.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/ion.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+
+#include "ion_priv.h"
+#define DEBUG
+
+/**
+ * struct ion_device - the metadata of the ion device node
+ * @dev:               the actual misc device
+ * @buffers:   an rb tree of all the existing buffers
+ * @lock:              lock protecting the buffers & heaps trees
+ * @heaps:             list of all the heaps in the system
+ * @custom_ioctl:      hook for device-specific ioctls, may be NULL
+ * @user_clients:      list of all the clients created from userspace
+ * @kernel_clients:    list of all the clients created from the kernel
+ * @debug_root:                root dentry for this device's debugfs entries
+ */
+struct ion_device {
+       struct miscdevice dev;
+       struct rb_root buffers;
+       struct mutex lock;
+       struct rb_root heaps;
+       long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
+                             unsigned long arg);
+       struct rb_root user_clients;
+       struct rb_root kernel_clients;
+       struct dentry *debug_root;
+};
+
+/**
+ * struct ion_client - a process/hw block local address space
+ * @ref:               for reference counting the client
+ * @node:              node in the tree of all clients
+ * @dev:               backpointer to ion device
+ * @handles:           an rb tree of all the handles in this client
+ * @lock:              lock protecting the tree of handles
+ * @heap_mask:         mask of all supported heaps
+ * @name:              used for debugging
+ * @task:              used for debugging
+ * @pid:               pid of the task that created this client, for debugging
+ * @debug_root:                this client's debugfs entry
+ *
+ * A client represents a list of buffers this client may access.
+ * The mutex stored here is used to protect both the tree of handles
+ * and the handles themselves; it should be held while modifying either.
+ */
+struct ion_client {
+       struct kref ref;
+       struct rb_node node;
+       struct ion_device *dev;
+       struct rb_root handles;
+       struct mutex lock;
+       unsigned int heap_mask;
+       const char *name;
+       struct task_struct *task;
+       pid_t pid;
+       struct dentry *debug_root;
+};
+
+/**
+ * ion_handle - a client local reference to a buffer
+ * @ref:               reference count
+ * @client:            back pointer to the client the buffer resides in
+ * @buffer:            pointer to the buffer
+ * @node:              node in the client's handle rbtree
+ * @kmap_cnt:          count of times this client has mapped to kernel
+ * @dmap_cnt:          count of times this client has mapped for dma
+ * @usermap_cnt:       count of times this client has mapped for userspace
+ *
+ * Modifications to node, map_cnt or mapping should be protected by the
+ * lock in the client.  Other fields are never changed after initialization.
+ */
+struct ion_handle {
+       struct kref ref;
+       struct ion_client *client;
+       struct ion_buffer *buffer;
+       struct rb_node node;
+       unsigned int kmap_cnt;
+       unsigned int dmap_cnt;
+       unsigned int usermap_cnt;
+};
+
+/* this function should only be called while dev->lock is held */
+static void ion_buffer_add(struct ion_device *dev,
+                          struct ion_buffer *buffer)
+{
+       struct rb_node **p = &dev->buffers.rb_node;
+       struct rb_node *parent = NULL;
+       struct ion_buffer *entry;
+
+       while (*p) {
+               parent = *p;
+               entry = rb_entry(parent, struct ion_buffer, node);
+
+               if (buffer < entry) {
+                       p = &(*p)->rb_left;
+               } else if (buffer > entry) {
+                       p = &(*p)->rb_right;
+               } else {
+                       pr_err("%s: buffer already found.\n", __func__);
+                       BUG();
+               }
+       }
+
+       rb_link_node(&buffer->node, parent, p);
+       rb_insert_color(&buffer->node, &dev->buffers);
+}
+
+/* this function should only be called while dev->lock is held */
+static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
+                                    struct ion_device *dev,
+                                    unsigned long len,
+                                    unsigned long align,
+                                    unsigned long flags)
+{
+       struct ion_buffer *buffer;
+       int ret;
+
+       buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
+       if (!buffer)
+               return ERR_PTR(-ENOMEM);
+
+       buffer->heap = heap;
+       kref_init(&buffer->ref);
+
+       ret = heap->ops->allocate(heap, buffer, len, align, flags);
+       if (ret) {
+               kfree(buffer);
+               return ERR_PTR(ret);
+       }
+       buffer->dev = dev;
+       buffer->size = len;
+       mutex_init(&buffer->lock);
+       ion_buffer_add(dev, buffer);
+       return buffer;
+}
+
+static void ion_buffer_destroy(struct kref *kref)
+{
+       struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
+       struct ion_device *dev = buffer->dev;
+
+       buffer->heap->ops->free(buffer);
+       mutex_lock(&dev->lock);
+       rb_erase(&buffer->node, &dev->buffers);
+       mutex_unlock(&dev->lock);
+       kfree(buffer);
+}
+
+static void ion_buffer_get(struct ion_buffer *buffer)
+{
+       kref_get(&buffer->ref);
+}
+
+static int ion_buffer_put(struct ion_buffer *buffer)
+{
+       return kref_put(&buffer->ref, ion_buffer_destroy);
+}
+
+static struct ion_handle *ion_handle_create(struct ion_client *client,
+                                    struct ion_buffer *buffer)
+{
+       struct ion_handle *handle;
+
+       handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
+       if (!handle)
+               return ERR_PTR(-ENOMEM);
+       kref_init(&handle->ref);
+       rb_init_node(&handle->node);
+       handle->client = client;
+       ion_buffer_get(buffer);
+       handle->buffer = buffer;
+
+       return handle;
+}
+
+static void ion_handle_destroy(struct kref *kref)
+{
+       struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
+       /* XXX Can a handle be destroyed while its map count is non-zero?:
+          if (handle->map_cnt) unmap
+        */
+       ion_buffer_put(handle->buffer);
+       mutex_lock(&handle->client->lock);
+       if (!RB_EMPTY_NODE(&handle->node))
+               rb_erase(&handle->node, &handle->client->handles);
+       mutex_unlock(&handle->client->lock);
+       kfree(handle);
+}
+
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
+{
+       return handle->buffer;
+}
+
+static void ion_handle_get(struct ion_handle *handle)
+{
+       kref_get(&handle->ref);
+}
+
+static int ion_handle_put(struct ion_handle *handle)
+{
+       return kref_put(&handle->ref, ion_handle_destroy);
+}
+
+static struct ion_handle *ion_handle_lookup(struct ion_client *client,
+                                           struct ion_buffer *buffer)
+{
+       struct rb_node *n;
+
+       for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+               struct ion_handle *handle = rb_entry(n, struct ion_handle,
+                                                    node);
+               if (handle->buffer == buffer)
+                       return handle;
+       }
+       return NULL;
+}
+
+static bool ion_handle_validate(struct ion_client *client,
+                               struct ion_handle *handle)
+{
+       struct rb_node *n = client->handles.rb_node;
+
+       while (n) {
+               struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
+                                                         node);
+               if (handle < handle_node)
+                       n = n->rb_left;
+               else if (handle > handle_node)
+                       n = n->rb_right;
+               else
+                       return true;
+       }
+       return false;
+}
+
+static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
+{
+       struct rb_node **p = &client->handles.rb_node;
+       struct rb_node *parent = NULL;
+       struct ion_handle *entry;
+
+       while (*p) {
+               parent = *p;
+               entry = rb_entry(parent, struct ion_handle, node);
+
+               if (handle < entry)
+                       p = &(*p)->rb_left;
+               else if (handle > entry)
+                       p = &(*p)->rb_right;
+               else
+                       WARN(1, "%s: handle already found.\n", __func__);
+       }
+
+       rb_link_node(&handle->node, parent, p);
+       rb_insert_color(&handle->node, &client->handles);
+}
+
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+                            size_t align, unsigned int flags)
+{
+       struct rb_node *n;
+       struct ion_handle *handle;
+       struct ion_device *dev = client->dev;
+       struct ion_buffer *buffer = NULL;
+
+       /*
+        * traverse the list of heaps available in this system in priority
+        * order.  If the heap type is supported by the client, and matches the
+        * request of the caller allocate from it.  Repeat until allocate has
+        * succeeded or all heaps have been tried
+        */
+       mutex_lock(&dev->lock);
+       for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
+               struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
+               /* if the client doesn't support this heap type */
+               if (!((1 << heap->type) & client->heap_mask))
+                       continue;
+               /* if the caller didn't specify this heap type */
+               if (!((1 << heap->id) & flags))
+                       continue;
+               buffer = ion_buffer_create(heap, dev, len, align, flags);
+               if (!IS_ERR_OR_NULL(buffer))
+                       break;
+       }
+       mutex_unlock(&dev->lock);
+
+       if (IS_ERR_OR_NULL(buffer))
+               return ERR_CAST(buffer);
+
+       handle = ion_handle_create(client, buffer);
+
+       if (IS_ERR_OR_NULL(handle))
+               goto end;
+
+       /*
+        * ion_buffer_create will create a buffer with a ref_cnt of 1,
+        * and ion_handle_create will take a second reference, drop one here
+        */
+       ion_buffer_put(buffer);
+
+       mutex_lock(&client->lock);
+       ion_handle_add(client, handle);
+       mutex_unlock(&client->lock);
+       return handle;
+
+end:
+       ion_buffer_put(buffer);
+       return handle;
+}
+
+void ion_free(struct ion_client *client, struct ion_handle *handle)
+{
+       bool valid_handle;
+
+       BUG_ON(client != handle->client);
+
+       mutex_lock(&client->lock);
+       valid_handle = ion_handle_validate(client, handle);
+       mutex_unlock(&client->lock);
+
+       if (!valid_handle) {
+               WARN(1, "%s: invalid handle passed to free.\n", __func__);
+               return;
+       }
+       ion_handle_put(handle);
+}
+
+static void ion_client_get(struct ion_client *client);
+static int ion_client_put(struct ion_client *client);
+
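+/*
+ * Keep the per-handle and per-buffer map counts in step.  The first
+ * mapping through a handle bumps the buffer's count; the return value
+ * tells the caller whether this is the buffer's first mapping (so the
+ * heap op must actually be called) or a reuse of an existing mapping.
+ */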
+static bool _ion_map(int *buffer_cnt, int *handle_cnt)
+{
+       bool map;
+
+       BUG_ON(*handle_cnt != 0 && *buffer_cnt == 0);
+
+       if (*buffer_cnt)
+               map = false;
+       else
+               map = true;
+       if (*handle_cnt == 0)
+               (*buffer_cnt)++;
+       (*handle_cnt)++;
+       return map;
+}
+
+static bool _ion_unmap(int *buffer_cnt, int *handle_cnt)
+{
+       BUG_ON(*handle_cnt == 0);
+       (*handle_cnt)--;
+       if (*handle_cnt != 0)
+               return false;
+       BUG_ON(*buffer_cnt == 0);
+       (*buffer_cnt)--;
+       if (*buffer_cnt == 0)
+               return true;
+       return false;
+}
+
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+            ion_phys_addr_t *addr, size_t *len)
+{
+       struct ion_buffer *buffer;
+       int ret;
+
+       mutex_lock(&client->lock);
+       if (!ion_handle_validate(client, handle)) {
+               mutex_unlock(&client->lock);
+               return -EINVAL;
+       }
+
+       buffer = handle->buffer;
+
+       if (!buffer->heap->ops->phys) {
+               pr_err("%s: ion_phys is not implemented by this heap.\n",
+                      __func__);
+               mutex_unlock(&client->lock);
+               return -ENODEV;
+       }
+       mutex_unlock(&client->lock);
+       ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
+       return ret;
+}
+
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
+{
+       struct ion_buffer *buffer;
+       void *vaddr;
+
+       mutex_lock(&client->lock);
+       if (!ion_handle_validate(client, handle)) {
+               pr_err("%s: invalid handle passed to map_kernel.\n",
+                      __func__);
+               mutex_unlock(&client->lock);
+               return ERR_PTR(-EINVAL);
+       }
+
+       buffer = handle->buffer;
+       mutex_lock(&buffer->lock);
+
+       if (!handle->buffer->heap->ops->map_kernel) {
+               pr_err("%s: map_kernel is not implemented by this heap.\n",
+                      __func__);
+               mutex_unlock(&buffer->lock);
+               mutex_unlock(&client->lock);
+               return ERR_PTR(-ENODEV);
+       }
+
+       if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) {
+               vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
+               if (IS_ERR_OR_NULL(vaddr))
+                       _ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt);
+               buffer->vaddr = vaddr;
+       } else {
+               vaddr = buffer->vaddr;
+       }
+       mutex_unlock(&buffer->lock);
+       mutex_unlock(&client->lock);
+       return vaddr;
+}
+
+struct scatterlist *ion_map_dma(struct ion_client *client,
+                               struct ion_handle *handle)
+{
+       struct ion_buffer *buffer;
+       struct scatterlist *sglist;
+
+       mutex_lock(&client->lock);
+       if (!ion_handle_validate(client, handle)) {
+               pr_err("%s: invalid handle passed to map_dma.\n",
+                      __func__);
+               mutex_unlock(&client->lock);
+               return ERR_PTR(-EINVAL);
+       }
+       buffer = handle->buffer;
+       mutex_lock(&buffer->lock);
+
+       if (!handle->buffer->heap->ops->map_dma) {
+               pr_err("%s: map_dma is not implemented by this heap.\n",
+                      __func__);
+               mutex_unlock(&buffer->lock);
+               mutex_unlock(&client->lock);
+               return ERR_PTR(-ENODEV);
+       }
+       if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
+               sglist = buffer->heap->ops->map_dma(buffer->heap, buffer);
+               if (IS_ERR_OR_NULL(sglist))
+                       _ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt);
+               buffer->sglist = sglist;
+       } else {
+               sglist = buffer->sglist;
+       }
+       mutex_unlock(&buffer->lock);
+       mutex_unlock(&client->lock);
+       return sglist;
+}
+
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
+{
+       struct ion_buffer *buffer;
+
+       mutex_lock(&client->lock);
+       buffer = handle->buffer;
+       mutex_lock(&buffer->lock);
+       if (_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt)) {
+               buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+               buffer->vaddr = NULL;
+       }
+       mutex_unlock(&buffer->lock);
+       mutex_unlock(&client->lock);
+}
+
+void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle)
+{
+       struct ion_buffer *buffer;
+
+       mutex_lock(&client->lock);
+       buffer = handle->buffer;
+       mutex_lock(&buffer->lock);
+       if (_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt)) {
+               buffer->heap->ops->unmap_dma(buffer->heap, buffer);
+               buffer->sglist = NULL;
+       }
+       mutex_unlock(&buffer->lock);
+       mutex_unlock(&client->lock);
+}
+
+struct ion_buffer *ion_share(struct ion_client *client,
+                                struct ion_handle *handle)
+{
+       bool valid_handle;
+
+       mutex_lock(&client->lock);
+       valid_handle = ion_handle_validate(client, handle);
+       mutex_unlock(&client->lock);
+       if (!valid_handle) {
+               WARN(1, "%s: invalid handle passed to share.\n", __func__);
+               return ERR_PTR(-EINVAL);
+       }
+
+       /* do not take an extra reference here, the burden is on the caller
+        * to make sure the buffer doesn't go away while it's passing it
+        * to another client -- ion_free should not be called on this handle
+        * until the buffer has been imported into the other client
+        */
+       return handle->buffer;
+}
+
+struct ion_handle *ion_import(struct ion_client *client,
+                             struct ion_buffer *buffer)
+{
+       struct ion_handle *handle = NULL;
+
+       mutex_lock(&client->lock);
+       /* if a handle exists for this buffer just take a reference to it */
+       handle = ion_handle_lookup(client, buffer);
+       if (!IS_ERR_OR_NULL(handle)) {
+               ion_handle_get(handle);
+               goto end;
+       }
+       handle = ion_handle_create(client, buffer);
+       if (IS_ERR_OR_NULL(handle))
+               goto end;
+       ion_handle_add(client, handle);
+end:
+       mutex_unlock(&client->lock);
+       return handle;
+}
+
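+/*
+ * Forward declaration: ion_import_fd() compares an fd's f_op against
+ * ion_share_fops to verify it really wraps a shared ion buffer.
+ */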
+static const struct file_operations ion_share_fops;
+
+struct ion_handle *ion_import_fd(struct ion_client *client, int fd)
+{
+       struct file *file = fget(fd);
+       struct ion_handle *handle;
+
+       if (!file) {
+               pr_err("%s: imported fd not found in file table.\n", __func__);
+               return ERR_PTR(-EINVAL);
+       }
+       if (file->f_op != &ion_share_fops) {
+               pr_err("%s: imported file is not a shared ion file.\n",
+                      __func__);
+               handle = ERR_PTR(-EINVAL);
+               goto end;
+       }
+       handle = ion_import(client, file->private_data);
+end:
+       fput(file);
+       return handle;
+}
+
+static int ion_debug_client_show(struct seq_file *s, void *unused)
+{
+       struct ion_client *client = s->private;
+       struct rb_node *n;
+       size_t sizes[ION_NUM_HEAPS] = {0};
+       const char *names[ION_NUM_HEAPS] = {0};
+       int i;
+
+       mutex_lock(&client->lock);
+       for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+               struct ion_handle *handle = rb_entry(n, struct ion_handle,
+                                                    node);
+               enum ion_heap_type type = handle->buffer->heap->type;
+
+               if (!names[type])
+                       names[type] = handle->buffer->heap->name;
+               sizes[type] += handle->buffer->size;
+       }
+       mutex_unlock(&client->lock);
+
+       seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
+       for (i = 0; i < ION_NUM_HEAPS; i++) {
+               if (!names[i])
+                       continue;
+               seq_printf(s, "%16.16s: %16zu %d\n", names[i], sizes[i],
+                          atomic_read(&client->ref.refcount));
+       }
+       return 0;
+}
+
+static int ion_debug_client_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, ion_debug_client_show, inode->i_private);
+}
+
+static const struct file_operations debug_client_fops = {
+       .open = ion_debug_client_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+static struct ion_client *ion_client_lookup(struct ion_device *dev,
+                                           struct task_struct *task)
+{
+       struct rb_node *n = dev->user_clients.rb_node;
+       struct ion_client *client;
+
+       mutex_lock(&dev->lock);
+       while (n) {
+               client = rb_entry(n, struct ion_client, node);
+               if (task == client->task) {
+                       ion_client_get(client);
+                       mutex_unlock(&dev->lock);
+                       return client;
+               } else if (task < client->task) {
+                       n = n->rb_left;
+               } else if (task > client->task) {
+                       n = n->rb_right;
+               }
+       }
+       mutex_unlock(&dev->lock);
+       return NULL;
+}
+
+struct ion_client *ion_client_create(struct ion_device *dev,
+                                    unsigned int heap_mask,
+                                    const char *name)
+{
+       struct ion_client *client;
+       struct task_struct *task;
+       struct rb_node **p;
+       struct rb_node *parent = NULL;
+       struct ion_client *entry;
+       char debug_name[64];
+       pid_t pid;
+
+       get_task_struct(current->group_leader);
+       task_lock(current->group_leader);
+       pid = task_pid_nr(current->group_leader);
+       /* don't bother to store task struct for kernel threads,
+          they can't be killed anyway */
+       if (current->group_leader->flags & PF_KTHREAD) {
+               put_task_struct(current->group_leader);
+               task = NULL;
+       } else {
+               task = current->group_leader;
+       }
+       task_unlock(current->group_leader);
+
+       /* if this isn't a kernel thread, see if a client already
+          exists */
+       if (task) {
+               client = ion_client_lookup(dev, task);
+               if (!IS_ERR_OR_NULL(client)) {
+                       put_task_struct(current->group_leader);
+                       return client;
+               }
+       }
+
+       client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
+       if (!client) {
+               put_task_struct(current->group_leader);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       client->dev = dev;
+       client->handles = RB_ROOT;
+       mutex_init(&client->lock);
+       client->name = name;
+       client->heap_mask = heap_mask;
+       client->task = task;
+       client->pid = pid;
+       kref_init(&client->ref);
+
+       mutex_lock(&dev->lock);
+       if (task) {
+               p = &dev->user_clients.rb_node;
+               while (*p) {
+                       parent = *p;
+                       entry = rb_entry(parent, struct ion_client, node);
+
+                       if (task < entry->task)
+                               p = &(*p)->rb_left;
+                       else if (task > entry->task)
+                               p = &(*p)->rb_right;
+               }
+               rb_link_node(&client->node, parent, p);
+               rb_insert_color(&client->node, &dev->user_clients);
+       } else {
+               p = &dev->kernel_clients.rb_node;
+               while (*p) {
+                       parent = *p;
+                       entry = rb_entry(parent, struct ion_client, node);
+
+                       if (client < entry)
+                               p = &(*p)->rb_left;
+                       else if (client > entry)
+                               p = &(*p)->rb_right;
+               }
+               rb_link_node(&client->node, parent, p);
+               rb_insert_color(&client->node, &dev->kernel_clients);
+       }
+
+       snprintf(debug_name, 64, "%u", client->pid);
+       client->debug_root = debugfs_create_file(debug_name, 0664,
+                                                dev->debug_root, client,
+                                                &debug_client_fops);
+       mutex_unlock(&dev->lock);
+
+       return client;
+}
+
+static void _ion_client_destroy(struct kref *kref)
+{
+       struct ion_client *client = container_of(kref, struct ion_client, ref);
+       struct ion_device *dev = client->dev;
+       struct rb_node *n;
+
+       pr_debug("%s: %d\n", __func__, __LINE__);
+       while ((n = rb_first(&client->handles))) {
+               struct ion_handle *handle = rb_entry(n, struct ion_handle,
+                                                    node);
+               ion_handle_destroy(&handle->ref);
+       }
+       mutex_lock(&dev->lock);
+       if (client->task) {
+               rb_erase(&client->node, &dev->user_clients);
+               put_task_struct(client->task);
+       } else {
+               rb_erase(&client->node, &dev->kernel_clients);
+       }
+       debugfs_remove_recursive(client->debug_root);
+       mutex_unlock(&dev->lock);
+
+       kfree(client);
+}
+
+static void ion_client_get(struct ion_client *client)
+{
+       kref_get(&client->ref);
+}
+
+static int ion_client_put(struct ion_client *client)
+{
+       return kref_put(&client->ref, _ion_client_destroy);
+}
+
+void ion_client_destroy(struct ion_client *client)
+{
+       ion_client_put(client);
+}
+
+static int ion_share_release(struct inode *inode, struct file* file)
+{
+       struct ion_buffer *buffer = file->private_data;
+
+       pr_debug("%s: %d\n", __func__, __LINE__);
+       /* drop the reference to the buffer -- this prevents the
+          buffer from going away because the client holding it exited
+          while it was being passed */
+       ion_buffer_put(buffer);
+       return 0;
+}
+
+static void ion_vma_open(struct vm_area_struct *vma)
+{
+       struct ion_buffer *buffer = vma->vm_file->private_data;
+       struct ion_handle *handle = vma->vm_private_data;
+       struct ion_client *client;
+
+       pr_debug("%s: %d\n", __func__, __LINE__);
+       /* check that the client still exists and take a reference so
+          it can't go away until this vma is closed */
+       client = ion_client_lookup(buffer->dev, current->group_leader);
+       if (IS_ERR_OR_NULL(client)) {
+               vma->vm_private_data = NULL;
+               return;
+       }
+       pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
+                __func__, __LINE__,
+                atomic_read(&client->ref.refcount),
+                atomic_read(&handle->ref.refcount),
+                atomic_read(&buffer->ref.refcount));
+}
+
+static void ion_vma_close(struct vm_area_struct *vma)
+{
+       struct ion_handle *handle = vma->vm_private_data;
+       struct ion_buffer *buffer = vma->vm_file->private_data;
+       struct ion_client *client;
+
+       pr_debug("%s: %d\n", __func__, __LINE__);
+       /* this indicates the client is gone, nothing to do here */
+       if (!handle)
+               return;
+       client = handle->client;
+       pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
+                __func__, __LINE__,
+                atomic_read(&client->ref.refcount),
+                atomic_read(&handle->ref.refcount),
+                atomic_read(&buffer->ref.refcount));
+       ion_handle_put(handle);
+       ion_client_put(client);
+       pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
+                __func__, __LINE__,
+                atomic_read(&client->ref.refcount),
+                atomic_read(&handle->ref.refcount),
+                atomic_read(&buffer->ref.refcount));
+}
+
+static const struct vm_operations_struct ion_vm_ops = {
+       .open = ion_vma_open,
+       .close = ion_vma_close,
+};
+
+static int ion_share_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       struct ion_buffer *buffer = file->private_data;
+       unsigned long size = vma->vm_end - vma->vm_start;
+       struct ion_client *client;
+       struct ion_handle *handle;
+       int ret;
+
+       pr_debug("%s: %d\n", __func__, __LINE__);
+       /* make sure the client still exists, it's possible for the client to
+          have gone away but the map/share fd still to be around, take
+          a reference to it so it can't go away while this mapping exists */
+       client = ion_client_lookup(buffer->dev, current->group_leader);
+       if (IS_ERR_OR_NULL(client)) {
+               pr_err("%s: trying to mmap an ion handle in a process with no "
+                      "ion client\n", __func__);
+               return -EINVAL;
+       }
+
+       if ((size > buffer->size) || (size + (vma->vm_pgoff << PAGE_SHIFT) >
+                                    buffer->size)) {
+               pr_err("%s: trying to map larger area than handle has available"
+                      "\n", __func__);
+               ret = -EINVAL;
+               goto err;
+       }
+
+       /* find the handle and take a reference to it */
+       handle = ion_import(client, buffer);
+       if (IS_ERR_OR_NULL(handle)) {
+               ret = -EINVAL;
+               goto err;
+       }
+
+       if (!handle->buffer->heap->ops->map_user) {
+               pr_err("%s: this heap does not define a method for mapping "
+                      "to userspace\n", __func__);
+               ret = -EINVAL;
+               goto err1;
+       }
+
+       mutex_lock(&buffer->lock);
+       /* now map it to userspace */
+       ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
+       mutex_unlock(&buffer->lock);
+       if (ret) {
+               pr_err("%s: failure mapping buffer to userspace\n",
+                      __func__);
+               goto err1;
+       }
+
+       vma->vm_ops = &ion_vm_ops;
+       /* move the handle into the vm_private_data so we can access it from
+          vma_open/close */
+       vma->vm_private_data = handle;
+       pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
+                __func__, __LINE__,
+                atomic_read(&client->ref.refcount),
+                atomic_read(&handle->ref.refcount),
+                atomic_read(&buffer->ref.refcount));
+       return 0;
+
+err1:
+       /* drop the reference to the handle */
+       ion_handle_put(handle);
+err:
+       /* drop the reference to the client */
+       ion_client_put(client);
+       return ret;
+}
+
+static const struct file_operations ion_share_fops = {
+       .owner          = THIS_MODULE,
+       .release        = ion_share_release,
+       .mmap           = ion_share_mmap,
+};
+
+static int ion_ioctl_share(struct file *parent, struct ion_client *client,
+                          struct ion_handle *handle)
+{
+       int fd = get_unused_fd();
+       struct file *file;
+
+       if (fd < 0)
+               return -ENFILE;
+
+       file = anon_inode_getfile("ion_share_fd", &ion_share_fops,
+                                 handle->buffer, O_RDWR);
+       if (IS_ERR_OR_NULL(file))
+               goto err;
+       ion_buffer_get(handle->buffer);
+       fd_install(fd, file);
+
+       return fd;
+
+err:
+       put_unused_fd(fd);
+       return -ENFILE;
+}
+
+static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+       struct ion_client *client = filp->private_data;
+
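+       /*
+        * Each command copies a small struct to/from userspace:
+        * ALLOC/FREE manage handles, MAP/SHARE export a buffer as an fd
+        * backed by ion_share_fops, IMPORT wraps such an fd in a local
+        * handle, and CUSTOM forwards to the device-specific hook.
+        */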
+       switch (cmd) {
+       case ION_IOC_ALLOC:
+       {
+               struct ion_allocation_data data;
+
+               if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+                       return -EFAULT;
+               data.handle = ion_alloc(client, data.len, data.align,
+                                            data.flags);
+               if (copy_to_user((void __user *)arg, &data, sizeof(data)))
+                       return -EFAULT;
+               break;
+       }
+       case ION_IOC_FREE:
+       {
+               struct ion_handle_data data;
+               bool valid;
+
+               if (copy_from_user(&data, (void __user *)arg,
+                                  sizeof(struct ion_handle_data)))
+                       return -EFAULT;
+               mutex_lock(&client->lock);
+               valid = ion_handle_validate(client, data.handle);
+               mutex_unlock(&client->lock);
+               if (!valid)
+                       return -EINVAL;
+               ion_free(client, data.handle);
+               break;
+       }
+       case ION_IOC_MAP:
+       case ION_IOC_SHARE:
+       {
+               struct ion_fd_data data;
+
+               if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+                       return -EFAULT;
+               mutex_lock(&client->lock);
+               if (!ion_handle_validate(client, data.handle)) {
+                       pr_err("%s: invalid handle passed to share ioctl.\n",
+                              __func__);
+                       mutex_unlock(&client->lock);
+                       return -EINVAL;
+               }
+               data.fd = ion_ioctl_share(filp, client, data.handle);
+               mutex_unlock(&client->lock);
+               if (copy_to_user((void __user *)arg, &data, sizeof(data)))
+                       return -EFAULT;
+               break;
+       }
+       case ION_IOC_IMPORT:
+       {
+               struct ion_fd_data data;
+               if (copy_from_user(&data, (void __user *)arg,
+                                  sizeof(struct ion_fd_data)))
+                       return -EFAULT;
+
+               data.handle = ion_import_fd(client, data.fd);
+               if (IS_ERR(data.handle))
+                       data.handle = NULL;
+               if (copy_to_user((void __user *)arg, &data,
+                                sizeof(struct ion_fd_data)))
+                       return -EFAULT;
+               break;
+       }
+       case ION_IOC_CUSTOM:
+       {
+               struct ion_device *dev = client->dev;
+               struct ion_custom_data data;
+
+               if (!dev->custom_ioctl)
+                       return -ENOTTY;
+               if (copy_from_user(&data, (void __user *)arg,
+                                  sizeof(struct ion_custom_data)))
+                       return -EFAULT;
+               return dev->custom_ioctl(client, data.cmd, data.arg);
+       }
+       default:
+               return -ENOTTY;
+       }
+       return 0;
+}
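+
+/*
+ * Hedged userspace sketch of the alloc/free round trip (not part of
+ * this file), assuming an already opened /dev/ion fd, illustrative
+ * len/align values, and flags acting as a heap mask (e.g. the
+ * ION_HEAP_SYSTEM_MASK style macros from ion.h):
+ *
+ *   struct ion_allocation_data alloc = {
+ *           .len   = 4096,
+ *           .align = 4096,
+ *           .flags = ION_HEAP_SYSTEM_MASK,
+ *   };
+ *   ioctl(ion_fd, ION_IOC_ALLOC, &alloc);        (fills alloc.handle)
+ *
+ *   struct ion_handle_data hd = { .handle = alloc.handle };
+ *   ioctl(ion_fd, ION_IOC_FREE, &hd);
+ */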
+
+static int ion_release(struct inode *inode, struct file *file)
+{
+       struct ion_client *client = file->private_data;
+
+       pr_debug("%s: %d\n", __func__, __LINE__);
+       ion_client_put(client);
+       return 0;
+}
+
+static int ion_open(struct inode *inode, struct file *file)
+{
+       struct miscdevice *miscdev = file->private_data;
+       struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
+       struct ion_client *client;
+
+       pr_debug("%s: %d\n", __func__, __LINE__);
+       client = ion_client_create(dev, -1, "user");
+       /* PTR_ERR(NULL) would be 0; map a NULL client to a real error */
+       if (IS_ERR_OR_NULL(client))
+               return client ? PTR_ERR(client) : -ENOMEM;
+       file->private_data = client;
+
+       return 0;
+}
+
+static const struct file_operations ion_fops = {
+       .owner          = THIS_MODULE,
+       .open           = ion_open,
+       .release        = ion_release,
+       .unlocked_ioctl = ion_ioctl,
+};
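+
+/*
+ * These fops back the misc device registered in ion_device_create()
+ * below; with udev this typically shows up as /dev/ion, and every
+ * open() gets its own ion_client via ion_open().
+ */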
+
+static size_t ion_debug_heap_total(struct ion_client *client,
+                                  enum ion_heap_type type)
+{
+       size_t size = 0;
+       struct rb_node *n;
+
+       mutex_lock(&client->lock);
+       for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+               struct ion_handle *handle = rb_entry(n,
+                                                    struct ion_handle,
+                                                    node);
+               if (handle->buffer->heap->type == type)
+                       size += handle->buffer->size;
+       }
+       mutex_unlock(&client->lock);
+       return size;
+}
+
+static int ion_debug_heap_show(struct seq_file *s, void *unused)
+{
+       struct ion_heap *heap = s->private;
+       struct ion_device *dev = heap->dev;
+       struct rb_node *n;
+
+       seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
+       for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) {
+               struct ion_client *client = rb_entry(n, struct ion_client,
+                                                    node);
+               char task_comm[TASK_COMM_LEN];
+               size_t size = ion_debug_heap_total(client, heap->type);
+               if (!size)
+                       continue;
+
+               get_task_comm(task_comm, client->task);
+               seq_printf(s, "%16.s %16u %16u\n", task_comm, client->pid,
+                          size);
+       }
+
+       for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) {
+               struct ion_client *client = rb_entry(n, struct ion_client,
+                                                    node);
+               size_t size = ion_debug_heap_total(client, heap->type);
+               if (!size)
+                       continue;
+               seq_printf(s, "%16.s %16u %16u\n", client->name, client->pid,
+                          size);
+       }
+       return 0;
+}
+
+static int ion_debug_heap_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, ion_debug_heap_show, inode->i_private);
+}
+
+static const struct file_operations debug_heap_fops = {
+       .open = ion_debug_heap_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
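+
+/*
+ * One such file is created per heap under the "ion" debugfs directory
+ * set up in ion_device_create(); with debugfs mounted in the usual
+ * place, the per-client usage table above is readable at
+ * /sys/kernel/debug/ion/<heap name>.
+ */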
+
+void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
+{
+       struct rb_node **p = &dev->heaps.rb_node;
+       struct rb_node *parent = NULL;
+       struct ion_heap *entry;
+
+       heap->dev = dev;
+       mutex_lock(&dev->lock);
+       while (*p) {
+               parent = *p;
+               entry = rb_entry(parent, struct ion_heap, node);
+
+               if (heap->id < entry->id) {
+                       p = &(*p)->rb_left;
+               } else if (heap->id > entry->id) {
+                       p = &(*p)->rb_right;
+               } else {
+                       pr_err("%s: can not insert multiple heaps with "
+                               "id %d\n", __func__, heap->id);
+                       goto end;
+               }
+       }
+
+       rb_link_node(&heap->node, parent, p);
+       rb_insert_color(&heap->node, &dev->heaps);
+       debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
+                           &debug_heap_fops);
+end:
+       mutex_unlock(&dev->lock);
+}
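+
+/*
+ * Minimal registration sketch for a SoC driver, assuming a
+ * platform-supplied struct ion_platform_heap and the ion_heap_create()
+ * helper from ion_heap.c:
+ *
+ *   struct ion_heap *heap = ion_heap_create(heap_data);
+ *   if (!IS_ERR_OR_NULL(heap))
+ *           ion_device_add_heap(idev, heap);
+ */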
+
+struct ion_device *ion_device_create(long (*custom_ioctl)
+                                    (struct ion_client *client,
+                                     unsigned int cmd,
+                                     unsigned long arg))
+{
+       struct ion_device *idev;
+       int ret;
+
+       idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
+       if (!idev)
+               return ERR_PTR(-ENOMEM);
+
+       idev->dev.minor = MISC_DYNAMIC_MINOR;
+       idev->dev.name = "ion";
+       idev->dev.fops = &ion_fops;
+       idev->dev.parent = NULL;
+       ret = misc_register(&idev->dev);
+       if (ret) {
+               pr_err("ion: failed to register misc device.\n");
+               kfree(idev);
+               return ERR_PTR(ret);
+       }
+
+       idev->debug_root = debugfs_create_dir("ion", NULL);
+       if (IS_ERR_OR_NULL(idev->debug_root))
+               pr_err("ion: failed to create debug files.\n");
+
+       idev->custom_ioctl = custom_ioctl;
+       idev->buffers = RB_ROOT;
+       mutex_init(&idev->lock);
+       idev->heaps = RB_ROOT;
+       idev->user_clients = RB_ROOT;
+       idev->kernel_clients = RB_ROOT;
+       return idev;
+}
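+
+/*
+ * Creation sketch for a SoC driver's probe path; custom_ioctl may be
+ * NULL, in which case ION_IOC_CUSTOM returns -ENOTTY:
+ *
+ *   struct ion_device *idev = ion_device_create(NULL);
+ *   if (IS_ERR(idev))
+ *           return PTR_ERR(idev);
+ */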
+
+void ion_device_destroy(struct ion_device *dev)
+{
+       misc_deregister(&dev->dev);
+       /* XXX need to free the heaps and clients ? */
+       kfree(dev);
+}