Merge branch 'perf/urgent' into perf/core
Ingo Molnar [Tue, 20 Oct 2009 05:51:41 +0000 (07:51 +0200)]
Merge reason: Queue up dependent patch.

Signed-off-by: Ingo Molnar <mingo@elte.hu>

215 files changed:
Documentation/debugging-via-ohci1394.txt
Documentation/feature-removal-schedule.txt
Documentation/filesystems/ext3.txt
Documentation/kernel-parameters.txt
Documentation/sound/alsa/HD-Audio-Models.txt
Documentation/trace/ftrace-design.txt
MAINTAINERS
Makefile
arch/arm/include/asm/bitops.h
arch/arm/kernel/traps.c
arch/arm/mach-bcmring/core.c
arch/arm/mach-bcmring/include/mach/system.h
arch/arm/mach-ep93xx/Kconfig
arch/arm/mach-ep93xx/Makefile.boot
arch/arm/mach-ep93xx/clock.c
arch/arm/mach-ep93xx/core.c
arch/arm/mach-ep93xx/edb93xx.c
arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
arch/arm/mach-ep93xx/include/mach/gpio.h
arch/arm/mach-ep93xx/include/mach/memory.h
arch/arm/mach-ep93xx/include/mach/platform.h
arch/arm/mach-ep93xx/micro9.c
arch/arm/mach-pxa/cpufreq-pxa2xx.c
arch/arm/mach-pxa/csb726.c
arch/arm/mach-sa1100/Makefile
arch/arm/mm/cache-v6.S
arch/arm/mm/cache-v7.S
arch/arm/mm/fault-armv.c
arch/arm/mm/fault.c
arch/arm/mm/highmem.c
arch/arm/mm/init.c
arch/s390/hypfs/hypfs_diag.c
arch/s390/kernel/ftrace.c
arch/s390/kernel/processor.c
arch/sh/kernel/entry-common.S
arch/sh/kernel/ftrace.c
arch/sh/kernel/setup.c
arch/sh/kernel/signal_32.c
arch/sh/kernel/smp.c
arch/sh/kernel/traps_32.c
arch/sh/mm/cache.c
arch/sparc/kernel/ldc.c
arch/sparc/kernel/perf_event.c
arch/sparc/mm/init_64.c
arch/x86/Kconfig
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/paravirt_types.h
arch/x86/include/asm/perf_event.h
arch/x86/kernel/acpi/realmode/wakeup.lds.S
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/entry_32.S
arch/x86/kernel/entry_64.S
arch/x86/kernel/ftrace.c
arch/x86/kernel/irq.c
arch/x86/kernel/pci-dma.c
arch/x86/kernel/smp.c
arch/x86/kernel/time.c
arch/x86/kernel/trampoline.c
arch/x86/kernel/trampoline_64.S
arch/x86/kernel/vmi_32.c
arch/x86/kernel/vmlinux.lds.S
arch/x86/mm/testmmiotrace.c
block/blk-core.c
block/blk-merge.c
block/blk-settings.c
block/blk-tag.c
block/cfq-iosched.c
block/elevator.c
block/genhd.c
drivers/block/cciss.c
drivers/char/genrtc.c
drivers/char/rtc.c
drivers/char/sonypi.c
drivers/char/tty_buffer.c
drivers/firewire/sbp2.c
drivers/hid/hid-core.c
drivers/hid/hid-twinhan.c
drivers/hid/hidraw.c
drivers/md/dm.c
drivers/mfd/twl4030-core.c
drivers/mmc/host/pxamci.c
drivers/net/wan/c101.c
drivers/net/wan/n2.c
drivers/net/wan/pci200syn.c
drivers/oprofile/event_buffer.c
drivers/pci/dmar.c
drivers/pci/hotplug/cpqphp.h
drivers/pci/intel-iommu.c
drivers/pci/pci.c
drivers/pci/pcie/aer/aerdrv.c
drivers/pci/pcie/portdrv_pci.c
drivers/pci/quirks.c
drivers/pci/setup-bus.c
drivers/pci/setup-res.c
drivers/s390/block/dasd.c
drivers/s390/block/dasd_eckd.c
drivers/s390/char/sclp_async.c
drivers/s390/char/sclp_vt220.c
drivers/s390/char/tape_block.c
drivers/s390/cio/device.c
drivers/serial/serial_core.c
drivers/spi/amba-pl022.c
drivers/usb/musb/Kconfig
drivers/watchdog/riowd.c
fs/ext3/super.c
fs/nfs/super.c
fs/partitions/check.c
include/linux/blkdev.h
include/linux/ftrace_event.h
include/linux/genhd.h
include/linux/kernel.h
include/linux/pci_ids.h
include/linux/perf_counter.h
include/linux/perf_event.h
include/linux/smp_lock.h
include/linux/workqueue.h
include/trace/events/bkl.h [new file with mode: 0644]
include/trace/events/irq.h
include/trace/events/power.h
include/trace/events/sched.h
include/trace/events/timer.h
include/trace/ftrace.h
include/trace/syscall.h
kernel/lockdep.c
kernel/perf_event.c
kernel/sched.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_events.c
kernel/trace/trace_events_filter.c
kernel/trace/trace_export.c
kernel/trace/trace_syscalls.c
kernel/workqueue.c
lib/kernel_lock.c
mm/backing-dev.c
mm/page-writeback.c
mm/percpu.c
scripts/Kbuild.include
scripts/Makefile.lib
scripts/checkkconfigsymbols.sh
scripts/headers_install.pl
scripts/mkcompile_h
scripts/package/Makefile
scripts/package/mkspec
scripts/recordmcount.pl
sound/arm/aaci.c
sound/pci/bt87x.c
sound/pci/hda/patch_nvhdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/pci/ice1712/amp.c
sound/pci/ice1712/ice1724.c
tools/perf/Makefile
tools/perf/builtin-annotate.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-sched.c
tools/perf/builtin-stat.c
tools/perf/builtin-top.c
tools/perf/builtin-trace.c
tools/perf/perf.c
tools/perf/util/cache.h
tools/perf/util/callchain.h
tools/perf/util/color.h
tools/perf/util/data_map.c [new file with mode: 0644]
tools/perf/util/data_map.h [new file with mode: 0644]
tools/perf/util/debug.h
tools/perf/util/event.h
tools/perf/util/exec_cmd.h
tools/perf/util/header.c
tools/perf/util/header.h
tools/perf/util/help.h
tools/perf/util/hist.c [new file with mode: 0644]
tools/perf/util/hist.h [new file with mode: 0644]
tools/perf/util/include/asm/asm-offsets.h [new file with mode: 0644]
tools/perf/util/include/asm/bitops.h [new file with mode: 0644]
tools/perf/util/include/asm/byteorder.h [new file with mode: 0644]
tools/perf/util/include/asm/swab.h [new file with mode: 0644]
tools/perf/util/include/asm/types.h [new file with mode: 0644]
tools/perf/util/include/asm/uaccess.h [new file with mode: 0644]
tools/perf/util/include/linux/bitmap.h [new file with mode: 0644]
tools/perf/util/include/linux/bitops.h [new file with mode: 0644]
tools/perf/util/include/linux/compiler.h [new file with mode: 0644]
tools/perf/util/include/linux/ctype.h [new file with mode: 0644]
tools/perf/util/include/linux/kernel.h
tools/perf/util/include/linux/string.h [new file with mode: 0644]
tools/perf/util/include/linux/types.h [new file with mode: 0644]
tools/perf/util/levenshtein.h
tools/perf/util/module.c [deleted file]
tools/perf/util/module.h [deleted file]
tools/perf/util/parse-events.c
tools/perf/util/parse-events.h
tools/perf/util/parse-options.h
tools/perf/util/quote.h
tools/perf/util/run-command.h
tools/perf/util/sigchain.h
tools/perf/util/sort.c [new file with mode: 0644]
tools/perf/util/sort.h [new file with mode: 0644]
tools/perf/util/strbuf.h
tools/perf/util/string.c
tools/perf/util/string.h
tools/perf/util/strlist.h
tools/perf/util/svghelper.h
tools/perf/util/symbol.c
tools/perf/util/symbol.h
tools/perf/util/thread.c
tools/perf/util/thread.h
tools/perf/util/trace-event-info.c
tools/perf/util/trace-event-parse.c
tools/perf/util/trace-event-read.c
tools/perf/util/trace-event.h
tools/perf/util/types.h
tools/perf/util/values.h

index 59a91e5..611f5a5 100644 (file)
@@ -64,14 +64,14 @@ be used to view the printk buffer of a remote machine, even with live update.
 
 Bernhard Kaindl enhanced firescope to support accessing 64-bit machines
 from 32-bit firescope and vice versa:
-- ftp://ftp.suse.de/private/bk/firewire/tools/firescope-0.2.2.tar.bz2
+- http://halobates.de/firewire/firescope-0.2.2.tar.bz2
 
 and he implemented fast system dump (alpha version - read README.txt):
-- ftp://ftp.suse.de/private/bk/firewire/tools/firedump-0.1.tar.bz2
+- http://halobates.de/firewire/firedump-0.1.tar.bz2
 
 There is also a gdb proxy for firewire which allows using gdb to access
 data which can be referenced from symbols found by gdb in vmlinux:
-- ftp://ftp.suse.de/private/bk/firewire/tools/fireproxy-0.33.tar.bz2
+- http://halobates.de/firewire/fireproxy-0.33.tar.bz2
 
 The latest version of this gdb proxy (fireproxy-0.34) can communicate (not
 yet stable) with kgdb over a memory-based communication module (kgdbom).
@@ -178,7 +178,7 @@ Step-by-step instructions for using firescope with early OHCI initialization:
 
 Notes
 -----
-Documentation and specifications: ftp://ftp.suse.de/private/bk/firewire/docs
+Documentation and specifications: http://halobates.de/firewire/
 
 FireWire is a trademark of Apple Inc. - for more information please refer to:
 http://en.wikipedia.org/wiki/FireWire
index 89a47b5..04e6c81 100644 (file)
@@ -451,3 +451,33 @@ Why:       OSS sound_core grabs all legacy minors (0-255) of SOUND_MAJOR
        will also allow making ALSA OSS emulation independent of
        sound_core.  The dependency will be broken then too.
 Who:   Tejun Heo <tj@kernel.org>
+
+----------------------------
+
+What:  Support for VMware's guest paravirtualization technique [VMI] will be
+       dropped.
+When:  2.6.37 or earlier.
+Why:   With the recent innovations in CPU hardware acceleration technologies
+       from Intel and AMD, VMware ran a few experiments comparing these
+       techniques to the guest paravirtualization technique on VMware's
+       platform. These hardware-assisted virtualization techniques have
+       outperformed VMI in most of the workloads. VMware expects these
+       hardware features to be ubiquitous in a couple of years; as a
+       result, VMware has started a phased retirement of this feature from
+       the hypervisor. We will be removing this feature from the kernel
+       too. Right now we are targeting 2.6.37, but it can be retired
+       earlier if technical reasons (read: the opportunity to remove a
+       major chunk of pvops) arise.
+
+       Please note that VMI has always been an optimization and non-VMI kernels
+       still work fine on VMware's platform.
+       The latest versions of VMware's products which support VMI are
+       Workstation 7.0 and vSphere 4.0 on the ESX side; future maintenance
+       releases for these products will continue supporting VMI.
+
+       For more details about VMI retirement take a look at this,
+       http://blogs.vmware.com/guestosguide/2009/09/vmi-retirement.html
+
+Who:   Alok N Kataria <akataria@vmware.com>
+
+----------------------------
index 570f9bd..05d5cf1 100644 (file)
@@ -123,10 +123,18 @@ resuid=n          The user ID which may use the reserved blocks.
 
 sb=n                   Use alternate superblock at this location.
 
-quota
-noquota
-grpquota
-usrquota
+quota                  These options are ignored by the filesystem. They
+noquota                        are used only by quota tools to recognize volumes
+grpquota               where quota should be turned on. See documentation
+usrquota               in the quota-tools package for more details
+                       (http://sourceforge.net/projects/linuxquota).
+
+jqfmt=<quota type>     These options tell the filesystem details about quota
+usrjquota=<file>       so that quota information can be properly updated
+grpjquota=<file>       during journal replay. They replace the above
+                       quota options. See documentation in the quota-tools
+                       package for more details
+                       (http://sourceforge.net/projects/linuxquota).
 
 bh             (*)     ext3 associates buffer heads to data pages to
 nobh                   (a) cache disk block mapping information
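
As an illustration of how the journaled quota options end up being used, here is a minimal userspace sketch that mounts an ext3 volume with them. It is a hypothetical example: the device path, mount point, quota file names and the vfsv0 format are assumptions chosen for illustration, not values taken from this patch.

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* jqfmt/usrjquota/grpjquota are parsed at mount time so that
             * journal replay can keep the quota files consistent. */
            const char *opts = "jqfmt=vfsv0,"
                               "usrjquota=aquota.user,"
                               "grpjquota=aquota.group";

            if (mount("/dev/sda1", "/mnt", "ext3", 0, opts) != 0) {
                    perror("mount");
                    return 1;
            }
            return 0;
    }
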
index 9107b38..c8d1b2b 100644 (file)
@@ -779,6 +779,13 @@ and is between 256 and 4096 characters. It is defined in the file
                        by the set_ftrace_notrace file in the debugfs
                        tracing directory.
 
+       ftrace_graph_filter=[function-list]
+                       [FTRACE] Limit the top-level caller functions traced
+                       by the function graph tracer at boot up.
+                       function-list is a comma-separated list of functions
+                       that can be changed at run time via the
+                       set_graph_function file in the debugfs tracing directory.
+
        gamecon.map[2|3]=
                        [HW,JOY] Multisystem joystick and NES/SNES/PSX pad
                        support via parallel port (up to 5 devices per port)
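
For illustration only, a boot command line using the new parameter could contain an entry such as the one below; the function names are arbitrary examples, not taken from this patch:

    ftrace_graph_filter=kfree,schedule
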
index 75fddb4..4c7f9ae 100644 (file)
@@ -359,6 +359,7 @@ STAC9227/9228/9229/927x
   5stack-no-fp D965 5stack without front panel
   dell-3stack  Dell Dimension E520
   dell-bios    Fixes with Dell BIOS setup
+  volknob      Fixes with volume-knob widget 0x24
   auto         BIOS setup (default)
 
 STAC92HD71B*
index 7003e10..641a1ef 100644 (file)
@@ -213,10 +213,19 @@ If you can't trace NMI functions, then skip this option.
 <details to be filled>
 
 
-HAVE_FTRACE_SYSCALLS
+HAVE_SYSCALL_TRACEPOINTS
 ---------------------
 
-<details to be filled>
+You need very few things to get syscall tracing working in an arch:
+
+- Have a NR_syscalls variable in <asm/unistd.h> that provides the number
+  of syscalls supported by the arch.
+- Implement arch_syscall_addr(), which resolves a syscall address from a
+  syscall number (see the sketch below).
+- Support the TIF_SYSCALL_TRACEPOINT thread flag.
+- Call the trace_sys_enter() and trace_sys_exit() tracepoints from the
+  ptrace syscall tracing path.
+- Tag this arch as HAVE_SYSCALL_TRACEPOINTS.
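
A minimal sketch of the arch_syscall_addr() item, assuming an architecture whose syscall table is a flat array; the table name, its element type and the use of __init are illustrative assumptions, not requirements stated in this document (real implementations such as x86 or s390 differ in detail):

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <asm/unistd.h>

    /* Hypothetical flat syscall table; the real name and type are per-arch. */
    extern const unsigned long sys_call_table[NR_syscalls];

    /* Resolve a syscall number to the address of its handler. */
    unsigned long __init arch_syscall_addr(int nr)
    {
            return sys_call_table[nr];
    }
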
 
 
 HAVE_FTRACE_MCOUNT_RECORD
index cf69091..d5eb8c1 100644 (file)
@@ -577,6 +577,11 @@ M: Mike Rapoport <mike@compulab.co.il>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 
+ARM/CONTEC MICRO9 MACHINE SUPPORT
+M:     Hubert Feurstein <hubert.feurstein@contec.at>
+S:     Maintained
+F:     arch/arm/mach-ep93xx/micro9.c
+
 ARM/CORGI MACHINE SUPPORT
 M:     Richard Purdie <rpurdie@rpsys.net>
 S:     Maintained
@@ -2610,6 +2615,7 @@ L:        linux1394-devel@lists.sourceforge.net
 W:     http://www.linux1394.org/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6.git
 S:     Maintained
+F:     Documentation/debugging-via-ohci1394.txt
 F:     drivers/ieee1394/
 
 IEEE 1394 RAW I/O DRIVER
index 927d7a3..3267915 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -179,46 +179,9 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
 # Alternatively CROSS_COMPILE can be set in the environment.
 # Default value for CROSS_COMPILE is not to prefix executables
 # Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile
-#
-# To force ARCH and CROSS_COMPILE settings include kernel.* files
-# in the kernel tree - do not patch this file.
 export KBUILD_BUILDHOST := $(SUBARCH)
-
-# Kbuild save the ARCH and CROSS_COMPILE setting in kernel.* files.
-# Restore these settings and check that user did not specify
-# conflicting values.
-
-saved_arch  := $(shell cat include/generated/kernel.arch  2> /dev/null)
-saved_cross := $(shell cat include/generated/kernel.cross 2> /dev/null)
-
-ifneq ($(CROSS_COMPILE),)
-        ifneq ($(saved_cross),)
-                ifneq ($(CROSS_COMPILE),$(saved_cross))
-                        $(error CROSS_COMPILE changed from \
-                                "$(saved_cross)" to \
-                                 to "$(CROSS_COMPILE)". \
-                                 Use "make mrproper" to fix it up)
-                endif
-        endif
-else
-    CROSS_COMPILE := $(saved_cross)
-endif
-
-ifneq ($(ARCH),)
-        ifneq ($(saved_arch),)
-                ifneq ($(saved_arch),$(ARCH))
-                        $(error ARCH changed from \
-                                "$(saved_arch)" to "$(ARCH)". \
-                                 Use "make mrproper" to fix it up)
-                endif
-        endif
-else
-        ifneq ($(saved_arch),)
-                ARCH := $(saved_arch)
-        else
-                ARCH := $(SUBARCH)
-        endif
-endif
+ARCH           ?= $(SUBARCH)
+CROSS_COMPILE  ?=
 
 # Architecture as present in compile.h
 UTS_MACHINE    := $(ARCH)
@@ -483,11 +446,6 @@ ifeq ($(config-targets),1)
 include $(srctree)/arch/$(SRCARCH)/Makefile
 export KBUILD_DEFCONFIG KBUILD_KCONFIG
 
-# save ARCH & CROSS_COMPILE settings
-$(shell mkdir -p include/generated &&                            \
-        echo $(ARCH)          > include/generated/kernel.arch && \
-        echo $(CROSS_COMPILE) > include/generated/kernel.cross)
-
 config: scripts_basic outputmakefile FORCE
        $(Q)mkdir -p include/linux include/config
        $(Q)$(MAKE) $(build)=scripts/kconfig $@
index 63a481f..338ff19 100644 (file)
@@ -84,7 +84,7 @@ ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
        *p = res | mask;
        raw_local_irq_restore(flags);
 
-       return res & mask;
+       return (res & mask) != 0;
 }
 
 static inline int
@@ -101,7 +101,7 @@ ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
        *p = res & ~mask;
        raw_local_irq_restore(flags);
 
-       return res & mask;
+       return (res & mask) != 0;
 }
 
 static inline int
@@ -118,7 +118,7 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
        *p = res ^ mask;
        raw_local_irq_restore(flags);
 
-       return res & mask;
+       return (res & mask) != 0;
 }
 
 #include <asm-generic/bitops/non-atomic.h>
index 467b69e..f838f36 100644 (file)
@@ -45,21 +45,21 @@ static int __init user_debug_setup(char *str)
 __setup("user_debug=", user_debug_setup);
 #endif
 
-static void dump_mem(const char *str, unsigned long bottom, unsigned long top);
+static void dump_mem(const char *, const char *, unsigned long, unsigned long);
 
 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
 {
 #ifdef CONFIG_KALLSYMS
-       printk("[<%08lx>] ", where);
-       print_symbol("(%s) ", where);
-       printk("from [<%08lx>] ", from);
-       print_symbol("(%s)\n", from);
+       char sym1[KSYM_SYMBOL_LEN], sym2[KSYM_SYMBOL_LEN];
+       sprint_symbol(sym1, where);
+       sprint_symbol(sym2, from);
+       printk("[<%08lx>] (%s) from [<%08lx>] (%s)\n", where, sym1, from, sym2);
 #else
        printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
 #endif
 
        if (in_exception_text(where))
-               dump_mem("Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
+               dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
 }
 
 #ifndef CONFIG_ARM_UNWIND
@@ -81,9 +81,10 @@ static int verify_stack(unsigned long sp)
 /*
  * Dump out the contents of some memory nicely...
  */
-static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
+static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
+                    unsigned long top)
 {
-       unsigned long p = bottom & ~31;
+       unsigned long first;
        mm_segment_t fs;
        int i;
 
@@ -95,33 +96,37 @@ static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
        fs = get_fs();
        set_fs(KERNEL_DS);
 
-       printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
+       printk("%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top);
 
-       for (p = bottom & ~31; p < top;) {
-               printk("%04lx: ", p & 0xffff);
+       for (first = bottom & ~31; first < top; first += 32) {
+               unsigned long p;
+               char str[sizeof(" 12345678") * 8 + 1];
 
-               for (i = 0; i < 8; i++, p += 4) {
-                       unsigned int val;
+               memset(str, ' ', sizeof(str));
+               str[sizeof(str) - 1] = '\0';
 
-                       if (p < bottom || p >= top)
-                               printk("         ");
-                       else {
-                               __get_user(val, (unsigned long *)p);
-                               printk("%08x ", val);
+               for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
+                       if (p >= bottom && p < top) {
+                               unsigned long val;
+                               if (__get_user(val, (unsigned long *)p) == 0)
+                                       sprintf(str + i * 9, " %08lx", val);
+                               else
+                                       sprintf(str + i * 9, " ????????");
                        }
                }
-               printk ("\n");
+               printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
        }
 
        set_fs(fs);
 }
 
-static void dump_instr(struct pt_regs *regs)
+static void dump_instr(const char *lvl, struct pt_regs *regs)
 {
        unsigned long addr = instruction_pointer(regs);
        const int thumb = thumb_mode(regs);
        const int width = thumb ? 4 : 8;
        mm_segment_t fs;
+       char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
        int i;
 
        /*
@@ -132,7 +137,6 @@ static void dump_instr(struct pt_regs *regs)
        fs = get_fs();
        set_fs(KERNEL_DS);
 
-       printk("Code: ");
        for (i = -4; i < 1; i++) {
                unsigned int val, bad;
 
@@ -142,13 +146,14 @@ static void dump_instr(struct pt_regs *regs)
                        bad = __get_user(val, &((u32 *)addr)[i]);
 
                if (!bad)
-                       printk(i == 0 ? "(%0*x) " : "%0*x ", width, val);
+                       p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
+                                       width, val);
                else {
-                       printk("bad PC value.");
+                       p += sprintf(p, "bad PC value");
                        break;
                }
        }
-       printk("\n");
+       printk("%sCode: %s\n", lvl, str);
 
        set_fs(fs);
 }
@@ -224,18 +229,19 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
        struct task_struct *tsk = thread->task;
        static int die_counter;
 
-       printk("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
+       printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
               str, err, ++die_counter);
+       sysfs_printk_last_file();
        print_modules();
        __show_regs(regs);
-       printk("Process %s (pid: %d, stack limit = 0x%p)\n",
-               tsk->comm, task_pid_nr(tsk), thread + 1);
+       printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n",
+               TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);
 
        if (!user_mode(regs) || in_interrupt()) {
-               dump_mem("Stack: ", regs->ARM_sp,
+               dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
                         THREAD_SIZE + (unsigned long)task_stack_page(tsk));
                dump_backtrace(regs, tsk);
-               dump_instr(regs);
+               dump_instr(KERN_EMERG, regs);
        }
 }
 
@@ -250,13 +256,14 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
 
        oops_enter();
 
-       console_verbose();
        spin_lock_irq(&die_lock);
+       console_verbose();
        bust_spinlocks(1);
        __die(str, err, thread, regs);
        bust_spinlocks(0);
        add_taint(TAINT_DIE);
        spin_unlock_irq(&die_lock);
+       oops_exit();
 
        if (in_interrupt())
                panic("Fatal exception in interrupt");
@@ -264,7 +271,6 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
        if (panic_on_oops)
                panic("Fatal exception");
 
-       oops_exit();
        do_exit(SIGSEGV);
 }
 
@@ -349,7 +355,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
        if (user_debug & UDBG_UNDEFINED) {
                printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
                        current->comm, task_pid_nr(current), pc);
-               dump_instr(regs);
+               dump_instr(KERN_INFO, regs);
        }
 #endif
 
@@ -400,7 +406,7 @@ static int bad_syscall(int n, struct pt_regs *regs)
        if (user_debug & UDBG_SYSCALL) {
                printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n",
                        task_pid_nr(current), current->comm, n);
-               dump_instr(regs);
+               dump_instr(KERN_ERR, regs);
        }
 #endif
 
@@ -579,7 +585,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
        if (user_debug & UDBG_SYSCALL) {
                printk("[%d] %s: arm syscall %d\n",
                       task_pid_nr(current), current->comm, no);
-               dump_instr(regs);
+               dump_instr("", regs);
                if (user_mode(regs)) {
                        __show_regs(regs);
                        c_backtrace(regs->ARM_fp, processor_mode(regs));
@@ -656,7 +662,7 @@ baddataabort(int code, unsigned long instr, struct pt_regs *regs)
        if (user_debug & UDBG_BADABORT) {
                printk(KERN_ERR "[%d] %s: bad data abort: code %d instr 0x%08lx\n",
                        task_pid_nr(current), current->comm, code, instr);
-               dump_instr(regs);
+               dump_instr(KERN_ERR, regs);
                show_pte(current->mm, addr);
        }
 #endif
index 4b4f692..e590bbe 100644 (file)
@@ -271,12 +271,12 @@ static struct irqaction bcmring_timer_irq = {
        .handler = bcmring_timer_interrupt,
 };
 
-static cycle_t bcmring_get_cycles_timer1(void)
+static cycle_t bcmring_get_cycles_timer1(struct clocksource *cs)
 {
        return ~readl(TIMER1_VA_BASE + TIMER_VALUE);
 }
 
-static cycle_t bcmring_get_cycles_timer3(void)
+static cycle_t bcmring_get_cycles_timer3(struct clocksource *cs)
 {
        return ~readl(TIMER3_VA_BASE + TIMER_VALUE);
 }
index cdbf93c..38b3706 100644 (file)
@@ -29,7 +29,7 @@ static inline void arch_idle(void)
        cpu_do_idle();
 }
 
-static inline void arch_reset(char mode, char *cmd)
+static inline void arch_reset(char mode, const char *cmd)
 {
        printk("arch_reset:%c %x\n", mode, bcmring_arch_warm_reboot);
 
index d7291c6..9167c3d 100644 (file)
@@ -17,13 +17,31 @@ config EP93XX_SDCE3_SYNC_PHYS_OFFSET
        bool "0x00000000 - SDCE3/SyncBoot"
        help
          Select this option if you want support for EP93xx boards with the
-         first SDRAM bank at 0x00000000
+         first SDRAM bank at 0x00000000.
 
 config EP93XX_SDCE0_PHYS_OFFSET
        bool "0xc0000000 - SDCEO"
        help
          Select this option if you want support for EP93xx boards with the
-         first SDRAM bank at 0xc0000000
+         first SDRAM bank at 0xc0000000.
+
+config EP93XX_SDCE1_PHYS_OFFSET
+       bool "0xd0000000 - SDCE1"
+       help
+         Select this option if you want support for EP93xx boards with the
+         first SDRAM bank at 0xd0000000.
+
+config EP93XX_SDCE2_PHYS_OFFSET
+       bool "0xe0000000 - SDCE2"
+       help
+         Select this option if you want support for EP93xx boards with the
+         first SDRAM bank at 0xe0000000.
+
+config EP93XX_SDCE3_ASYNC_PHYS_OFFSET
+       bool "0xf0000000 - SDCE3/AsyncBoot"
+       help
+         Select this option if you want support for EP93xx boards with the
+         first SDRAM bank at 0xf0000000.
 
 endchoice
 
@@ -112,28 +130,36 @@ config MACH_MICRO9
        bool
 
 config MACH_MICRO9H
-       bool "Support Contec Hypercontrol Micro9-H"
+       bool "Support Contec Micro9-High"
        depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET
        select MACH_MICRO9
        help
          Say 'Y' here if you want your kernel to support the
-         Contec Hypercontrol Micro9-H board.
+         Contec Micro9-High board.
 
 config MACH_MICRO9M
-       bool "Support Contec Hypercontrol Micro9-M"
-       depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET
+       bool "Support Contec Micro9-Mid"
+       depends on EP93XX_SDCE3_ASYNC_PHYS_OFFSET
        select MACH_MICRO9
        help
          Say 'Y' here if you want your kernel to support the
-         Contec Hypercontrol Micro9-M board.
+         Contec Micro9-Mid board.
 
 config MACH_MICRO9L
-       bool "Support Contec Hypercontrol Micro9-L"
+       bool "Support Contec Micro9-Lite"
        depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET
        select MACH_MICRO9
        help
          Say 'Y' here if you want your kernel to support the
-         Contec Hypercontrol Micro9-L board.
+         Contec Micro9-Lite board.
+
+config MACH_MICRO9S
+       bool "Support Contec Micro9-Slim"
+       depends on EP93XX_SDCE3_ASYNC_PHYS_OFFSET
+       select MACH_MICRO9
+       help
+         Say 'Y' here if you want your kernel to support the
+         Contec Micro9-Slim board.
 
 config MACH_TS72XX
        bool "Support Technologic Systems TS-72xx SBC"
index 27a085a..0ad33f1 100644 (file)
@@ -3,3 +3,12 @@ params_phys-$(CONFIG_EP93XX_SDCE3_SYNC_PHYS_OFFSET)    := 0x00000100
 
    zreladdr-$(CONFIG_EP93XX_SDCE0_PHYS_OFFSET)         := 0xc0008000
 params_phys-$(CONFIG_EP93XX_SDCE0_PHYS_OFFSET)         := 0xc0000100
+
+   zreladdr-$(CONFIG_EP93XX_SDCE1_PHYS_OFFSET)         := 0xd0008000
+params_phys-$(CONFIG_EP93XX_SDCE1_PHYS_OFFSET)         := 0xd0000100
+
+   zreladdr-$(CONFIG_EP93XX_SDCE2_PHYS_OFFSET)         := 0xe0008000
+params_phys-$(CONFIG_EP93XX_SDCE2_PHYS_OFFSET)         := 0xe0000100
+
+   zreladdr-$(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET)   := 0xf0008000
+params_phys-$(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET)   := 0xf0000100
index dda19cd..1d0f9d8 100644 (file)
 #include <linux/module.h>
 #include <linux/string.h>
 #include <linux/io.h>
+#include <linux/spinlock.h>
+
+#include <mach/hardware.h>
 
 #include <asm/clkdev.h>
 #include <asm/div64.h>
-#include <mach/hardware.h>
 
 
 struct clk {
+       struct clk      *parent;
        unsigned long   rate;
        int             users;
        int             sw_locked;
@@ -39,40 +42,60 @@ static unsigned long get_uart_rate(struct clk *clk);
 static int set_keytchclk_rate(struct clk *clk, unsigned long rate);
 static int set_div_rate(struct clk *clk, unsigned long rate);
 
+
+static struct clk clk_xtali = {
+       .rate           = EP93XX_EXT_CLK_RATE,
+};
 static struct clk clk_uart1 = {
+       .parent         = &clk_xtali,
        .sw_locked      = 1,
        .enable_reg     = EP93XX_SYSCON_DEVCFG,
        .enable_mask    = EP93XX_SYSCON_DEVCFG_U1EN,
        .get_rate       = get_uart_rate,
 };
 static struct clk clk_uart2 = {
+       .parent         = &clk_xtali,
        .sw_locked      = 1,
        .enable_reg     = EP93XX_SYSCON_DEVCFG,
        .enable_mask    = EP93XX_SYSCON_DEVCFG_U2EN,
        .get_rate       = get_uart_rate,
 };
 static struct clk clk_uart3 = {
+       .parent         = &clk_xtali,
        .sw_locked      = 1,
        .enable_reg     = EP93XX_SYSCON_DEVCFG,
        .enable_mask    = EP93XX_SYSCON_DEVCFG_U3EN,
        .get_rate       = get_uart_rate,
 };
-static struct clk clk_pll1;
-static struct clk clk_f;
-static struct clk clk_h;
-static struct clk clk_p;
-static struct clk clk_pll2;
+static struct clk clk_pll1 = {
+       .parent         = &clk_xtali,
+};
+static struct clk clk_f = {
+       .parent         = &clk_pll1,
+};
+static struct clk clk_h = {
+       .parent         = &clk_pll1,
+};
+static struct clk clk_p = {
+       .parent         = &clk_pll1,
+};
+static struct clk clk_pll2 = {
+       .parent         = &clk_xtali,
+};
 static struct clk clk_usb_host = {
+       .parent         = &clk_pll2,
        .enable_reg     = EP93XX_SYSCON_PWRCNT,
        .enable_mask    = EP93XX_SYSCON_PWRCNT_USH_EN,
 };
 static struct clk clk_keypad = {
+       .parent         = &clk_xtali,
        .sw_locked      = 1,
        .enable_reg     = EP93XX_SYSCON_KEYTCHCLKDIV,
        .enable_mask    = EP93XX_SYSCON_KEYTCHCLKDIV_KEN,
        .set_rate       = set_keytchclk_rate,
 };
 static struct clk clk_pwm = {
+       .parent         = &clk_xtali,
        .rate           = EP93XX_EXT_CLK_RATE,
 };
 
@@ -85,50 +108,62 @@ static struct clk clk_video = {
 
 /* DMA Clocks */
 static struct clk clk_m2p0 = {
+       .parent         = &clk_h,
        .enable_reg     = EP93XX_SYSCON_PWRCNT,
        .enable_mask    = EP93XX_SYSCON_PWRCNT_DMA_M2P0,
 };
 static struct clk clk_m2p1 = {
+       .parent         = &clk_h,
        .enable_reg     = EP93XX_SYSCON_PWRCNT,
        .enable_mask    = EP93XX_SYSCON_PWRCNT_DMA_M2P1,
 };
 static struct clk clk_m2p2 = {
+       .parent         = &clk_h,
        .enable_reg     = EP93XX_SYSCON_PWRCNT,
        .enable_mask    = EP93XX_SYSCON_PWRCNT_DMA_M2P2,
 };
 static struct clk clk_m2p3 = {
+       .parent         = &clk_h,
        .enable_reg     = EP93XX_SYSCON_PWRCNT,
        .enable_mask    = EP93XX_SYSCON_PWRCNT_DMA_M2P3,
 };
 static struct clk clk_m2p4 = {
+       .parent         = &clk_h,
        .enable_reg     = EP93XX_SYSCON_PWRCNT,
        .enable_mask    = EP93XX_SYSCON_PWRCNT_DMA_M2P4,
 };
 static struct clk clk_m2p5 = {
+       .parent         = &clk_h,
        .enable_reg     = EP93XX_SYSCON_PWRCNT,
        .enable_mask    = EP93XX_SYSCON_PWRCNT_DMA_M2P5,
 };
 static struct clk clk_m2p6 = {
+       .parent         = &clk_h,
        .enable_reg     = EP93XX_SYSCON_PWRCNT,
        .enable_mask    = EP93XX_SYSCON_PWRCNT_DMA_M2P6,
 };
 static struct clk clk_m2p7 = {
+       .parent         = &clk_h,
        .enable_reg     = EP93XX_SYSCON_PWRCNT,
        .enable_mask    = EP93XX_SYSCON_PWRCNT_DMA_M2P7,
 };
 static struct clk clk_m2p8 = {
+       .parent         = &clk_h,
        .enable_reg     = EP93XX_SYSCON_PWRCNT,
        .enable_mask    = EP93XX_SYSCON_PWRCNT_DMA_M2P8,
 };
 static struct clk clk_m2p9 = {
+       .parent         = &clk_h,
        .enable_reg     = EP93XX_SYSCON_PWRCNT,
        .enable_mask    = EP93XX_SYSCON_PWRCNT_DMA_M2P9,
 };
 static struct clk clk_m2m0 = {
+       .parent         = &clk_h,
        .enable_reg     = EP93XX_SYSCON_PWRCNT,
        .enable_mask    = EP93XX_SYSCON_PWRCNT_DMA_M2M0,
 };
 static struct clk clk_m2m1 = {
+       .parent         = &clk_h,
        .enable_reg     = EP93XX_SYSCON_PWRCNT,
        .enable_mask    = EP93XX_SYSCON_PWRCNT_DMA_M2M1,
 };
@@ -137,6 +172,7 @@ static struct clk clk_m2m1 = {
        { .dev_id = dev, .con_id = con, .clk = ck }
 
 static struct clk_lookup clocks[] = {
+       INIT_CK(NULL,                   "xtali",        &clk_xtali),
        INIT_CK("apb:uart1",            NULL,           &clk_uart1),
        INIT_CK("apb:uart2",            NULL,           &clk_uart2),
        INIT_CK("apb:uart3",            NULL,           &clk_uart3),
@@ -163,48 +199,84 @@ static struct clk_lookup clocks[] = {
        INIT_CK(NULL,                   "m2m1",         &clk_m2m1),
 };
 
+static DEFINE_SPINLOCK(clk_lock);
+
+static void __clk_enable(struct clk *clk)
+{
+       if (!clk->users++) {
+               if (clk->parent)
+                       __clk_enable(clk->parent);
+
+               if (clk->enable_reg) {
+                       u32 v;
+
+                       v = __raw_readl(clk->enable_reg);
+                       v |= clk->enable_mask;
+                       if (clk->sw_locked)
+                               ep93xx_syscon_swlocked_write(v, clk->enable_reg);
+                       else
+                               __raw_writel(v, clk->enable_reg);
+               }
+       }
+}
 
 int clk_enable(struct clk *clk)
 {
-       if (!clk->users++ && clk->enable_reg) {
-               u32 value;
+       unsigned long flags;
 
-               value = __raw_readl(clk->enable_reg);
-               value |= clk->enable_mask;
-               if (clk->sw_locked)
-                       ep93xx_syscon_swlocked_write(value, clk->enable_reg);
-               else
-                       __raw_writel(value, clk->enable_reg);
-       }
+       if (!clk)
+               return -EINVAL;
+
+       spin_lock_irqsave(&clk_lock, flags);
+       __clk_enable(clk);
+       spin_unlock_irqrestore(&clk_lock, flags);
 
        return 0;
 }
 EXPORT_SYMBOL(clk_enable);
 
-void clk_disable(struct clk *clk)
+static void __clk_disable(struct clk *clk)
 {
-       if (!--clk->users && clk->enable_reg) {
-               u32 value;
+       if (!--clk->users) {
+               if (clk->enable_reg) {
+                       u32 v;
+
+                       v = __raw_readl(clk->enable_reg);
+                       v &= ~clk->enable_mask;
+                       if (clk->sw_locked)
+                               ep93xx_syscon_swlocked_write(v, clk->enable_reg);
+                       else
+                               __raw_writel(v, clk->enable_reg);
+               }
 
-               value = __raw_readl(clk->enable_reg);
-               value &= ~clk->enable_mask;
-               if (clk->sw_locked)
-                       ep93xx_syscon_swlocked_write(value, clk->enable_reg);
-               else
-                       __raw_writel(value, clk->enable_reg);
+               if (clk->parent)
+                       __clk_disable(clk->parent);
        }
 }
+
+void clk_disable(struct clk *clk)
+{
+       unsigned long flags;
+
+       if (!clk)
+               return;
+
+       spin_lock_irqsave(&clk_lock, flags);
+       __clk_disable(clk);
+       spin_unlock_irqrestore(&clk_lock, flags);
+}
 EXPORT_SYMBOL(clk_disable);
 
 static unsigned long get_uart_rate(struct clk *clk)
 {
+       unsigned long rate = clk_get_rate(clk->parent);
        u32 value;
 
        value = __raw_readl(EP93XX_SYSCON_PWRCNT);
        if (value & EP93XX_SYSCON_PWRCNT_UARTBAUD)
-               return EP93XX_EXT_CLK_RATE;
+               return rate;
        else
-               return EP93XX_EXT_CLK_RATE / 2;
+               return rate / 2;
 }
 
 unsigned long clk_get_rate(struct clk *clk)
@@ -244,16 +316,16 @@ static int set_keytchclk_rate(struct clk *clk, unsigned long rate)
        return 0;
 }
 
-static unsigned long calc_clk_div(unsigned long rate, int *psel, int *esel,
-                                 int *pdiv, int *div)
+static int calc_clk_div(struct clk *clk, unsigned long rate,
+                       int *psel, int *esel, int *pdiv, int *div)
 {
-       unsigned long max_rate, best_rate = 0,
-               actual_rate = 0, mclk_rate = 0, rate_err = -1;
+       struct clk *mclk;
+       unsigned long max_rate, actual_rate, mclk_rate, rate_err = -1;
        int i, found = 0, __div = 0, __pdiv = 0;
 
        /* Don't exceed the maximum rate */
        max_rate = max(max(clk_pll1.rate / 4, clk_pll2.rate / 4),
-                      (unsigned long)EP93XX_EXT_CLK_RATE / 4);
+                      clk_xtali.rate / 4);
        rate = min(rate, max_rate);
 
        /*
@@ -267,11 +339,12 @@ static unsigned long calc_clk_div(unsigned long rate, int *psel, int *esel,
         */
        for (i = 0; i < 3; i++) {
                if (i == 0)
-                       mclk_rate = EP93XX_EXT_CLK_RATE * 2;
+                       mclk = &clk_xtali;
                else if (i == 1)
-                       mclk_rate = clk_pll1.rate * 2;
-               else if (i == 2)
-                       mclk_rate = clk_pll2.rate * 2;
+                       mclk = &clk_pll1;
+               else
+                       mclk = &clk_pll2;
+               mclk_rate = mclk->rate * 2;
 
                /* Try each predivider value */
                for (__pdiv = 4; __pdiv <= 6; __pdiv++) {
@@ -286,7 +359,8 @@ static unsigned long calc_clk_div(unsigned long rate, int *psel, int *esel,
                                *div = __div;
                                *psel = (i == 2);
                                *esel = (i != 0);
-                               best_rate = actual_rate;
+                               clk->parent = mclk;
+                               clk->rate = actual_rate;
                                rate_err = abs(actual_rate - rate);
                                found = 1;
                        }
@@ -294,21 +368,19 @@ static unsigned long calc_clk_div(unsigned long rate, int *psel, int *esel,
        }
 
        if (!found)
-               return 0;
+               return -EINVAL;
 
-       return best_rate;
+       return 0;
 }
 
 static int set_div_rate(struct clk *clk, unsigned long rate)
 {
-       unsigned long actual_rate;
-       int psel = 0, esel = 0, pdiv = 0, div = 0;
+       int err, psel = 0, esel = 0, pdiv = 0, div = 0;
        u32 val;
 
-       actual_rate = calc_clk_div(rate, &psel, &esel, &pdiv, &div);
-       if (actual_rate == 0)
-               return -EINVAL;
-       clk->rate = actual_rate;
+       err = calc_clk_div(clk, rate, &psel, &esel, &pdiv, &div);
+       if (err)
+               return err;
 
        /* Clear the esel, psel, pdiv and div bits */
        val = __raw_readl(clk->enable_reg);
@@ -344,7 +416,7 @@ static unsigned long calc_pll_rate(u32 config_word)
        unsigned long long rate;
        int i;
 
-       rate = EP93XX_EXT_CLK_RATE;
+       rate = clk_xtali.rate;
        rate *= ((config_word >> 11) & 0x1f) + 1;               /* X1FBD */
        rate *= ((config_word >> 5) & 0x3f) + 1;                /* X2FBD */
        do_div(rate, (config_word & 0x1f) + 1);                 /* X2IPD */
@@ -377,7 +449,7 @@ static int __init ep93xx_clock_init(void)
 
        value = __raw_readl(EP93XX_SYSCON_CLOCK_SET1);
        if (!(value & 0x00800000)) {                    /* PLL1 bypassed?  */
-               clk_pll1.rate = EP93XX_EXT_CLK_RATE;
+               clk_pll1.rate = clk_xtali.rate;
        } else {
                clk_pll1.rate = calc_pll_rate(value);
        }
@@ -388,7 +460,7 @@ static int __init ep93xx_clock_init(void)
 
        value = __raw_readl(EP93XX_SYSCON_CLOCK_SET2);
        if (!(value & 0x00080000)) {                    /* PLL2 bypassed?  */
-               clk_pll2.rate = EP93XX_EXT_CLK_RATE;
+               clk_pll2.rate = clk_xtali.rate;
        } else if (value & 0x00040000) {                /* PLL2 enabled?  */
                clk_pll2.rate = calc_pll_rate(value);
        } else {
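
The clock changes above give each struct clk a parent and make clk_enable()/clk_disable() propagate along that chain under a spinlock. As a brief hypothetical illustration of how a driver consumes this (the function, device and error handling are assumptions for the example, not code from this patch), enabling a UART clock now implicitly enables its parent as well:

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    /* Hypothetical probe fragment: look up and enable a device clock.
     * With the parent chaining added above, clk_enable() also enables
     * the parent (e.g. clk_xtali for the UART clocks). */
    static int example_enable_clk(struct device *dev)
    {
            struct clk *clk = clk_get(dev, NULL);

            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            return clk_enable(clk);
    }
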
index f7ebed9..f95dc16 100644 (file)
@@ -550,13 +550,11 @@ void __init ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr)
        platform_device_register(&ep93xx_eth_device);
 }
 
-static struct i2c_gpio_platform_data ep93xx_i2c_data = {
-       .sda_pin                = EP93XX_GPIO_LINE_EEDAT,
-       .sda_is_open_drain      = 0,
-       .scl_pin                = EP93XX_GPIO_LINE_EECLK,
-       .scl_is_open_drain      = 0,
-       .udelay                 = 2,
-};
+
+/*************************************************************************
+ * EP93xx i2c peripheral handling
+ *************************************************************************/
+static struct i2c_gpio_platform_data ep93xx_i2c_data;
 
 static struct platform_device ep93xx_i2c_device = {
        .name                   = "i2c-gpio",
@@ -564,8 +562,25 @@ static struct platform_device ep93xx_i2c_device = {
        .dev.platform_data      = &ep93xx_i2c_data,
 };
 
-void __init ep93xx_register_i2c(struct i2c_board_info *devices, int num)
+void __init ep93xx_register_i2c(struct i2c_gpio_platform_data *data,
+                               struct i2c_board_info *devices, int num)
 {
+       /*
+        * Set the EEPROM interface pin drive type control.
+        * Defines the driver type for the EECLK and EEDAT pins as either
+        * open drain, which will require an external pull-up, or a normal
+        * CMOS driver.
+        */
+       if (data->sda_is_open_drain && data->sda_pin != EP93XX_GPIO_LINE_EEDAT)
+               pr_warning("ep93xx: sda != EEDAT, open drain has no effect\n");
+       if (data->scl_is_open_drain && data->scl_pin != EP93XX_GPIO_LINE_EECLK)
+               pr_warning("ep93xx: scl != EECLK, open drain has no effect\n");
+
+       __raw_writel((data->sda_is_open_drain << 1) |
+                    (data->scl_is_open_drain << 0),
+                    EP93XX_GPIO_EEDRIVE);
+
+       ep93xx_i2c_data = *data;
        i2c_register_board_info(0, devices, num);
        platform_device_register(&ep93xx_i2c_device);
 }
index 73145ae..ca71cf1 100644 (file)
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/platform_device.h>
-#include <linux/i2c.h>
 #include <linux/mtd/physmap.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/i2c-gpio.h>
 
 #include <mach/hardware.h>
 
@@ -76,13 +78,26 @@ static struct ep93xx_eth_data edb93xx_eth_data = {
        .phy_id         = 1,
 };
 
-static struct i2c_board_info __initdata edb93xxa_i2c_data[] = {
+
+/*************************************************************************
+ * EDB93xx i2c peripheral handling
+ *************************************************************************/
+static struct i2c_gpio_platform_data edb93xx_i2c_gpio_data = {
+       .sda_pin                = EP93XX_GPIO_LINE_EEDAT,
+       .sda_is_open_drain      = 0,
+       .scl_pin                = EP93XX_GPIO_LINE_EECLK,
+       .scl_is_open_drain      = 0,
+       .udelay                 = 0,    /* default to 100 kHz */
+       .timeout                = 0,    /* default to 100 ms */
+};
+
+static struct i2c_board_info __initdata edb93xxa_i2c_board_info[] = {
        {
                I2C_BOARD_INFO("isl1208", 0x6f),
        },
 };
 
-static struct i2c_board_info __initdata edb93xx_i2c_data[] = {
+static struct i2c_board_info __initdata edb93xx_i2c_board_info[] = {
        {
                I2C_BOARD_INFO("ds1337", 0x68),
        },
@@ -92,12 +107,14 @@ static void __init edb93xx_register_i2c(void)
 {
        if (machine_is_edb9302a() || machine_is_edb9307a() ||
            machine_is_edb9315a()) {
-               ep93xx_register_i2c(edb93xxa_i2c_data,
-                               ARRAY_SIZE(edb93xxa_i2c_data));
+               ep93xx_register_i2c(&edb93xx_i2c_gpio_data,
+                                   edb93xxa_i2c_board_info,
+                                   ARRAY_SIZE(edb93xxa_i2c_board_info));
        } else if (machine_is_edb9307() || machine_is_edb9312() ||
                   machine_is_edb9315()) {
-               ep93xx_register_i2c(edb93xx_i2c_data,
-                               ARRAY_SIZE(edb93xx_i2c_data));
+               ep93xx_register_i2c(&edb93xx_i2c_gpio_data,
+                                   edb93xx_i2c_board_info,
+                                   ARRAY_SIZE(edb93xx_i2c_board_info));
        }
 }
 
index 0fbf87b..b1f937e 100644 (file)
 #define EP93XX_AHB_VIRT_BASE           0xfef00000
 #define EP93XX_AHB_SIZE                        0x00100000
 
+#define EP93XX_AHB_PHYS(x)             (EP93XX_AHB_PHYS_BASE + (x))
 #define EP93XX_AHB_IOMEM(x)            IOMEM(EP93XX_AHB_VIRT_BASE + (x))
 
 #define EP93XX_APB_PHYS_BASE           0x80800000
 #define EP93XX_APB_VIRT_BASE           0xfed00000
 #define EP93XX_APB_SIZE                        0x00200000
 
+#define EP93XX_APB_PHYS(x)             (EP93XX_APB_PHYS_BASE + (x))
 #define EP93XX_APB_IOMEM(x)            IOMEM(EP93XX_APB_VIRT_BASE + (x))
 
 
 /* AHB peripherals */
 #define EP93XX_DMA_BASE                        EP93XX_AHB_IOMEM(0x00000000)
 
-#define EP93XX_ETHERNET_PHYS_BASE      (EP93XX_AHB_PHYS_BASE + 0x00010000)
+#define EP93XX_ETHERNET_PHYS_BASE      EP93XX_AHB_PHYS(0x00010000)
 #define EP93XX_ETHERNET_BASE           EP93XX_AHB_IOMEM(0x00010000)
 
-#define EP93XX_USB_PHYS_BASE           (EP93XX_AHB_PHYS_BASE + 0x00020000)
+#define EP93XX_USB_PHYS_BASE           EP93XX_AHB_PHYS(0x00020000)
 #define EP93XX_USB_BASE                        EP93XX_AHB_IOMEM(0x00020000)
 
-#define EP93XX_RASTER_PHYS_BASE                (EP93XX_AHB_PHYS_BASE + 0x00030000)
+#define EP93XX_RASTER_PHYS_BASE                EP93XX_AHB_PHYS(0x00030000)
 #define EP93XX_RASTER_BASE             EP93XX_AHB_IOMEM(0x00030000)
 
 #define EP93XX_GRAPHICS_ACCEL_BASE     EP93XX_AHB_IOMEM(0x00040000)
 
 #define EP93XX_GPIO_BASE               EP93XX_APB_IOMEM(0x00040000)
 #define EP93XX_GPIO_REG(x)             (EP93XX_GPIO_BASE + (x))
-#define EP93XX_GPIO_F_INT_TYPE1                EP93XX_GPIO_REG(0x4c)
-#define EP93XX_GPIO_F_INT_TYPE2                EP93XX_GPIO_REG(0x50)
-#define EP93XX_GPIO_F_INT_ACK          EP93XX_GPIO_REG(0x54)
-#define EP93XX_GPIO_F_INT_ENABLE       EP93XX_GPIO_REG(0x58)
 #define EP93XX_GPIO_F_INT_STATUS       EP93XX_GPIO_REG(0x5c)
-#define EP93XX_GPIO_A_INT_TYPE1                EP93XX_GPIO_REG(0x90)
-#define EP93XX_GPIO_A_INT_TYPE2                EP93XX_GPIO_REG(0x94)
-#define EP93XX_GPIO_A_INT_ACK          EP93XX_GPIO_REG(0x98)
-#define EP93XX_GPIO_A_INT_ENABLE       EP93XX_GPIO_REG(0x9c)
 #define EP93XX_GPIO_A_INT_STATUS       EP93XX_GPIO_REG(0xa0)
-#define EP93XX_GPIO_B_INT_TYPE1                EP93XX_GPIO_REG(0xac)
-#define EP93XX_GPIO_B_INT_TYPE2                EP93XX_GPIO_REG(0xb0)
-#define EP93XX_GPIO_B_INT_ACK          EP93XX_GPIO_REG(0xb4)
-#define EP93XX_GPIO_B_INT_ENABLE       EP93XX_GPIO_REG(0xb8)
 #define EP93XX_GPIO_B_INT_STATUS       EP93XX_GPIO_REG(0xbc)
+#define EP93XX_GPIO_EEDRIVE            EP93XX_GPIO_REG(0xc8)
 
 #define EP93XX_AAC_BASE                        EP93XX_APB_IOMEM(0x00080000)
 
 
 #define EP93XX_IRDA_BASE               EP93XX_APB_IOMEM(0x000b0000)
 
-#define EP93XX_UART1_PHYS_BASE         (EP93XX_APB_PHYS_BASE + 0x000c0000)
+#define EP93XX_UART1_PHYS_BASE         EP93XX_APB_PHYS(0x000c0000)
 #define EP93XX_UART1_BASE              EP93XX_APB_IOMEM(0x000c0000)
 
-#define EP93XX_UART2_PHYS_BASE         (EP93XX_APB_PHYS_BASE + 0x000d0000)
+#define EP93XX_UART2_PHYS_BASE         EP93XX_APB_PHYS(0x000d0000)
 #define EP93XX_UART2_BASE              EP93XX_APB_IOMEM(0x000d0000)
 
-#define EP93XX_UART3_PHYS_BASE         (EP93XX_APB_PHYS_BASE + 0x000e0000)
+#define EP93XX_UART3_PHYS_BASE         EP93XX_APB_PHYS(0x000e0000)
 #define EP93XX_UART3_BASE              EP93XX_APB_IOMEM(0x000e0000)
 
 #define EP93XX_KEY_MATRIX_BASE         EP93XX_APB_IOMEM(0x000f0000)
 #define EP93XX_ADC_BASE                        EP93XX_APB_IOMEM(0x00100000)
 #define EP93XX_TOUCHSCREEN_BASE                EP93XX_APB_IOMEM(0x00100000)
 
-#define EP93XX_PWM_PHYS_BASE           (EP93XX_APB_PHYS_BASE + 0x00110000)
+#define EP93XX_PWM_PHYS_BASE           EP93XX_APB_PHYS(0x00110000)
 #define EP93XX_PWM_BASE                        EP93XX_APB_IOMEM(0x00110000)
 
-#define EP93XX_RTC_PHYS_BASE           (EP93XX_APB_PHYS_BASE + 0x00120000)
+#define EP93XX_RTC_PHYS_BASE           EP93XX_APB_PHYS(0x00120000)
 #define EP93XX_RTC_BASE                        EP93XX_APB_IOMEM(0x00120000)
 
 #define EP93XX_SYSCON_BASE             EP93XX_APB_IOMEM(0x00130000)
 #define EP93XX_SYSCON_KEYTCHCLKDIV_ADIV        (1<<16)
 #define EP93XX_SYSCON_KEYTCHCLKDIV_KEN (1<<15)
 #define EP93XX_SYSCON_KEYTCHCLKDIV_KDIV        (1<<0)
+#define EP93XX_SYSCON_SYSCFG           EP93XX_SYSCON_REG(0x9c)
+#define EP93XX_SYSCON_SYSCFG_REV_MASK  (0xf0000000)
+#define EP93XX_SYSCON_SYSCFG_REV_SHIFT (28)
+#define EP93XX_SYSCON_SYSCFG_SBOOT     (1<<8)
+#define EP93XX_SYSCON_SYSCFG_LCSN7     (1<<7)
+#define EP93XX_SYSCON_SYSCFG_LCSN6     (1<<6)
+#define EP93XX_SYSCON_SYSCFG_LASDO     (1<<5)
+#define EP93XX_SYSCON_SYSCFG_LEEDA     (1<<4)
+#define EP93XX_SYSCON_SYSCFG_LEECLK    (1<<3)
+#define EP93XX_SYSCON_SYSCFG_LCSN2     (1<<1)
+#define EP93XX_SYSCON_SYSCFG_LCSN1     (1<<0)
 #define EP93XX_SYSCON_SWLOCK           EP93XX_SYSCON_REG(0xc0)
 
 #define EP93XX_WATCHDOG_BASE           EP93XX_APB_IOMEM(0x00140000)
index 0a1498a..c991b14 100644 (file)
@@ -114,17 +114,9 @@ extern void ep93xx_gpio_int_debounce(unsigned int irq, int enable);
  *          B0..B7  (7..15) to irq 72..79, and
  *          F0..F7 (16..24) to irq 80..87.
  */
-static inline int gpio_to_irq(unsigned gpio)
-{
-       if (gpio <= EP93XX_GPIO_LINE_MAX_IRQ)
-               return 64 + gpio;
-
-       return -EINVAL;
-}
-
-static inline int irq_to_gpio(unsigned irq)
-{
-       return irq - gpio_to_irq(0);
-}
+#define gpio_to_irq(gpio)      \
+       (((gpio) <= EP93XX_GPIO_LINE_MAX_IRQ) ? (64 + (gpio)) : -EINVAL)
+
+#define irq_to_gpio(irq)       ((irq) - gpio_to_irq(0))
 
 #endif
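
Because gpio_to_irq() is now a macro built from constant expressions, the mapping can also appear in static initializers; the resource below is a hypothetical illustration of that, not code from this patch:

    #include <linux/ioport.h>
    #include <mach/gpio.h>

    /* GPIO line 1 maps to IRQ 64 + 1 = 65 per the macro above. */
    static struct resource example_irq_resource = {
            .start  = gpio_to_irq(1),
            .end    = gpio_to_irq(1),
            .flags  = IORESOURCE_IRQ,
    };
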
index 925b12e..554064e 100644 (file)
@@ -9,6 +9,12 @@
 #define PHYS_OFFSET            UL(0x00000000)
 #elif defined(CONFIG_EP93XX_SDCE0_PHYS_OFFSET)
 #define PHYS_OFFSET            UL(0xc0000000)
+#elif defined(CONFIG_EP93XX_SDCE1_PHYS_OFFSET)
+#define PHYS_OFFSET            UL(0xd0000000)
+#elif defined(CONFIG_EP93XX_SDCE2_PHYS_OFFSET)
+#define PHYS_OFFSET            UL(0xe0000000)
+#elif defined(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET)
+#define PHYS_OFFSET            UL(0xf0000000)
 #else
 #error "Kconfig bug: No EP93xx PHYS_OFFSET set"
 #endif
index 01a0f08..a3ec33f 100644 (file)
@@ -4,6 +4,7 @@
 
 #ifndef __ASSEMBLY__
 
+struct i2c_gpio_platform_data;
 struct i2c_board_info;
 struct platform_device;
 struct ep93xxfb_mach_info;
@@ -33,7 +34,8 @@ static inline void ep93xx_devcfg_clear_bits(unsigned int bits)
 }
 
 void ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr);
-void ep93xx_register_i2c(struct i2c_board_info *devices, int num);
+void ep93xx_register_i2c(struct i2c_gpio_platform_data *data,
+                        struct i2c_board_info *devices, int num);
 void ep93xx_register_fb(struct ep93xxfb_mach_info *data);
 void ep93xx_register_pwm(int pwm0, int pwm1);
 int ep93xx_pwm_acquire_gpio(struct platform_device *pdev);
index 0a313e8..d83b804 100644 (file)
@@ -2,7 +2,9 @@
  *  linux/arch/arm/mach-ep93xx/micro9.c
  *
  * Copyright (C) 2006 Contec Steuerungstechnik & Automation GmbH
- *                   Manfred Gruber <manfred.gruber@contec.at>
+ *                    Manfred Gruber <m.gruber@tirol.com>
+ * Copyright (C) 2009 Contec Steuerungstechnik & Automation GmbH
+ *                    Hubert Feurstein <hubert.feurstein@contec.at>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
 #include <asm/mach/arch.h>
 
 
-static struct ep93xx_eth_data micro9_eth_data = {
-       .phy_id         = 0x1f,
-};
-
-static void __init micro9_init(void)
-{
-       ep93xx_register_eth(&micro9_eth_data, 1);
-}
-
-/*
- * Micro9-H
- */
-#ifdef CONFIG_MACH_MICRO9H
-static struct physmap_flash_data micro9h_flash_data = {
-       .width          = 4,
-};
-
-static struct resource micro9h_flash_resource = {
+/*************************************************************************
+ * Micro9 NOR Flash
+ *
+ * Micro9-High has up to 64MB of 32-bit flash on CS1
+ * Micro9-Mid has up to 64MB of either 32-bit or 16-bit flash on CS1
+ * Micro9-Lite uses a separate MTD map driver for flash support
+ * Micro9-Slim has up to 64MB of either 32-bit or 16-bit flash on CS1
+ *************************************************************************/
+static struct physmap_flash_data micro9_flash_data;
+
+static struct resource micro9_flash_resource = {
        .start          = EP93XX_CS1_PHYS_BASE,
        .end            = EP93XX_CS1_PHYS_BASE + SZ_64M - 1,
        .flags          = IORESOURCE_MEM,
 };
 
-static struct platform_device micro9h_flash = {
+static struct platform_device micro9_flash = {
        .name           = "physmap-flash",
        .id             = 0,
        .dev            = {
-               .platform_data  = &micro9h_flash_data,
+               .platform_data  = &micro9_flash_data,
        },
        .num_resources  = 1,
-       .resource       = &micro9h_flash_resource,
+       .resource       = &micro9_flash_resource,
 };
 
-static void __init micro9h_init(void)
+static void __init __micro9_register_flash(unsigned int width)
+{
+       micro9_flash_data.width = width;
+
+       platform_device_register(&micro9_flash);
+}
+
+static unsigned int __init micro9_detect_bootwidth(void)
+{
+       u32 v;
+
+       /* Detect the bus width of the external flash memory */
+       v = __raw_readl(EP93XX_SYSCON_SYSCFG);
+       if (v & EP93XX_SYSCON_SYSCFG_LCSN7)
+               return 4; /* 32-bit */
+       else
+               return 2; /* 16-bit */
+}
+
+static void __init micro9_register_flash(void)
 {
-       platform_device_register(&micro9h_flash);
+       if (machine_is_micro9())
+               __micro9_register_flash(4);
+       else if (machine_is_micro9m() || machine_is_micro9s())
+               __micro9_register_flash(micro9_detect_bootwidth());
 }
 
-static void __init micro9h_init_machine(void)
+
+/*************************************************************************
+ * Micro9 Ethernet
+ *************************************************************************/
+static struct ep93xx_eth_data micro9_eth_data = {
+       .phy_id         = 0x1f,
+};
+
+
+static void __init micro9_init_machine(void)
 {
        ep93xx_init_devices();
-       micro9_init();
-       micro9h_init();
+       ep93xx_register_eth(&micro9_eth_data, 1);
+       micro9_register_flash();
 }
 
-MACHINE_START(MICRO9, "Contec Hypercontrol Micro9-H")
-       /* Maintainer: Manfred Gruber <manfred.gruber@contec.at> */
+
+#ifdef CONFIG_MACH_MICRO9H
+MACHINE_START(MICRO9, "Contec Micro9-High")
+       /* Maintainer: Hubert Feurstein <hubert.feurstein@contec.at> */
        .phys_io        = EP93XX_APB_PHYS_BASE,
        .io_pg_offst    = ((EP93XX_APB_VIRT_BASE) >> 18) & 0xfffc,
        .boot_params    = EP93XX_SDCE3_PHYS_BASE_SYNC + 0x100,
        .map_io         = ep93xx_map_io,
        .init_irq       = ep93xx_init_irq,
        .timer          = &ep93xx_timer,
-       .init_machine   = micro9h_init_machine,
+       .init_machine   = micro9_init_machine,
 MACHINE_END
 #endif
 
-/*
- * Micro9-M
- */
 #ifdef CONFIG_MACH_MICRO9M
-static void __init micro9m_init_machine(void)
-{
-       ep93xx_init_devices();
-       micro9_init();
-}
-
-MACHINE_START(MICRO9M, "Contec Hypercontrol Micro9-M")
-       /* Maintainer: Manfred Gruber <manfred.gruber@contec.at> */
+MACHINE_START(MICRO9M, "Contec Micro9-Mid")
+       /* Maintainer: Hubert Feurstein <hubert.feurstein@contec.at> */
        .phys_io        = EP93XX_APB_PHYS_BASE,
        .io_pg_offst    = ((EP93XX_APB_VIRT_BASE) >> 18) & 0xfffc,
-       .boot_params    = EP93XX_SDCE3_PHYS_BASE_SYNC + 0x100,
+       .boot_params    = EP93XX_SDCE3_PHYS_BASE_ASYNC + 0x100,
        .map_io         = ep93xx_map_io,
        .init_irq       = ep93xx_init_irq,
        .timer          = &ep93xx_timer,
-       .init_machine   = micro9m_init_machine,
+       .init_machine   = micro9_init_machine,
 MACHINE_END
 #endif
 
-/*
- * Micro9-L
- */
 #ifdef CONFIG_MACH_MICRO9L
-static void __init micro9l_init_machine(void)
-{
-       ep93xx_init_devices();
-       micro9_init();
-}
-
-MACHINE_START(MICRO9L, "Contec Hypercontrol Micro9-L")
-       /* Maintainer: Manfred Gruber <manfred.gruber@contec.at> */
+MACHINE_START(MICRO9L, "Contec Micro9-Lite")
+       /* Maintainer: Hubert Feurstein <hubert.feurstein@contec.at> */
        .phys_io        = EP93XX_APB_PHYS_BASE,
        .io_pg_offst    = ((EP93XX_APB_VIRT_BASE) >> 18) & 0xfffc,
        .boot_params    = EP93XX_SDCE3_PHYS_BASE_SYNC + 0x100,
        .map_io         = ep93xx_map_io,
        .init_irq       = ep93xx_init_irq,
        .timer          = &ep93xx_timer,
-       .init_machine   = micro9l_init_machine,
+       .init_machine   = micro9_init_machine,
 MACHINE_END
 #endif
 
+#ifdef CONFIG_MACH_MICRO9S
+MACHINE_START(MICRO9S, "Contec Micro9-Slim")
+       /* Maintainer: Hubert Feurstein <hubert.feurstein@contec.at> */
+       .phys_io        = EP93XX_APB_PHYS_BASE,
+       .io_pg_offst    = ((EP93XX_APB_VIRT_BASE) >> 18) & 0xfffc,
+       .boot_params    = EP93XX_SDCE3_PHYS_BASE_ASYNC + 0x100,
+       .map_io         = ep93xx_map_io,
+       .init_irq       = ep93xx_init_irq,
+       .timer          = &ep93xx_timer,
+       .init_machine   = micro9_init_machine,
+MACHINE_END
+#endif
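
For reference, the physmap "width" field filled in by __micro9_register_flash() is the bus width in bytes, so the SYSCFG probe above maps a 32-bit boot bank to 4 and a 16-bit bank to 2. Purely as an illustration (not part of this patch), a board with a hard-wired 16-bit bank could skip the runtime detection and reuse the same helper:

	/* Hypothetical board with a fixed 16-bit NOR bank: no SYSCFG probe,
	 * just call the shared helper with the bus width in bytes. */
	static void __init example_board_register_flash(void)
	{
		__micro9_register_flash(2);	/* 2 bytes = 16-bit bus */
	}
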
index 3a8ee22..983cc8c 100644 (file)
@@ -155,7 +155,7 @@ MODULE_PARM_DESC(pxa255_turbo_table, "Selects the frequency table (0 = run table
 
 static pxa_freqs_t pxa27x_freqs[] = {
        {104000, 104000, PXA27x_CCCR(1,  8, 2), 0, CCLKCFG2(1, 0, 1),  900000, 1705000 },
-       {156000, 104000, PXA27x_CCCR(1,  8, 6), 0, CCLKCFG2(1, 1, 1), 1000000, 1705000 },
+       {156000, 104000, PXA27x_CCCR(1,  8, 3), 0, CCLKCFG2(1, 0, 1), 1000000, 1705000 },
        {208000, 208000, PXA27x_CCCR(0, 16, 2), 1, CCLKCFG2(0, 0, 1), 1180000, 1705000 },
        {312000, 208000, PXA27x_CCCR(1, 16, 3), 1, CCLKCFG2(1, 0, 1), 1250000, 1705000 },
        {416000, 208000, PXA27x_CCCR(1, 16, 4), 1, CCLKCFG2(1, 0, 1), 1350000, 1705000 },
index 79141f8..965480e 100644 (file)
@@ -238,7 +238,7 @@ static struct resource csb726_lan_resources[] = {
 };
 
 struct smsc911x_platform_config csb726_lan_config = {
-       .irq_type       = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
+       .irq_polarity   = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
        .irq_type       = SMSC911X_IRQ_TYPE_PUSH_PULL,
        .flags          = SMSC911X_USE_32BIT,
        .phy_interface  = PHY_INTERFACE_MODE_MII,
index 8a5546e..bb7b819 100644 (file)
@@ -25,6 +25,7 @@ led-$(CONFIG_SA1100_CERF)             += leds-cerf.o
 
 obj-$(CONFIG_SA1100_COLLIE)            += collie.o
 
+obj-$(CONFIG_SA1100_H3100)             += h3600.o
 obj-$(CONFIG_SA1100_H3600)             += h3600.o
 
 obj-$(CONFIG_SA1100_HACKKIT)           += hackkit.o
index 8f5c13f..295e25d 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <asm/assembler.h>
+#include <asm/unwind.h>
 
 #include "proc-macros.S"
 
@@ -121,11 +122,13 @@ ENTRY(v6_coherent_kern_range)
  *     - the Icache does not read data from the write buffer
  */
 ENTRY(v6_coherent_user_range)
-
+ UNWIND(.fnstart               )
 #ifdef HARVARD_CACHE
        bic     r0, r0, #CACHE_LINE_SIZE - 1
-1:     mcr     p15, 0, r0, c7, c10, 1          @ clean D line
+1:
+ USER( mcr     p15, 0, r0, c7, c10, 1  )       @ clean D line
        add     r0, r0, #CACHE_LINE_SIZE
+2:
        cmp     r0, r1
        blo     1b
 #endif
@@ -143,6 +146,19 @@ ENTRY(v6_coherent_user_range)
        mov     pc, lr
 
 /*
+ * Fault handling for the cache operation above. If the virtual address in r0
+ * isn't mapped, just try the next page.
+ */
+9001:
+       mov     r0, r0, lsr #12
+       mov     r0, r0, lsl #12
+       add     r0, r0, #4096
+       b       2b
+ UNWIND(.fnend         )
+ENDPROC(v6_coherent_user_range)
+ENDPROC(v6_coherent_kern_range)
+
+/*
  *     v6_flush_kern_dcache_page(kaddr)
  *
  *     Ensure that the data held in the page kaddr is written back
index bda0ec3..e1bd975 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <asm/assembler.h>
+#include <asm/unwind.h>
 
 #include "proc-macros.S"
 
@@ -153,13 +154,16 @@ ENTRY(v7_coherent_kern_range)
  *     - the Icache does not read data from the write buffer
  */
 ENTRY(v7_coherent_user_range)
+ UNWIND(.fnstart               )
        dcache_line_size r2, r3
        sub     r3, r2, #1
        bic     r0, r0, r3
-1:     mcr     p15, 0, r0, c7, c11, 1          @ clean D line to the point of unification
+1:
+ USER( mcr     p15, 0, r0, c7, c11, 1  )       @ clean D line to the point of unification
        dsb
-       mcr     p15, 0, r0, c7, c5, 1           @ invalidate I line
+ USER( mcr     p15, 0, r0, c7, c5, 1   )       @ invalidate I line
        add     r0, r0, r2
+2:
        cmp     r0, r1
        blo     1b
        mov     r0, #0
@@ -167,6 +171,17 @@ ENTRY(v7_coherent_user_range)
        dsb
        isb
        mov     pc, lr
+
+/*
+ * Fault handling for the cache operation above. If the virtual address in r0
+ * isn't mapped, just try the next page.
+ */
+9001:
+       mov     r0, r0, lsr #12
+       mov     r0, r0, lsl #12
+       add     r0, r0, #4096
+       b       2b
+ UNWIND(.fnend         )
 ENDPROC(v7_coherent_kern_range)
 ENDPROC(v7_coherent_user_range)
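
The USER() annotations are what route a fault on the cache-maintenance instruction to the local 9001: fixup through the exception table. The fixup itself simply skips the unmapped page: it rounds the faulting address down to its 4 KiB page and resumes the loop at label 2 on the next page. A rough C sketch of that arithmetic (assuming the 4 KiB page size implied by the lsr/lsl #12):

	/* Rough C equivalent of the 9001 fixup above: drop the offset within
	 * the unmapped page, then step to the start of the following page. */
	static unsigned long skip_to_next_page(unsigned long addr)
	{
		addr &= ~(4096UL - 1);		/* lsr #12 ; lsl #12 */
		return addr + 4096;		/* add r0, r0, #4096  */
	}
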
 
index bc0099d..d0d17b6 100644 (file)
@@ -153,14 +153,11 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 
        page = pfn_to_page(pfn);
        mapping = page_mapping(page);
-       if (mapping) {
 #ifndef CONFIG_SMP
-               int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
-
-               if (dirty)
-                       __flush_dcache_page(mapping, page);
+       if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+               __flush_dcache_page(mapping, page);
 #endif
-
+       if (mapping) {
                if (cache_is_vivt())
                        make_coherent(mapping, vma, addr, pfn);
                else if (vma->vm_flags & VM_EXEC)
index ae0e25f..10e0680 100644 (file)
@@ -292,6 +292,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
                 * down_read()
                 */
                might_sleep();
+#ifdef CONFIG_DEBUG_VM
+               if (!user_mode(regs) &&
+                   !search_exception_tables(regs->ARM_pc))
+                       goto no_context;
+#endif
        }
 
        fault = __do_page_fault(mm, addr, fsr, tsk);
index 73cae57..30f82fb 100644 (file)
@@ -46,6 +46,8 @@ void *kmap_atomic(struct page *page, enum km_type type)
        if (!PageHighMem(page))
                return page_address(page);
 
+       debug_kmap_atomic(type);
+
        kmap = kmap_high_get(page);
        if (kmap)
                return kmap;
index 877c492..40940d7 100644 (file)
@@ -483,7 +483,7 @@ free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
        /*
         * Convert start_pfn/end_pfn to a struct page pointer.
         */
-       start_pg = pfn_to_page(start_pfn);
+       start_pg = pfn_to_page(start_pfn - 1) + 1;
        end_pg = pfn_to_page(end_pfn);
 
        /*
index 704dd39..77df726 100644 (file)
@@ -438,7 +438,7 @@ static int diag204_probe(void)
                }
                if (diag204((unsigned long)SUBC_STIB6 |
                            (unsigned long)INFO_EXT, pages, buf) >= 0) {
-                       diag204_store_sc = SUBC_STIB7;
+                       diag204_store_sc = SUBC_STIB6;
                        diag204_info_type = INFO_EXT;
                        goto out;
                }
index f5fe34d..5a82bc6 100644 (file)
@@ -203,73 +203,10 @@ out:
 
 #ifdef CONFIG_FTRACE_SYSCALLS
 
-extern unsigned long __start_syscalls_metadata[];
-extern unsigned long __stop_syscalls_metadata[];
 extern unsigned int sys_call_table[];
 
-static struct syscall_metadata **syscalls_metadata;
-
-struct syscall_metadata *syscall_nr_to_meta(int nr)
-{
-       if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
-               return NULL;
-
-       return syscalls_metadata[nr];
-}
-
-int syscall_name_to_nr(char *name)
-{
-       int i;
-
-       if (!syscalls_metadata)
-               return -1;
-       for (i = 0; i < NR_syscalls; i++)
-               if (syscalls_metadata[i])
-                       if (!strcmp(syscalls_metadata[i]->name, name))
-                               return i;
-       return -1;
-}
-
-void set_syscall_enter_id(int num, int id)
-{
-       syscalls_metadata[num]->enter_id = id;
-}
-
-void set_syscall_exit_id(int num, int id)
+unsigned long __init arch_syscall_addr(int nr)
 {
-       syscalls_metadata[num]->exit_id = id;
-}
-
-static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
-{
-       struct syscall_metadata *start;
-       struct syscall_metadata *stop;
-       char str[KSYM_SYMBOL_LEN];
-
-       start = (struct syscall_metadata *)__start_syscalls_metadata;
-       stop = (struct syscall_metadata *)__stop_syscalls_metadata;
-       kallsyms_lookup(syscall, NULL, NULL, NULL, str);
-
-       for ( ; start < stop; start++) {
-               if (start->name && !strcmp(start->name + 3, str + 3))
-                       return start;
-       }
-       return NULL;
-}
-
-static int __init arch_init_ftrace_syscalls(void)
-{
-       struct syscall_metadata *meta;
-       int i;
-       syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls,
-                                   GFP_KERNEL);
-       if (!syscalls_metadata)
-               return -ENOMEM;
-       for (i = 0; i < NR_syscalls; i++) {
-               meta = find_syscall_meta((unsigned long)sys_call_table[i]);
-               syscalls_metadata[i] = meta;
-       }
-       return 0;
+       return (unsigned long)sys_call_table[nr];
 }
-arch_initcall(arch_init_ftrace_syscalls);
 #endif
index 802c8ab..0729f36 100644 (file)
@@ -31,9 +31,9 @@ void __cpuinit print_cpu_info(void)
 
 static int show_cpuinfo(struct seq_file *m, void *v)
 {
-       static const char *hwcap_str[9] = {
+       static const char *hwcap_str[10] = {
                "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
-               "edat", "etf3eh"
+               "edat", "etf3eh", "highgprs"
        };
        struct _lowcore *lc;
        unsigned long n = (unsigned long) v - 1;
@@ -48,7 +48,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                           num_online_cpus(), loops_per_jiffy/(500000/HZ),
                           (loops_per_jiffy/(5000/HZ))%100);
                seq_puts(m, "features\t: ");
-               for (i = 0; i < 9; i++)
+               for (i = 0; i < 10; i++)
                        if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
                                seq_printf(m, "%s ", hwcap_str[i]);
                seq_puts(m, "\n");
index 68d9223..3eb8493 100644 (file)
@@ -121,7 +121,7 @@ noresched:
 ENTRY(resume_userspace)
        ! r8: current_thread_info
        cli
-       TRACE_IRQS_OfF
+       TRACE_IRQS_OFF
        mov.l   @(TI_FLAGS,r8), r0              ! current_thread_info->flags
        tst     #(_TIF_WORK_MASK & 0xff), r0
        bt/s    __restore_all
index a3dcc6d..2c48e26 100644 (file)
@@ -291,31 +291,48 @@ struct syscall_metadata *syscall_nr_to_meta(int nr)
        return syscalls_metadata[nr];
 }
 
-void arch_init_ftrace_syscalls(void)
+int syscall_name_to_nr(char *name)
+{
+       int i;
+
+       if (!syscalls_metadata)
+               return -1;
+       for (i = 0; i < NR_syscalls; i++)
+               if (syscalls_metadata[i])
+                       if (!strcmp(syscalls_metadata[i]->name, name))
+                               return i;
+       return -1;
+}
+
+void set_syscall_enter_id(int num, int id)
+{
+       syscalls_metadata[num]->enter_id = id;
+}
+
+void set_syscall_exit_id(int num, int id)
+{
+       syscalls_metadata[num]->exit_id = id;
+}
+
+static int __init arch_init_ftrace_syscalls(void)
 {
        int i;
        struct syscall_metadata *meta;
        unsigned long **psys_syscall_table = &sys_call_table;
-       static atomic_t refs;
-
-       if (atomic_inc_return(&refs) != 1)
-               goto end;
 
        syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
                                        FTRACE_SYSCALL_MAX, GFP_KERNEL);
        if (!syscalls_metadata) {
                WARN_ON(1);
-               return;
+               return -ENOMEM;
        }
 
        for (i = 0; i < FTRACE_SYSCALL_MAX; i++) {
                meta = find_syscall_meta(psys_syscall_table[i]);
                syscalls_metadata[i] = meta;
        }
-       return;
 
-       /* Paranoid: avoid overflow */
-end:
-       atomic_dec(&refs);
+       return 0;
 }
+arch_initcall(arch_init_ftrace_syscalls);
 #endif /* CONFIG_FTRACE_SYSCALLS */
index f9d44f8..99b4fb5 100644 (file)
@@ -549,6 +549,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 
        if (cpu == 0)
                seq_printf(m, "machine\t\t: %s\n", get_system_type());
+       else
+               seq_printf(m, "\n");
 
        seq_printf(m, "processor\t: %d\n", cpu);
        seq_printf(m, "cpu family\t: %s\n", init_utsname()->machine);
index 6729703..3db3742 100644 (file)
@@ -145,7 +145,7 @@ static inline int restore_sigcontext_fpu(struct sigcontext __user *sc)
 {
        struct task_struct *tsk = current;
 
-       if (!(current_cpu_data.flags & CPU_HAS_FPU))
+       if (!(boot_cpu_data.flags & CPU_HAS_FPU))
                return 0;
 
        set_used_math();
@@ -158,7 +158,7 @@ static inline int save_sigcontext_fpu(struct sigcontext __user *sc,
 {
        struct task_struct *tsk = current;
 
-       if (!(current_cpu_data.flags & CPU_HAS_FPU))
+       if (!(boot_cpu_data.flags & CPU_HAS_FPU))
                return 0;
 
        if (!used_math()) {
@@ -199,7 +199,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p
 #undef COPY
 
 #ifdef CONFIG_SH_FPU
-       if (current_cpu_data.flags & CPU_HAS_FPU) {
+       if (boot_cpu_data.flags & CPU_HAS_FPU) {
                int owned_fp;
                struct task_struct *tsk = current;
 
@@ -472,6 +472,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
                err |= __put_user(OR_R0_R0, &frame->retcode[6]);
                err |= __put_user((__NR_rt_sigreturn), &frame->retcode[7]);
                regs->pr = (unsigned long) frame->retcode;
+               flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode));
        }
 
        if (err)
@@ -497,8 +498,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
                 current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
 
-       flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode));
-
        return 0;
 
 give_sigsegv:
index 442d8d4..160db10 100644 (file)
@@ -35,6 +35,8 @@ static inline void __init smp_store_cpu_info(unsigned int cpu)
 {
        struct sh_cpuinfo *c = cpu_data + cpu;
 
+       memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));
+
        c->loops_per_jiffy = loops_per_jiffy;
 }
 
index e0b5e4b..7a2ee3a 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/kexec.h>
 #include <linux/limits.h>
 #include <linux/proc_fs.h>
+#include <linux/sysfs.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/fpu.h>
@@ -159,12 +160,12 @@ void die(const char * str, struct pt_regs * regs, long err)
 
        oops_enter();
 
-       console_verbose();
        spin_lock_irq(&die_lock);
+       console_verbose();
        bust_spinlocks(1);
 
        printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
-
+       sysfs_printk_last_file();
        print_modules();
        show_regs(regs);
 
@@ -180,6 +181,7 @@ void die(const char * str, struct pt_regs * regs, long err)
        bust_spinlocks(0);
        add_taint(TAINT_DIE);
        spin_unlock_irq(&die_lock);
+       oops_exit();
 
        if (kexec_should_crash(current))
                crash_kexec(regs);
@@ -190,7 +192,6 @@ void die(const char * str, struct pt_regs * regs, long err)
        if (panic_on_oops)
                panic("Fatal exception");
 
-       oops_exit();
        do_exit(SIGSEGV);
 }
 
index 35c37b7..5e1091b 100644 (file)
@@ -128,7 +128,7 @@ void __update_cache(struct vm_area_struct *vma,
                return;
 
        page = pfn_to_page(pfn);
-       if (pfn_valid(pfn) && page_mapping(page)) {
+       if (pfn_valid(pfn)) {
                int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
                if (dirty) {
                        unsigned long addr = (unsigned long)page_address(page);
index adf5f27..cb3c72c 100644 (file)
@@ -1242,13 +1242,13 @@ int ldc_bind(struct ldc_channel *lp, const char *name)
        snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
 
        err = request_irq(lp->cfg.rx_irq, ldc_rx,
-                         IRQF_SAMPLE_RANDOM | IRQF_SHARED,
+                         IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED,
                          lp->rx_irq_name, lp);
        if (err)
                return err;
 
        err = request_irq(lp->cfg.tx_irq, ldc_tx,
-                         IRQF_SAMPLE_RANDOM | IRQF_SHARED,
+                         IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED,
                          lp->tx_irq_name, lp);
        if (err) {
                free_irq(lp->cfg.rx_irq, lp);
index 04db927..fa5936e 100644 (file)
@@ -437,7 +437,7 @@ static const struct sparc_pmu niagara2_pmu = {
        .lower_shift    = 6,
        .event_mask     = 0xfff,
        .hv_bit         = 0x8,
-       .irq_bit        = 0x03,
+       .irq_bit        = 0x30,
        .upper_nop      = 0x220,
        .lower_nop      = 0x220,
 };
index a70a5e1..1886d37 100644 (file)
@@ -265,7 +265,7 @@ static void flush_dcache(unsigned long pfn)
        struct page *page;
 
        page = pfn_to_page(pfn);
-       if (page && page_mapping(page)) {
+       if (page) {
                unsigned long pg_flags;
 
                pg_flags = page->flags;
index c876bac..07e0114 100644 (file)
@@ -491,7 +491,7 @@ if PARAVIRT_GUEST
 source "arch/x86/xen/Kconfig"
 
 config VMI
-       bool "VMI Guest support"
+       bool "VMI Guest support (DEPRECATED)"
        select PARAVIRT
        depends on X86_32
        ---help---
@@ -500,6 +500,15 @@ config VMI
          at the moment), by linking the kernel to a GPL-ed ROM module
          provided by the hypervisor.
 
+         As of September 2009, VMware has started a phased retirement
+         of this feature from VMware's products. Please see
+         feature-removal-schedule.txt for details.  If you are
+         planning to enable this option, please note that you cannot
+         live migrate a VMI enabled VM to a future VMware product,
+         which doesn't support VMI. So if you expect your kernel to
+         seamlessly migrate to newer VMware products, keep this
+         disabled.
+
 config KVM_CLOCK
        bool "KVM paravirtualized clock"
        select PARAVIRT
index 8aebcc4..efb3899 100644 (file)
@@ -840,42 +840,22 @@ static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
 
 static inline unsigned long __raw_local_save_flags(void)
 {
-       unsigned long f;
-
-       asm volatile(paravirt_alt(PARAVIRT_CALL)
-                    : "=a"(f)
-                    : paravirt_type(pv_irq_ops.save_fl),
-                      paravirt_clobber(CLBR_EAX)
-                    : "memory", "cc");
-       return f;
+       return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
 }
 
 static inline void raw_local_irq_restore(unsigned long f)
 {
-       asm volatile(paravirt_alt(PARAVIRT_CALL)
-                    : "=a"(f)
-                    : PV_FLAGS_ARG(f),
-                      paravirt_type(pv_irq_ops.restore_fl),
-                      paravirt_clobber(CLBR_EAX)
-                    : "memory", "cc");
+       PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
 }
 
 static inline void raw_local_irq_disable(void)
 {
-       asm volatile(paravirt_alt(PARAVIRT_CALL)
-                    :
-                    : paravirt_type(pv_irq_ops.irq_disable),
-                      paravirt_clobber(CLBR_EAX)
-                    : "memory", "eax", "cc");
+       PVOP_VCALLEE0(pv_irq_ops.irq_disable);
 }
 
 static inline void raw_local_irq_enable(void)
 {
-       asm volatile(paravirt_alt(PARAVIRT_CALL)
-                    :
-                    : paravirt_type(pv_irq_ops.irq_enable),
-                      paravirt_clobber(CLBR_EAX)
-                    : "memory", "eax", "cc");
+       PVOP_VCALLEE0(pv_irq_ops.irq_enable);
 }
 
 static inline unsigned long __raw_local_irq_save(void)
index dd0f5b3..9357473 100644 (file)
@@ -494,10 +494,11 @@ int paravirt_disable_iospace(void);
 #define EXTRA_CLOBBERS
 #define VEXTRA_CLOBBERS
 #else  /* CONFIG_X86_64 */
+/* [re]ax isn't an arg, but the return val */
 #define PVOP_VCALL_ARGS                                        \
        unsigned long __edi = __edi, __esi = __esi,     \
-               __edx = __edx, __ecx = __ecx
-#define PVOP_CALL_ARGS         PVOP_VCALL_ARGS, __eax
+               __edx = __edx, __ecx = __ecx, __eax = __eax
+#define PVOP_CALL_ARGS         PVOP_VCALL_ARGS
 
 #define PVOP_CALL_ARG1(x)              "D" ((unsigned long)(x))
 #define PVOP_CALL_ARG2(x)              "S" ((unsigned long)(x))
@@ -509,6 +510,7 @@ int paravirt_disable_iospace(void);
                                "=c" (__ecx)
 #define PVOP_CALL_CLOBBERS     PVOP_VCALL_CLOBBERS, "=a" (__eax)
 
+/* void functions are still allowed [re]ax for scratch */
 #define PVOP_VCALLEE_CLOBBERS  "=a" (__eax)
 #define PVOP_CALLEE_CLOBBERS   PVOP_VCALLEE_CLOBBERS
 
@@ -583,8 +585,8 @@ int paravirt_disable_iospace(void);
                       VEXTRA_CLOBBERS,                                 \
                       pre, post, ##__VA_ARGS__)
 
-#define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...)                        \
-       ____PVOP_CALL(rettype, op.func, CLBR_RET_REG,                   \
+#define __PVOP_VCALLEESAVE(op, pre, post, ...)                         \
+       ____PVOP_VCALL(op.func, CLBR_RET_REG,                           \
                      PVOP_VCALLEE_CLOBBERS, ,                          \
                      pre, post, ##__VA_ARGS__)
 
index ad7ce3f..8d9f854 100644 (file)
  */
 #define ARCH_PERFMON_EVENT_MASK                                    0xffff
 
+/*
+ * filter mask to validate fixed counter events.
+ * the following filters disqualify for fixed counters:
+ *  - inv
+ *  - edge
+ *  - cnt-mask
+ *  The other filters are supported by fixed counters.
+ *  The any-thread option is supported starting with v3.
+ */
+#define ARCH_PERFMON_EVENT_FILTER_MASK                 0xff840000
+
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL                0x3c
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK                (0x00 << 8)
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX                 0
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX                         0
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
                (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
 
index 7da00b7..0e50e1e 100644 (file)
@@ -56,6 +56,6 @@ SECTIONS
        /DISCARD/ : {
                *(.note*)
        }
-
-       . = ASSERT(_end <= WAKEUP_SIZE, "Wakeup too big!");
 }
+
+ASSERT(_end <= WAKEUP_SIZE, "Wakeup too big!");
index b5801c3..2e20bca 100644 (file)
@@ -77,6 +77,18 @@ struct cpu_hw_events {
        struct debug_store      *ds;
 };
 
+struct event_constraint {
+       unsigned long   idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+       int             code;
+};
+
+#define EVENT_CONSTRAINT(c, m) { .code = (c), .idxmsk[0] = (m) }
+#define EVENT_CONSTRAINT_END  { .code = 0, .idxmsk[0] = 0 }
+
+#define for_each_event_constraint(e, c) \
+       for ((e) = (c); (e)->idxmsk[0]; (e)++)
+
+
 /*
  * struct x86_pmu - generic x86 pmu
  */
@@ -102,6 +114,8 @@ struct x86_pmu {
        u64             intel_ctrl;
        void            (*enable_bts)(u64 config);
        void            (*disable_bts)(void);
+       int             (*get_event_idx)(struct cpu_hw_events *cpuc,
+                                        struct hw_perf_event *hwc);
 };
 
 static struct x86_pmu x86_pmu __read_mostly;
@@ -110,6 +124,8 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
        .enabled = 1,
 };
 
+static const struct event_constraint *event_constraints;
+
 /*
  * Not sure about some of these
  */
@@ -155,6 +171,16 @@ static u64 p6_pmu_raw_event(u64 hw_event)
        return hw_event & P6_EVNTSEL_MASK;
 }
 
+static const struct event_constraint intel_p6_event_constraints[] =
+{
+       EVENT_CONSTRAINT(0xc1, 0x1),    /* FLOPS */
+       EVENT_CONSTRAINT(0x10, 0x1),    /* FP_COMP_OPS_EXE */
+       EVENT_CONSTRAINT(0x11, 0x1),    /* FP_ASSIST */
+       EVENT_CONSTRAINT(0x12, 0x2),    /* MUL */
+       EVENT_CONSTRAINT(0x13, 0x2),    /* DIV */
+       EVENT_CONSTRAINT(0x14, 0x1),    /* CYCLES_DIV_BUSY */
+       EVENT_CONSTRAINT_END
+};
 
 /*
  * Intel PerfMon v3. Used on Core2 and later.
@@ -170,6 +196,35 @@ static const u64 intel_perfmon_event_map[] =
   [PERF_COUNT_HW_BUS_CYCLES]           = 0x013c,
 };
 
+static const struct event_constraint intel_core_event_constraints[] =
+{
+       EVENT_CONSTRAINT(0x10, 0x1),    /* FP_COMP_OPS_EXE */
+       EVENT_CONSTRAINT(0x11, 0x2),    /* FP_ASSIST */
+       EVENT_CONSTRAINT(0x12, 0x2),    /* MUL */
+       EVENT_CONSTRAINT(0x13, 0x2),    /* DIV */
+       EVENT_CONSTRAINT(0x14, 0x1),    /* CYCLES_DIV_BUSY */
+       EVENT_CONSTRAINT(0x18, 0x1),    /* IDLE_DURING_DIV */
+       EVENT_CONSTRAINT(0x19, 0x2),    /* DELAYED_BYPASS */
+       EVENT_CONSTRAINT(0xa1, 0x1),    /* RS_UOPS_DISPATCH_CYCLES */
+       EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED */
+       EVENT_CONSTRAINT_END
+};
+
+static const struct event_constraint intel_nehalem_event_constraints[] =
+{
+       EVENT_CONSTRAINT(0x40, 0x3),    /* L1D_CACHE_LD */
+       EVENT_CONSTRAINT(0x41, 0x3),    /* L1D_CACHE_ST */
+       EVENT_CONSTRAINT(0x42, 0x3),    /* L1D_CACHE_LOCK */
+       EVENT_CONSTRAINT(0x43, 0x3),    /* L1D_ALL_REF */
+       EVENT_CONSTRAINT(0x4e, 0x3),    /* L1D_PREFETCH */
+       EVENT_CONSTRAINT(0x4c, 0x3),    /* LOAD_HIT_PRE */
+       EVENT_CONSTRAINT(0x51, 0x3),    /* L1D */
+       EVENT_CONSTRAINT(0x52, 0x3),    /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */
+       EVENT_CONSTRAINT(0x53, 0x3),    /* L1D_CACHE_LOCK_FB_HIT */
+       EVENT_CONSTRAINT(0xc5, 0x3),    /* CACHE_LOCK_CYCLES */
+       EVENT_CONSTRAINT_END
+};
+
 static u64 intel_pmu_event_map(int hw_event)
 {
        return intel_perfmon_event_map[hw_event];
@@ -469,7 +524,7 @@ static u64 intel_pmu_raw_event(u64 hw_event)
 #define CORE_EVNTSEL_UNIT_MASK         0x0000FF00ULL
 #define CORE_EVNTSEL_EDGE_MASK         0x00040000ULL
 #define CORE_EVNTSEL_INV_MASK          0x00800000ULL
-#define CORE_EVNTSEL_REG_MASK  0xFF000000ULL
+#define CORE_EVNTSEL_REG_MASK          0xFF000000ULL
 
 #define CORE_EVNTSEL_MASK              \
        (CORE_EVNTSEL_EVENT_MASK |      \
@@ -932,6 +987,8 @@ static int __hw_perf_event_init(struct perf_event *event)
         */
        hwc->config = ARCH_PERFMON_EVENTSEL_INT;
 
+       hwc->idx = -1;
+
        /*
         * Count user and OS events unless requested not to.
         */
@@ -1334,8 +1391,7 @@ static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
                x86_pmu_enable_event(hwc, idx);
 }
 
-static int
-fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc)
+static int fixed_mode_idx(struct hw_perf_event *hwc)
 {
        unsigned int hw_event;
 
@@ -1349,6 +1405,12 @@ fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc)
        if (!x86_pmu.num_events_fixed)
                return -1;
 
+       /*
+        * fixed counters do not take all possible filters
+        */
+       if (hwc->config & ARCH_PERFMON_EVENT_FILTER_MASK)
+               return -1;
+
        if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
                return X86_PMC_IDX_FIXED_INSTRUCTIONS;
        if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
@@ -1360,22 +1422,57 @@ fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc)
 }
 
 /*
- * Find a PMC slot for the freshly enabled / scheduled in event:
+ * generic counter allocator: get next free counter
  */
-static int x86_pmu_enable(struct perf_event *event)
+static int
+gen_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
+{
+       int idx;
+
+       idx = find_first_zero_bit(cpuc->used_mask, x86_pmu.num_events);
+       return idx == x86_pmu.num_events ? -1 : idx;
+}
+
+/*
+ * intel-specific counter allocator: check event constraints
+ */
+static int
+intel_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
+{
+       const struct event_constraint *event_constraint;
+       int i, code;
+
+       if (!event_constraints)
+               goto skip;
+
+       code = hwc->config & CORE_EVNTSEL_EVENT_MASK;
+
+       for_each_event_constraint(event_constraint, event_constraints) {
+               if (code == event_constraint->code) {
+                       for_each_bit(i, event_constraint->idxmsk, X86_PMC_IDX_MAX) {
+                               if (!test_and_set_bit(i, cpuc->used_mask))
+                                       return i;
+                       }
+                       return -1;
+               }
+       }
+skip:
+       return gen_get_event_idx(cpuc, hwc);
+}
+
+static int
+x86_schedule_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-       struct hw_perf_event *hwc = &event->hw;
        int idx;
 
-       idx = fixed_mode_idx(event, hwc);
+       idx = fixed_mode_idx(hwc);
        if (idx == X86_PMC_IDX_FIXED_BTS) {
                /* BTS is already occupied. */
                if (test_and_set_bit(idx, cpuc->used_mask))
                        return -EAGAIN;
 
                hwc->config_base        = 0;
-               hwc->event_base = 0;
+               hwc->event_base         = 0;
                hwc->idx                = idx;
        } else if (idx >= 0) {
                /*
@@ -1396,20 +1493,35 @@ static int x86_pmu_enable(struct perf_event *event)
        } else {
                idx = hwc->idx;
                /* Try to get the previous generic event again */
-               if (test_and_set_bit(idx, cpuc->used_mask)) {
+               if (idx == -1 || test_and_set_bit(idx, cpuc->used_mask)) {
 try_generic:
-                       idx = find_first_zero_bit(cpuc->used_mask,
-                                                 x86_pmu.num_events);
-                       if (idx == x86_pmu.num_events)
+                       idx = x86_pmu.get_event_idx(cpuc, hwc);
+                       if (idx == -1)
                                return -EAGAIN;
 
                        set_bit(idx, cpuc->used_mask);
                        hwc->idx = idx;
                }
-               hwc->config_base  = x86_pmu.eventsel;
-               hwc->event_base = x86_pmu.perfctr;
+               hwc->config_base = x86_pmu.eventsel;
+               hwc->event_base  = x86_pmu.perfctr;
        }
 
+       return idx;
+}
+
+/*
+ * Find a PMC slot for the freshly enabled / scheduled in event:
+ */
+static int x86_pmu_enable(struct perf_event *event)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct hw_perf_event *hwc = &event->hw;
+       int idx;
+
+       idx = x86_schedule_event(cpuc, hwc);
+       if (idx < 0)
+               return idx;
+
        perf_events_lapic_init();
 
        x86_pmu.disable(hwc, idx);
@@ -1877,6 +1989,7 @@ static struct x86_pmu p6_pmu = {
         */
        .event_bits             = 32,
        .event_mask             = (1ULL << 32) - 1,
+       .get_event_idx          = intel_get_event_idx,
 };
 
 static struct x86_pmu intel_pmu = {
@@ -1900,6 +2013,7 @@ static struct x86_pmu intel_pmu = {
        .max_period             = (1ULL << 31) - 1,
        .enable_bts             = intel_pmu_enable_bts,
        .disable_bts            = intel_pmu_disable_bts,
+       .get_event_idx          = intel_get_event_idx,
 };
 
 static struct x86_pmu amd_pmu = {
@@ -1920,6 +2034,7 @@ static struct x86_pmu amd_pmu = {
        .apic                   = 1,
        /* use highest bit to detect overflow */
        .max_period             = (1ULL << 47) - 1,
+       .get_event_idx          = gen_get_event_idx,
 };
 
 static int p6_pmu_init(void)
@@ -1932,10 +2047,12 @@ static int p6_pmu_init(void)
        case 7:
        case 8:
        case 11: /* Pentium III */
+               event_constraints = intel_p6_event_constraints;
                break;
        case 9:
        case 13:
                /* Pentium M */
+               event_constraints = intel_p6_event_constraints;
                break;
        default:
                pr_cont("unsupported p6 CPU model %d ",
@@ -2007,12 +2124,14 @@ static int intel_pmu_init(void)
                       sizeof(hw_cache_event_ids));
 
                pr_cont("Core2 events, ");
+               event_constraints = intel_core_event_constraints;
                break;
        default:
        case 26:
                memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
 
+               event_constraints = intel_nehalem_event_constraints;
                pr_cont("Nehalem/Corei7 events, ");
                break;
        case 28:
@@ -2105,11 +2224,47 @@ static const struct pmu pmu = {
        .unthrottle     = x86_pmu_unthrottle,
 };
 
+static int
+validate_event(struct cpu_hw_events *cpuc, struct perf_event *event)
+{
+       struct hw_perf_event fake_event = event->hw;
+
+       if (event->pmu != &pmu)
+               return 0;
+
+       return x86_schedule_event(cpuc, &fake_event);
+}
+
+static int validate_group(struct perf_event *event)
+{
+       struct perf_event *sibling, *leader = event->group_leader;
+       struct cpu_hw_events fake_pmu;
+
+       memset(&fake_pmu, 0, sizeof(fake_pmu));
+
+       if (!validate_event(&fake_pmu, leader))
+               return -ENOSPC;
+
+       list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+               if (!validate_event(&fake_pmu, sibling))
+                       return -ENOSPC;
+       }
+
+       if (!validate_event(&fake_pmu, event))
+               return -ENOSPC;
+
+       return 0;
+}
+
 const struct pmu *hw_perf_event_init(struct perf_event *event)
 {
        int err;
 
        err = __hw_perf_event_init(event);
+       if (!err) {
+               if (event->group_leader != event)
+                       err = validate_group(event);
+       }
        if (err) {
                if (event->destroy)
                        event->destroy(event);
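
The scheduling change above splits counter selection in two: intel_get_event_idx() first looks for a constraint matching the event code and only tries the counters named in that constraint's index mask, falling back to gen_get_event_idx()'s first-free-bit search for unconstrained events; validate_group() then dry-runs the allocator against a zeroed fake cpu_hw_events so an over-constrained group is rejected at creation time. A standalone sketch of the selection logic, with simplified names and types that are not the kernel's own:

	/* Illustrative sketch only: constraint-driven counter selection.
	 * A zero idxmsk terminates the constraint table, mirroring
	 * EVENT_CONSTRAINT_END in the patch. */
	#include <stdint.h>

	struct constraint { unsigned int code; uint64_t idxmsk; };

	static int pick_counter(const struct constraint *c, unsigned int code,
				uint64_t *used, int ncounters)
	{
		for (; c->idxmsk; c++) {
			if (c->code != code)
				continue;
			/* constrained event: only its legal counters qualify */
			for (int i = 0; i < ncounters; i++) {
				if ((c->idxmsk & (1ULL << i)) && !(*used & (1ULL << i))) {
					*used |= 1ULL << i;
					return i;
				}
			}
			return -1;	/* every legal counter is busy */
		}
		/* no constraint for this event: take the first free counter */
		for (int i = 0; i < ncounters; i++) {
			if (!(*used & (1ULL << i))) {
				*used |= 1ULL << i;
				return i;
			}
		}
		return -1;
	}
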
index c097e7d..7d52e9d 100644 (file)
@@ -1185,17 +1185,14 @@ END(ftrace_graph_caller)
 
 .globl return_to_handler
 return_to_handler:
-       pushl $0
        pushl %eax
-       pushl %ecx
        pushl %edx
        movl %ebp, %eax
        call ftrace_return_to_handler
-       movl %eax, 0xc(%esp)
+       movl %eax, %ecx
        popl %edx
-       popl %ecx
        popl %eax
-       ret
+       jmp *%ecx
 #endif
 
 .section .rodata,"a"
index b5c061f..bd5bbdd 100644 (file)
@@ -155,11 +155,11 @@ GLOBAL(return_to_handler)
 
        call ftrace_return_to_handler
 
-       movq %rax, 16(%rsp)
+       movq %rax, %rdi
        movq 8(%rsp), %rdx
        movq (%rsp), %rax
-       addq $16, %rsp
-       retq
+       addq $24, %rsp
+       jmp *%rdi
 #endif
 
 
index 9dbb527..5a1b975 100644 (file)
@@ -9,6 +9,8 @@
  * the dangers of modifying code on the run.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/spinlock.h>
 #include <linux/hardirq.h>
 #include <linux/uaccess.h>
@@ -336,15 +338,15 @@ int __init ftrace_dyn_arch_init(void *data)
 
        switch (faulted) {
        case 0:
-               pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
+               pr_info("converting mcount calls to 0f 1f 44 00 00\n");
                memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
                break;
        case 1:
-               pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
+               pr_info("converting mcount calls to 66 66 66 66 90\n");
                memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
                break;
        case 2:
-               pr_info("ftrace: converting mcount calls to jmp . + 5\n");
+               pr_info("converting mcount calls to jmp . + 5\n");
                memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
                break;
        }
@@ -468,82 +470,10 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 
 #ifdef CONFIG_FTRACE_SYSCALLS
 
-extern unsigned long __start_syscalls_metadata[];
-extern unsigned long __stop_syscalls_metadata[];
 extern unsigned long *sys_call_table;
 
-static struct syscall_metadata **syscalls_metadata;
-
-static struct syscall_metadata *find_syscall_meta(unsigned long *syscall)
-{
-       struct syscall_metadata *start;
-       struct syscall_metadata *stop;
-       char str[KSYM_SYMBOL_LEN];
-
-
-       start = (struct syscall_metadata *)__start_syscalls_metadata;
-       stop = (struct syscall_metadata *)__stop_syscalls_metadata;
-       kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str);
-
-       for ( ; start < stop; start++) {
-               if (start->name && !strcmp(start->name, str))
-                       return start;
-       }
-       return NULL;
-}
-
-struct syscall_metadata *syscall_nr_to_meta(int nr)
-{
-       if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
-               return NULL;
-
-       return syscalls_metadata[nr];
-}
-
-int syscall_name_to_nr(char *name)
+unsigned long __init arch_syscall_addr(int nr)
 {
-       int i;
-
-       if (!syscalls_metadata)
-               return -1;
-
-       for (i = 0; i < NR_syscalls; i++) {
-               if (syscalls_metadata[i]) {
-                       if (!strcmp(syscalls_metadata[i]->name, name))
-                               return i;
-               }
-       }
-       return -1;
-}
-
-void set_syscall_enter_id(int num, int id)
-{
-       syscalls_metadata[num]->enter_id = id;
-}
-
-void set_syscall_exit_id(int num, int id)
-{
-       syscalls_metadata[num]->exit_id = id;
-}
-
-static int __init arch_init_ftrace_syscalls(void)
-{
-       int i;
-       struct syscall_metadata *meta;
-       unsigned long **psys_syscall_table = &sys_call_table;
-
-       syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
-                                       NR_syscalls, GFP_KERNEL);
-       if (!syscalls_metadata) {
-               WARN_ON(1);
-               return -ENOMEM;
-       }
-
-       for (i = 0; i < NR_syscalls; i++) {
-               meta = find_syscall_meta(psys_syscall_table[i]);
-               syscalls_metadata[i] = meta;
-       }
-       return 0;
+       return (unsigned long)(&sys_call_table)[nr];
 }
-arch_initcall(arch_init_ftrace_syscalls);
 #endif
index 3912061..74656d1 100644 (file)
@@ -244,7 +244,6 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
                                __func__, smp_processor_id(), vector, irq);
        }
 
-       run_local_timers();
        irq_exit();
 
        set_irq_regs(old_regs);
@@ -269,7 +268,6 @@ void smp_generic_interrupt(struct pt_regs *regs)
        if (generic_interrupt_extension)
                generic_interrupt_extension();
 
-       run_local_timers();
        irq_exit();
 
        set_irq_regs(old_regs);
index d20009b..b2a71dc 100644 (file)
@@ -311,7 +311,7 @@ void pci_iommu_shutdown(void)
        amd_iommu_shutdown();
 }
 /* Must execute after PCI subsystem */
-fs_initcall(pci_iommu_init);
+rootfs_initcall(pci_iommu_init);
 
 #ifdef CONFIG_PCI
 /* Many VIA bridges seem to corrupt data for DAC. Disable it here */
index d915d95..ec1de97 100644 (file)
@@ -198,7 +198,6 @@ void smp_reschedule_interrupt(struct pt_regs *regs)
 {
        ack_APIC_irq();
        inc_irq_stat(irq_resched_count);
-       run_local_timers();
        /*
         * KVM uses this interrupt to force a cpu out of guest mode
         */
index dcb00d2..be25734 100644 (file)
@@ -38,7 +38,8 @@ unsigned long profile_pc(struct pt_regs *regs)
 #ifdef CONFIG_FRAME_POINTER
                return *(unsigned long *)(regs->bp + sizeof(long));
 #else
-               unsigned long *sp = (unsigned long *)regs->sp;
+               unsigned long *sp =
+                       (unsigned long *)kernel_stack_pointer(regs);
                /*
                 * Return address is either directly at stack pointer
                 * or above a saved flags. Eflags has bits 22-31 zero,
index 699f7ee..cd02212 100644 (file)
@@ -3,8 +3,16 @@
 #include <asm/trampoline.h>
 #include <asm/e820.h>
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP)
+#define __trampinit
+#define __trampinitdata
+#else
+#define __trampinit __cpuinit
+#define __trampinitdata __cpuinitdata
+#endif
+
 /* ready for x86_64 and x86 */
-unsigned char *__cpuinitdata trampoline_base = __va(TRAMPOLINE_BASE);
+unsigned char *__trampinitdata trampoline_base = __va(TRAMPOLINE_BASE);
 
 void __init reserve_trampoline_memory(void)
 {
@@ -26,7 +34,7 @@ void __init reserve_trampoline_memory(void)
  * bootstrap into the page concerned. The caller
  * has made sure it's suitably aligned.
  */
-unsigned long __cpuinit setup_trampoline(void)
+unsigned long __trampinit setup_trampoline(void)
 {
        memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE);
        return virt_to_phys(trampoline_base);
index 596d54c..3af2dff 100644 (file)
 #include <asm/segment.h>
 #include <asm/processor-flags.h>
 
+#ifdef CONFIG_ACPI_SLEEP
+.section .rodata, "a", @progbits
+#else
 /* We can free up the trampoline after bootup if cpu hotplug is not supported. */
 __CPUINITRODATA
+#endif
 .code16
 
 ENTRY(trampoline_data)
index 31e6f6c..d430e4c 100644 (file)
@@ -648,7 +648,7 @@ static inline int __init activate_vmi(void)
 
        pv_info.paravirt_enabled = 1;
        pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;
-       pv_info.name = "vmi";
+       pv_info.name = "vmi [deprecated]";
 
        pv_init_ops.patch = vmi_patch;
 
index 92929fb..8d6001a 100644 (file)
@@ -305,8 +305,8 @@ SECTIONS
 
 
 #ifdef CONFIG_X86_32
-. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
-          "kernel image bigger than KERNEL_IMAGE_SIZE");
+ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
+       "kernel image bigger than KERNEL_IMAGE_SIZE");
 #else
 /*
  * Per-cpu symbols which need to be offset from __per_cpu_load
@@ -319,12 +319,12 @@ INIT_PER_CPU(irq_stack_union);
 /*
  * Build-time check on the image size:
  */
-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
-          "kernel image bigger than KERNEL_IMAGE_SIZE");
+ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
+       "kernel image bigger than KERNEL_IMAGE_SIZE");
 
 #ifdef CONFIG_SMP
-. = ASSERT((per_cpu__irq_stack_union == 0),
-           "irq_stack_union is not at start of per-cpu area");
+ASSERT((per_cpu__irq_stack_union == 0),
+       "irq_stack_union is not at start of per-cpu area");
 #endif
 
 #endif /* CONFIG_X86_32 */
@@ -332,7 +332,6 @@ INIT_PER_CPU(irq_stack_union);
 #ifdef CONFIG_KEXEC
 #include <asm/kexec.h>
 
-. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
-           "kexec control code size is too big");
+ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
+       "kexec control code size is too big");
 #endif
-
index 427fd1b..8565d94 100644 (file)
@@ -1,12 +1,13 @@
 /*
  * Written by Pekka Paalanen, 2008-2009 <pq@iki.fi>
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/io.h>
 #include <linux/mmiotrace.h>
 
-#define MODULE_NAME "testmmiotrace"
-
 static unsigned long mmio_address;
 module_param(mmio_address, ulong, 0);
 MODULE_PARM_DESC(mmio_address, " Start address of the mapping of 16 kB "
@@ -30,7 +31,7 @@ static unsigned v32(unsigned i)
 static void do_write_test(void __iomem *p)
 {
        unsigned int i;
-       pr_info(MODULE_NAME ": write test.\n");
+       pr_info("write test.\n");
        mmiotrace_printk("Write test.\n");
 
        for (i = 0; i < 256; i++)
@@ -47,7 +48,7 @@ static void do_read_test(void __iomem *p)
 {
        unsigned int i;
        unsigned errs[3] = { 0 };
-       pr_info(MODULE_NAME ": read test.\n");
+       pr_info("read test.\n");
        mmiotrace_printk("Read test.\n");
 
        for (i = 0; i < 256; i++)
@@ -68,7 +69,7 @@ static void do_read_test(void __iomem *p)
 
 static void do_read_far_test(void __iomem *p)
 {
-       pr_info(MODULE_NAME ": read far test.\n");
+       pr_info("read far test.\n");
        mmiotrace_printk("Read far test.\n");
 
        ioread32(p + read_far);
@@ -78,7 +79,7 @@ static void do_test(unsigned long size)
 {
        void __iomem *p = ioremap_nocache(mmio_address, size);
        if (!p) {
-               pr_err(MODULE_NAME ": could not ioremap, aborting.\n");
+               pr_err("could not ioremap, aborting.\n");
                return;
        }
        mmiotrace_printk("ioremap returned %p.\n", p);
@@ -94,24 +95,22 @@ static int __init init(void)
        unsigned long size = (read_far) ? (8 << 20) : (16 << 10);
 
        if (mmio_address == 0) {
-               pr_err(MODULE_NAME ": you have to use the module argument "
-                                                       "mmio_address.\n");
-               pr_err(MODULE_NAME ": DO NOT LOAD THIS MODULE UNLESS"
-                               " YOU REALLY KNOW WHAT YOU ARE DOING!\n");
+               pr_err("you have to use the module argument mmio_address.\n");
+               pr_err("DO NOT LOAD THIS MODULE UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!\n");
                return -ENXIO;
        }
 
-       pr_warning(MODULE_NAME ": WARNING: mapping %lu kB @ 0x%08lx in PCI "
-               "address space, and writing 16 kB of rubbish in there.\n",
-                size >> 10, mmio_address);
+       pr_warning("WARNING: mapping %lu kB @ 0x%08lx in PCI address space, "
+                  "and writing 16 kB of rubbish in there.\n",
+                  size >> 10, mmio_address);
        do_test(size);
-       pr_info(MODULE_NAME ": All done.\n");
+       pr_info("All done.\n");
        return 0;
 }
 
 static void __exit cleanup(void)
 {
-       pr_debug(MODULE_NAME ": unloaded.\n");
+       pr_debug("unloaded.\n");
 }
 
 module_init(init);
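
The pr_fmt() definition at the top of the file is what lets every explicit MODULE_NAME ": " prefix above be dropped: the pr_*() helpers paste pr_fmt() in front of the format string, so, roughly:

	pr_info("write test.\n");
	/* becomes */
	printk(KERN_INFO KBUILD_MODNAME ": " "write test.\n");
	/* i.e. "testmmiotrace: write test." in the kernel log */
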
index 81f3431..ac0fa10 100644 (file)
@@ -70,7 +70,7 @@ static void drive_stat_acct(struct request *rq, int new_io)
                part_stat_inc(cpu, part, merges[rw]);
        else {
                part_round_stats(cpu, part);
-               part_inc_in_flight(part);
+               part_inc_in_flight(part, rw);
        }
 
        part_stat_unlock();
@@ -1030,9 +1030,9 @@ static void part_round_stats_single(int cpu, struct hd_struct *part,
        if (now == part->stamp)
                return;
 
-       if (part->in_flight) {
+       if (part_in_flight(part)) {
                __part_stat_add(cpu, part, time_in_queue,
-                               part->in_flight * (now - part->stamp));
+                               part_in_flight(part) * (now - part->stamp));
                __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
        }
        part->stamp = now;
@@ -1739,7 +1739,7 @@ static void blk_account_io_done(struct request *req)
                part_stat_inc(cpu, part, ios[rw]);
                part_stat_add(cpu, part, ticks[rw], duration);
                part_round_stats(cpu, part);
-               part_dec_in_flight(part);
+               part_dec_in_flight(part, rw);
 
                part_stat_unlock();
        }
@@ -2492,14 +2492,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
-int kblockd_schedule_delayed_work(struct request_queue *q,
-                                 struct delayed_work *work,
-                                 unsigned long delay)
-{
-       return queue_delayed_work(kblockd_workqueue, work, delay);
-}
-EXPORT_SYMBOL(kblockd_schedule_delayed_work);
-
 int __init blk_dev_init(void)
 {
        BUILD_BUG_ON(__REQ_NR_BITS > 8 *
index b0de857..99cb5cf 100644 (file)
@@ -351,7 +351,7 @@ static void blk_account_io_merge(struct request *req)
                part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 
                part_round_stats(cpu, part);
-               part_dec_in_flight(part);
+               part_dec_in_flight(part, rq_data_dir(req));
 
                part_stat_unlock();
        }
index e0695bc..66d4aa8 100644 (file)
@@ -242,7 +242,7 @@ EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 /**
  * blk_queue_max_discard_sectors - set max sectors for a single discard
  * @q:  the request queue for the device
- * @max_discard: maximum number of sectors to discard
+ * @max_discard_sectors: maximum number of sectors to discard
  **/
 void blk_queue_max_discard_sectors(struct request_queue *q,
                unsigned int max_discard_sectors)
index 2e5cfeb..6b0f52c 100644 (file)
@@ -359,7 +359,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
                max_depth -= 2;
                if (!max_depth)
                        max_depth = 1;
-               if (q->in_flight[0] > max_depth)
+               if (q->in_flight[BLK_RW_ASYNC] > max_depth)
                        return 1;
        }
 
index 9c4b679..069a610 100644 (file)
@@ -150,7 +150,7 @@ struct cfq_data {
         * idle window management
         */
        struct timer_list idle_slice_timer;
-       struct delayed_work unplug_work;
+       struct work_struct unplug_work;
 
        struct cfq_queue *active_queue;
        struct cfq_io_context *active_cic;
@@ -230,7 +230,7 @@ CFQ_CFQQ_FNS(coop);
        blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
 
 static void cfq_dispatch_insert(struct request_queue *, struct request *);
-static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
+static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
                                       struct io_context *, gfp_t);
 static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
                                                struct io_context *);
@@ -241,40 +241,35 @@ static inline int rq_in_driver(struct cfq_data *cfqd)
 }
 
 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
-                                           int is_sync)
+                                           bool is_sync)
 {
-       return cic->cfqq[!!is_sync];
+       return cic->cfqq[is_sync];
 }
 
 static inline void cic_set_cfqq(struct cfq_io_context *cic,
-                               struct cfq_queue *cfqq, int is_sync)
+                               struct cfq_queue *cfqq, bool is_sync)
 {
-       cic->cfqq[!!is_sync] = cfqq;
+       cic->cfqq[is_sync] = cfqq;
 }
 
 /*
  * We regard a request as SYNC, if it's either a read or has the SYNC bit
  * set (in which case it could also be direct WRITE).
  */
-static inline int cfq_bio_sync(struct bio *bio)
+static inline bool cfq_bio_sync(struct bio *bio)
 {
-       if (bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO))
-               return 1;
-
-       return 0;
+       return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
 }
 
 /*
  * scheduler run of queue, if there are requests pending and no one in the
  * driver that will restart queueing
  */
-static inline void cfq_schedule_dispatch(struct cfq_data *cfqd,
-                                        unsigned long delay)
+static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 {
        if (cfqd->busy_queues) {
                cfq_log(cfqd, "schedule dispatch");
-               kblockd_schedule_delayed_work(cfqd->queue, &cfqd->unplug_work,
-                                               delay);
+               kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
        }
 }
 
@@ -290,7 +285,7 @@ static int cfq_queue_empty(struct request_queue *q)
  * if a queue is marked sync and has sync io queued. A sync queue with async
  * io only, should not get full sync slice length.
  */
-static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
+static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
                                 unsigned short prio)
 {
        const int base_slice = cfqd->cfq_slice[sync];
@@ -318,7 +313,7 @@ cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  * isn't valid until the first request from the dispatch is activated
  * and the slice time set.
  */
-static inline int cfq_slice_used(struct cfq_queue *cfqq)
+static inline bool cfq_slice_used(struct cfq_queue *cfqq)
 {
        if (cfq_cfqq_slice_new(cfqq))
                return 0;
@@ -493,7 +488,7 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
  * we will service the queues.
  */
 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-                                int add_front)
+                                bool add_front)
 {
        struct rb_node **p, *parent;
        struct cfq_queue *__cfqq;
@@ -509,11 +504,20 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                } else
                        rb_key += jiffies;
        } else if (!add_front) {
+               /*
+                * Get our rb key offset. Subtract any residual slice
+                * value carried from last service. A negative resid
+                * count indicates slice overrun, and this should position
+                * the next service time further away in the tree.
+                */
                rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
-               rb_key += cfqq->slice_resid;
+               rb_key -= cfqq->slice_resid;
                cfqq->slice_resid = 0;
-       } else
-               rb_key = 0;
+       } else {
+               rb_key = -HZ;
+               __cfqq = cfq_rb_first(&cfqd->service_tree);
+               rb_key += __cfqq ? __cfqq->rb_key : jiffies;
+       }
 
        if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
                /*
@@ -547,7 +551,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                        n = &(*p)->rb_left;
                else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
                        n = &(*p)->rb_right;
-               else if (rb_key < __cfqq->rb_key)
+               else if (time_before(rb_key, __cfqq->rb_key))
                        n = &(*p)->rb_left;
                else
                        n = &(*p)->rb_right;
@@ -827,8 +831,10 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
         * reposition in fifo if next is older than rq
         */
        if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
-           time_before(next->start_time, rq->start_time))
+           time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
                list_move(&rq->queuelist, &next->queuelist);
+               rq_set_fifo_time(rq, rq_fifo_time(next));
+       }
 
        cfq_remove_request(next);
 }
@@ -844,7 +850,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
         * Disallow merge of a sync bio into an async request.
         */
        if (cfq_bio_sync(bio) && !rq_is_sync(rq))
-               return 0;
+               return false;
 
        /*
         * Lookup the cfqq that this bio will be queued with. Allow
@@ -852,13 +858,10 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
         */
        cic = cfq_cic_lookup(cfqd, current->io_context);
        if (!cic)
-               return 0;
+               return false;
 
        cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
-       if (cfqq == RQ_CFQQ(rq))
-               return 1;
-
-       return 0;
+       return cfqq == RQ_CFQQ(rq);
 }
 
 static void __cfq_set_active_queue(struct cfq_data *cfqd,
@@ -886,7 +889,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
  */
 static void
 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-                   int timed_out)
+                   bool timed_out)
 {
        cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
 
@@ -914,7 +917,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        }
 }
 
-static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
+static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
 {
        struct cfq_queue *cfqq = cfqd->active_queue;
 
@@ -1026,7 +1029,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
  */
 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
                                              struct cfq_queue *cur_cfqq,
-                                             int probe)
+                                             bool probe)
 {
        struct cfq_queue *cfqq;
 
@@ -1090,6 +1093,15 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
        if (!cic || !atomic_read(&cic->ioc->nr_tasks))
                return;
 
+       /*
+        * If our average think time is larger than the remaining time
+        * slice, then don't idle. This avoids overrunning the allotted
+        * time slice.
+        */
+       if (sample_valid(cic->ttime_samples) &&
+           (cfqq->slice_end - jiffies < cic->ttime_mean))
+               return;
+
        cfq_mark_cfqq_wait_request(cfqq);
 
        /*
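
The check added above skips idling when the task's mean think time would not fit into what is left of the slice, since waiting would only overrun the allotted time. A small sketch of the same arithmetic, with hypothetical values standing in for cic->ttime_mean and cfqq->slice_end (all in jiffies):

    #include <stdbool.h>
    #include <stdio.h>

    /* Arming an idle timer is only worthwhile if the task is expected to
     * submit its next request before its slice runs out. */
    static bool worth_idling(unsigned long now, unsigned long slice_end,
                             unsigned long ttime_mean)
    {
        unsigned long remaining = slice_end - now;

        return ttime_mean < remaining;
    }

    int main(void)
    {
        printf("idle? %d\n", worth_idling(100, 108, 12)); /* 0: think time > 8 left */
        printf("idle? %d\n", worth_idling(100, 108, 3));  /* 1: worth waiting */
        return 0;
    }
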
@@ -1129,9 +1141,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
  */
 static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
 {
-       struct cfq_data *cfqd = cfqq->cfqd;
-       struct request *rq;
-       int fifo;
+       struct request *rq = NULL;
 
        if (cfq_cfqq_fifo_expire(cfqq))
                return NULL;
@@ -1141,13 +1151,11 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
        if (list_empty(&cfqq->fifo))
                return NULL;
 
-       fifo = cfq_cfqq_sync(cfqq);
        rq = rq_entry_fifo(cfqq->fifo.next);
-
-       if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
+       if (time_before(jiffies, rq_fifo_time(rq)))
                rq = NULL;
 
-       cfq_log_cfqq(cfqd, cfqq, "fifo=%p", rq);
+       cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
        return rq;
 }
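
Instead of recomputing start_time + cfq_fifo_expire[sync] at dispatch time, each request now carries its own absolute expiry stamp, set once when it is queued (see the rq_set_fifo_time() call added in cfq_insert_request() further down). A toy sketch of the pattern, with invented names rather than the block-layer helpers:

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_request {
        unsigned long fifo_time;   /* absolute deadline, stamped at insert */
    };

    /* Stamp the deadline once, when the request is queued. */
    static void toy_insert(struct toy_request *rq, unsigned long now,
                           unsigned long fifo_expire)
    {
        rq->fifo_time = now + fifo_expire;
    }

    /* At dispatch, one wrap-safe comparison decides whether the FIFO head
     * has waited long enough to be forced out. */
    static bool toy_fifo_expired(const struct toy_request *rq, unsigned long now)
    {
        return (long)(now - rq->fifo_time) >= 0;
    }

    int main(void)
    {
        struct toy_request rq;

        toy_insert(&rq, 1000, 250);                                   /* deadline 1250 */
        printf("expired at 1100? %d\n", toy_fifo_expired(&rq, 1100)); /* 0 */
        printf("expired at 1300? %d\n", toy_fifo_expired(&rq, 1300)); /* 1 */
        return 0;
    }
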
 
@@ -1248,67 +1256,21 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
        return dispatched;
 }
 
-/*
- * Dispatch a request from cfqq, moving them to the request queue
- * dispatch list.
- */
-static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-       struct request *rq;
-
-       BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
-
-       /*
-        * follow expired path, else get first next available
-        */
-       rq = cfq_check_fifo(cfqq);
-       if (!rq)
-               rq = cfqq->next_rq;
-
-       /*
-        * insert request into driver dispatch list
-        */
-       cfq_dispatch_insert(cfqd->queue, rq);
-
-       if (!cfqd->active_cic) {
-               struct cfq_io_context *cic = RQ_CIC(rq);
-
-               atomic_long_inc(&cic->ioc->refcount);
-               cfqd->active_cic = cic;
-       }
-}
-
-/*
- * Find the cfqq that we need to service and move a request from that to the
- * dispatch list
- */
-static int cfq_dispatch_requests(struct request_queue *q, int force)
-{
-       struct cfq_data *cfqd = q->elevator->elevator_data;
-       struct cfq_queue *cfqq;
        unsigned int max_dispatch;
 
-       if (!cfqd->busy_queues)
-               return 0;
-
-       if (unlikely(force))
-               return cfq_forced_dispatch(cfqd);
-
-       cfqq = cfq_select_queue(cfqd);
-       if (!cfqq)
-               return 0;
-
        /*
         * Drain async requests before we start sync IO
         */
        if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
-               return 0;
+               return false;
 
        /*
         * If this is an async queue and we have sync IO in flight, let it wait
         */
        if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
-               return 0;
+               return false;
 
        max_dispatch = cfqd->cfq_quantum;
        if (cfq_class_idle(cfqq))
@@ -1322,13 +1284,13 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
                 * idle queue must always only have a single IO in flight
                 */
                if (cfq_class_idle(cfqq))
-                       return 0;
+                       return false;
 
                /*
                 * We have other queues, don't allow more IO from this one
                 */
                if (cfqd->busy_queues > 1)
-                       return 0;
+                       return false;
 
                /*
                 * Sole queue user, allow bigger slice
@@ -1352,13 +1314,72 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
                        max_dispatch = depth;
        }
 
-       if (cfqq->dispatched >= max_dispatch)
+       /*
+        * If we're below the current max, allow a dispatch
+        */
+       return cfqq->dispatched < max_dispatch;
+}
+
+/*
+ * Dispatch a request from cfqq, moving them to the request queue
+ * dispatch list.
+ */
+static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+       struct request *rq;
+
+       BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
+
+       if (!cfq_may_dispatch(cfqd, cfqq))
+               return false;
+
+       /*
+        * follow expired path, else get first next available
+        */
+       rq = cfq_check_fifo(cfqq);
+       if (!rq)
+               rq = cfqq->next_rq;
+
+       /*
+        * insert request into driver dispatch list
+        */
+       cfq_dispatch_insert(cfqd->queue, rq);
+
+       if (!cfqd->active_cic) {
+               struct cfq_io_context *cic = RQ_CIC(rq);
+
+               atomic_long_inc(&cic->ioc->refcount);
+               cfqd->active_cic = cic;
+       }
+
+       return true;
+}
+
+/*
+ * Find the cfqq that we need to service and move a request from that to the
+ * dispatch list
+ */
+static int cfq_dispatch_requests(struct request_queue *q, int force)
+{
+       struct cfq_data *cfqd = q->elevator->elevator_data;
+       struct cfq_queue *cfqq;
+
+       if (!cfqd->busy_queues)
+               return 0;
+
+       if (unlikely(force))
+               return cfq_forced_dispatch(cfqd);
+
+       cfqq = cfq_select_queue(cfqd);
+       if (!cfqq)
                return 0;
 
        /*
-        * Dispatch a request from this cfqq
+        * Dispatch a request from this cfqq, if it is allowed
         */
-       cfq_dispatch_request(cfqd, cfqq);
+       if (!cfq_dispatch_request(cfqd, cfqq))
+               return 0;
+
        cfqq->slice_dispatch++;
        cfq_clear_cfqq_must_dispatch(cfqq);
 
@@ -1399,7 +1420,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 
        if (unlikely(cfqd->active_queue == cfqq)) {
                __cfq_slice_expired(cfqd, cfqq, 0);
-               cfq_schedule_dispatch(cfqd, 0);
+               cfq_schedule_dispatch(cfqd);
        }
 
        kmem_cache_free(cfq_pool, cfqq);
@@ -1494,7 +1515,7 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
        if (unlikely(cfqq == cfqd->active_queue)) {
                __cfq_slice_expired(cfqd, cfqq, 0);
-               cfq_schedule_dispatch(cfqd, 0);
+               cfq_schedule_dispatch(cfqd);
        }
 
        cfq_put_queue(cfqq);
@@ -1658,7 +1679,7 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
 }
 
 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-                         pid_t pid, int is_sync)
+                         pid_t pid, bool is_sync)
 {
        RB_CLEAR_NODE(&cfqq->rb_node);
        RB_CLEAR_NODE(&cfqq->p_node);
@@ -1678,7 +1699,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 }
 
 static struct cfq_queue *
-cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
+cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
                     struct io_context *ioc, gfp_t gfp_mask)
 {
        struct cfq_queue *cfqq, *new_cfqq = NULL;
@@ -1742,7 +1763,7 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
 }
 
 static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
+cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
              gfp_t gfp_mask)
 {
        const int ioprio = task_ioprio(ioc);
@@ -1977,7 +1998,10 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
            (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic)))
                enable_idle = 0;
        else if (sample_valid(cic->ttime_samples)) {
-               if (cic->ttime_mean > cfqd->cfq_slice_idle)
+               unsigned int slice_idle = cfqd->cfq_slice_idle;
+               if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
+                       slice_idle = msecs_to_jiffies(CFQ_MIN_TT);
+               if (cic->ttime_mean > slice_idle)
                        enable_idle = 0;
                else
                        enable_idle = 1;
@@ -1996,7 +2020,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  * Check if new_cfqq should preempt the currently active queue. Return 0 for
  * no or if we aren't sure, a 1 will cause a preempt.
  */
-static int
+static bool
 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
                   struct request *rq)
 {
@@ -2004,48 +2028,48 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 
        cfqq = cfqd->active_queue;
        if (!cfqq)
-               return 0;
+               return false;
 
        if (cfq_slice_used(cfqq))
-               return 1;
+               return true;
 
        if (cfq_class_idle(new_cfqq))
-               return 0;
+               return false;
 
        if (cfq_class_idle(cfqq))
-               return 1;
+               return true;
 
        /*
         * if the new request is sync, but the currently running queue is
         * not, let the sync request have priority.
         */
        if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
-               return 1;
+               return true;
 
        /*
         * So both queues are sync. Let the new request get disk time if
         * it's a metadata request and the current queue is doing regular IO.
         */
        if (rq_is_meta(rq) && !cfqq->meta_pending)
-               return 1;
+               return true;
 
        /*
         * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
         */
        if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
-               return 1;
+               return true;
 
        if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
-               return 0;
+               return false;
 
        /*
         * if this request is as-good as one we would expect from the
         * current cfqq, let it preempt
         */
        if (cfq_rq_close(cfqd, rq))
-               return 1;
+               return true;
 
-       return 0;
+       return false;
 }
 
 /*
@@ -2130,6 +2154,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
 
        cfq_add_rq_rb(rq);
 
+       rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
        list_add_tail(&rq->queuelist, &cfqq->fifo);
 
        cfq_rq_enqueued(cfqd, cfqq, rq);
@@ -2211,7 +2236,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
        }
 
        if (!rq_in_driver(cfqd))
-               cfq_schedule_dispatch(cfqd, 0);
+               cfq_schedule_dispatch(cfqd);
 }
 
 /*
@@ -2309,7 +2334,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_io_context *cic;
        const int rw = rq_data_dir(rq);
-       const int is_sync = rq_is_sync(rq);
+       const bool is_sync = rq_is_sync(rq);
        struct cfq_queue *cfqq;
        unsigned long flags;
 
@@ -2341,7 +2366,7 @@ queue_fail:
        if (cic)
                put_io_context(cic->ioc);
 
-       cfq_schedule_dispatch(cfqd, 0);
+       cfq_schedule_dispatch(cfqd);
        spin_unlock_irqrestore(q->queue_lock, flags);
        cfq_log(cfqd, "set_request fail");
        return 1;
@@ -2350,7 +2375,7 @@ queue_fail:
 static void cfq_kick_queue(struct work_struct *work)
 {
        struct cfq_data *cfqd =
-               container_of(work, struct cfq_data, unplug_work.work);
+               container_of(work, struct cfq_data, unplug_work);
        struct request_queue *q = cfqd->queue;
 
        spin_lock_irq(q->queue_lock);
@@ -2404,7 +2429,7 @@ static void cfq_idle_slice_timer(unsigned long data)
 expire:
        cfq_slice_expired(cfqd, timed_out);
 out_kick:
-       cfq_schedule_dispatch(cfqd, 0);
+       cfq_schedule_dispatch(cfqd);
 out_cont:
        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
 }
@@ -2412,7 +2437,7 @@ out_cont:
 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 {
        del_timer_sync(&cfqd->idle_slice_timer);
-       cancel_delayed_work_sync(&cfqd->unplug_work);
+       cancel_work_sync(&cfqd->unplug_work);
 }
 
 static void cfq_put_async_queues(struct cfq_data *cfqd)
@@ -2494,7 +2519,7 @@ static void *cfq_init_queue(struct request_queue *q)
        cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
        cfqd->idle_slice_timer.data = (unsigned long) cfqd;
 
-       INIT_DELAYED_WORK(&cfqd->unplug_work, cfq_kick_queue);
+       INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
 
        cfqd->cfq_quantum = cfq_quantum;
        cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
index 1975b61..a847046 100644 (file)
@@ -1059,9 +1059,7 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
                return count;
 
        strlcpy(elevator_name, name, sizeof(elevator_name));
-       strstrip(elevator_name);
-
-       e = elevator_get(elevator_name);
+       e = elevator_get(strstrip(elevator_name));
        if (!e) {
                printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
                return -EINVAL;
index 5a0861d..517e433 100644 (file)
@@ -869,6 +869,7 @@ static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
 static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL);
 static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
 static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
+static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 static struct device_attribute dev_attr_fail =
        __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
@@ -888,6 +889,7 @@ static struct attribute *disk_attrs[] = {
        &dev_attr_alignment_offset.attr,
        &dev_attr_capability.attr,
        &dev_attr_stat.attr,
+       &dev_attr_inflight.attr,
 #ifdef CONFIG_FAIL_MAKE_REQUEST
        &dev_attr_fail.attr,
 #endif
@@ -1053,7 +1055,7 @@ static int diskstats_show(struct seq_file *seqf, void *v)
                           part_stat_read(hd, merges[1]),
                           (unsigned long long)part_stat_read(hd, sectors[1]),
                           jiffies_to_msecs(part_stat_read(hd, ticks[1])),
-                          hd->in_flight,
+                          part_in_flight(hd),
                           jiffies_to_msecs(part_stat_read(hd, io_ticks)),
                           jiffies_to_msecs(part_stat_read(hd, time_in_queue))
                        );
index fb5be2d..6399e50 100644 (file)
@@ -68,6 +68,12 @@ MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
 MODULE_VERSION("3.6.20");
 MODULE_LICENSE("GPL");
 
+static int cciss_allow_hpsa;
+module_param(cciss_allow_hpsa, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(cciss_allow_hpsa,
+       "Prevent cciss driver from accessing hardware known to be "
+       "supported by the hpsa driver");
+
 #include "cciss_cmd.h"
 #include "cciss.h"
 #include <linux/cciss_ioctl.h>
@@ -101,8 +107,6 @@ static const struct pci_device_id cciss_pci_device_id[] = {
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
-       {PCI_VENDOR_ID_HP,     PCI_ANY_ID,      PCI_ANY_ID, PCI_ANY_ID,
-               PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
        {0,}
 };
 
@@ -123,8 +127,6 @@ static struct board_type products[] = {
        {0x409D0E11, "Smart Array 6400 EM", &SA5_access},
        {0x40910E11, "Smart Array 6i", &SA5_access},
        {0x3225103C, "Smart Array P600", &SA5_access},
-       {0x3223103C, "Smart Array P800", &SA5_access},
-       {0x3234103C, "Smart Array P400", &SA5_access},
        {0x3235103C, "Smart Array P400i", &SA5_access},
        {0x3211103C, "Smart Array E200i", &SA5_access},
        {0x3212103C, "Smart Array E200", &SA5_access},
@@ -132,6 +134,10 @@ static struct board_type products[] = {
        {0x3214103C, "Smart Array E200i", &SA5_access},
        {0x3215103C, "Smart Array E200i", &SA5_access},
        {0x3237103C, "Smart Array E500", &SA5_access},
+/* controllers below this line are also supported by the hpsa driver. */
+#define HPSA_BOUNDARY 0x3223103C
+       {0x3223103C, "Smart Array P800", &SA5_access},
+       {0x3234103C, "Smart Array P400", &SA5_access},
        {0x323D103C, "Smart Array P700m", &SA5_access},
        {0x3241103C, "Smart Array P212", &SA5_access},
        {0x3243103C, "Smart Array P410", &SA5_access},
@@ -140,7 +146,6 @@ static struct board_type products[] = {
        {0x3249103C, "Smart Array P812", &SA5_access},
        {0x324A103C, "Smart Array P712m", &SA5_access},
        {0x324B103C, "Smart Array P711m", &SA5_access},
-       {0xFFFF103C, "Unknown Smart Array", &SA5_access},
 };
 
 /* How long to wait (in milliseconds) for board to go into simple mode */
@@ -3754,7 +3759,27 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
        __u64 cfg_offset;
        __u32 cfg_base_addr;
        __u64 cfg_base_addr_index;
-       int i, err;
+       int i, prod_index, err;
+
+       subsystem_vendor_id = pdev->subsystem_vendor;
+       subsystem_device_id = pdev->subsystem_device;
+       board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
+                   subsystem_vendor_id);
+
+       for (i = 0; i < ARRAY_SIZE(products); i++) {
+               /* Stand aside for hpsa driver on request */
+               if (cciss_allow_hpsa && products[i].board_id == HPSA_BOUNDARY)
+                       return -ENODEV;
+               if (board_id == products[i].board_id)
+                       break;
+       }
+       prod_index = i;
+       if (prod_index == ARRAY_SIZE(products)) {
+               dev_warn(&pdev->dev,
+                       "unrecognized board ID: 0x%08lx, ignoring.\n",
+                       (unsigned long) board_id);
+               return -ENODEV;
+       }
 
        /* check to see if controller has been disabled */
        /* BEFORE trying to enable it */
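
The board lookup is hoisted to the top of cciss_pci_init() so an unrecognized board, or one the new hpsa driver should own, bails out before any setup work, and the stand-aside decision hinges on a marker entry in the product table. A compact user-space sketch of the same "scan until boundary or match" idea; the table below reuses a few IDs from the patch purely for illustration:

    #include <stdio.h>

    struct board { unsigned int id; const char *name; };

    #define BOUNDARY_ID 0x3223103C  /* entries from here on are shared with the other driver */

    static const struct board products[] = {
        { 0x3225103C, "Smart Array P600"  },
        { 0x3235103C, "Smart Array P400i" },
        { BOUNDARY_ID, "Smart Array P800" },
        { 0x3234103C, "Smart Array P400"  },
    };

    /* Return the table index for board_id, -1 if unknown, -2 if we should
     * stand aside because the other driver is preferred for shared boards. */
    static int find_board(unsigned int board_id, int allow_other_driver)
    {
        size_t i;

        for (i = 0; i < sizeof(products) / sizeof(products[0]); i++) {
            if (allow_other_driver && products[i].id == BOUNDARY_ID)
                return -2;
            if (products[i].id == board_id)
                return (int)i;
        }
        return -1;
    }

    int main(void)
    {
        printf("%d\n", find_board(0x3234103C, 0)); /*  3: claimed by this driver */
        printf("%d\n", find_board(0x3234103C, 1)); /* -2: stand aside */
        return 0;
    }
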
@@ -3778,11 +3803,6 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
                return err;
        }
 
-       subsystem_vendor_id = pdev->subsystem_vendor;
-       subsystem_device_id = pdev->subsystem_device;
-       board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
-                   subsystem_vendor_id);
-
 #ifdef CCISS_DEBUG
        printk("command = %x\n", command);
        printk("irq = %x\n", pdev->irq);
@@ -3868,14 +3888,9 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
         * leave a little room for ioctl calls.
         */
        c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
-       for (i = 0; i < ARRAY_SIZE(products); i++) {
-               if (board_id == products[i].board_id) {
-                       c->product_name = products[i].product_name;
-                       c->access = *(products[i].access);
-                       c->nr_cmds = c->max_commands - 4;
-                       break;
-               }
-       }
+       c->product_name = products[prod_index].product_name;
+       c->access = *(products[prod_index].access);
+       c->nr_cmds = c->max_commands - 4;
        if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
            (readb(&c->cfgtable->Signature[1]) != 'I') ||
            (readb(&c->cfgtable->Signature[2]) != 'S') ||
@@ -3884,27 +3899,6 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
                err = -ENODEV;
                goto err_out_free_res;
        }
-       /* We didn't find the controller in our list. We know the
-        * signature is valid. If it's an HP device let's try to
-        * bind to the device and fire it up. Otherwise we bail.
-        */
-       if (i == ARRAY_SIZE(products)) {
-               if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
-                       c->product_name = products[i-1].product_name;
-                       c->access = *(products[i-1].access);
-                       c->nr_cmds = c->max_commands - 4;
-                       printk(KERN_WARNING "cciss: This is an unknown "
-                               "Smart Array controller.\n"
-                               "cciss: Please update to the latest driver "
-                               "available from www.hp.com.\n");
-               } else {
-                       printk(KERN_WARNING "cciss: Sorry, I don't know how"
-                               " to access the Smart Array controller %08lx\n"
-                                       , (unsigned long)board_id);
-                       err = -ENODEV;
-                       goto err_out_free_res;
-               }
-       }
 #ifdef CONFIG_X86
        {
                /* Need to enable prefetch in the SCSI core for 6400 in x86 */
@@ -4254,7 +4248,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
        mutex_init(&hba[i]->busy_shutting_down);
 
        if (cciss_pci_init(hba[i], pdev) != 0)
-               goto clean0;
+               goto clean_no_release_regions;
 
        sprintf(hba[i]->devname, "cciss%d", i);
        hba[i]->ctlr = i;
@@ -4391,13 +4385,14 @@ clean2:
 clean1:
        cciss_destroy_hba_sysfs_entry(hba[i]);
 clean0:
+       pci_release_regions(pdev);
+clean_no_release_regions:
        hba[i]->busy_initializing = 0;
 
        /*
         * Deliberately omit pci_disable_device(): it does something nasty to
         * Smart Array controllers that pci_enable_device does not undo
         */
-       pci_release_regions(pdev);
        pci_set_drvdata(pdev, NULL);
        free_hba(i);
        return -1;
index aac0985..31e7c91 100644 (file)
@@ -43,6 +43,7 @@
 #define RTC_VERSION    "1.07"
 
 #include <linux/module.h>
+#include <linux/sched.h>
 #include <linux/errno.h>
 #include <linux/miscdevice.h>
 #include <linux/fcntl.h>
index e0d0f8b..bc4ab3e 100644 (file)
@@ -74,6 +74,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/spinlock.h>
+#include <linux/sched.h>
 #include <linux/sysctl.h>
 #include <linux/wait.h>
 #include <linux/bcd.h>
index fd3dced..8c262aa 100644 (file)
@@ -36,6 +36,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/sched.h>
 #include <linux/input.h>
 #include <linux/pci.h>
 #include <linux/init.h>
index 3108991..66fa4e1 100644 (file)
@@ -402,28 +402,26 @@ static void flush_to_ldisc(struct work_struct *work)
                container_of(work, struct tty_struct, buf.work.work);
        unsigned long   flags;
        struct tty_ldisc *disc;
-       struct tty_buffer *tbuf, *head;
-       char *char_buf;
-       unsigned char *flag_buf;
 
        disc = tty_ldisc_ref(tty);
        if (disc == NULL)       /*  !TTY_LDISC */
                return;
 
        spin_lock_irqsave(&tty->buf.lock, flags);
-       /* So we know a flush is running */
-       set_bit(TTY_FLUSHING, &tty->flags);
-       head = tty->buf.head;
-       if (head != NULL) {
-               tty->buf.head = NULL;
-               for (;;) {
-                       int count = head->commit - head->read;
+
+       if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
+               struct tty_buffer *head;
+               while ((head = tty->buf.head) != NULL) {
+                       int count;
+                       char *char_buf;
+                       unsigned char *flag_buf;
+
+                       count = head->commit - head->read;
                        if (!count) {
                                if (head->next == NULL)
                                        break;
-                               tbuf = head;
-                               head = head->next;
-                               tty_buffer_free(tty, tbuf);
+                               tty->buf.head = head->next;
+                               tty_buffer_free(tty, head);
                                continue;
                        }
                        /* Ldisc or user is trying to flush the buffers
@@ -445,9 +443,9 @@ static void flush_to_ldisc(struct work_struct *work)
                                                        flag_buf, count);
                        spin_lock_irqsave(&tty->buf.lock, flags);
                }
-               /* Restore the queue head */
-               tty->buf.head = head;
+               clear_bit(TTY_FLUSHING, &tty->flags);
        }
+
        /* We may have a deferred request to flush the input buffer,
           if so pull the chain under the lock and empty the queue */
        if (test_bit(TTY_FLUSHPENDING, &tty->flags)) {
@@ -455,7 +453,6 @@ static void flush_to_ldisc(struct work_struct *work)
                clear_bit(TTY_FLUSHPENDING, &tty->flags);
                wake_up(&tty->read_wait);
        }
-       clear_bit(TTY_FLUSHING, &tty->flags);
        spin_unlock_irqrestore(&tty->buf.lock, flags);
 
        tty_ldisc_deref(disc);
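
The rework above replaces "set the flag, detach the list, restore the head at the end" with a test-and-set guard: whoever wins the TTY_FLUSHING bit drains the live list in place, and a caller that loses simply skips the drain. A hedged user-space sketch of that guard pattern using C11 atomics rather than the kernel's bitops:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_flag flushing = ATOMIC_FLAG_INIT;

    /* Only one caller at a time may be the flusher. Returns true if this
     * caller did the drain, false if another flusher already held the flag. */
    static bool try_flush(void)
    {
        if (atomic_flag_test_and_set(&flushing))
            return false;           /* a flush is already running */

        /* ... walk and free the buffer list here ... */

        atomic_flag_clear(&flushing);
        return true;
    }

    int main(void)
    {
        printf("flushed: %d\n", try_flush());   /* 1: we won the flag */
        atomic_flag_test_and_set(&flushing);    /* simulate a concurrent flusher */
        printf("flushed: %d\n", try_flush());   /* 0: someone else holds it */
        return 0;
    }
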
@@ -471,7 +468,7 @@ static void flush_to_ldisc(struct work_struct *work)
  */
 void tty_flush_to_ldisc(struct tty_struct *tty)
 {
-       flush_to_ldisc(&tty->buf.work.work);
+       flush_delayed_work(&tty->buf.work);
 }
 
 /**
index 50f0176..98dbbda 100644 (file)
@@ -188,14 +188,7 @@ static struct fw_device *target_device(struct sbp2_target *tgt)
 /* Impossible login_id, to detect logout attempt before successful login */
 #define INVALID_LOGIN_ID 0x10000
 
-/*
- * Per section 7.4.8 of the SBP-2 spec, a mgt_ORB_timeout value can be
- * provided in the config rom. Most devices do provide a value, which
- * we'll use for login management orbs, but with some sane limits.
- */
-#define SBP2_MIN_LOGIN_ORB_TIMEOUT     5000U   /* Timeout in ms */
-#define SBP2_MAX_LOGIN_ORB_TIMEOUT     40000U  /* Timeout in ms */
-#define SBP2_ORB_TIMEOUT               2000U   /* Timeout in ms */
+#define SBP2_ORB_TIMEOUT               2000U           /* Timeout in ms */
 #define SBP2_ORB_NULL                  0x80000000
 #define SBP2_RETRY_LIMIT               0xf             /* 15 retries */
 #define SBP2_CYCLE_LIMIT               (0xc8 << 12)    /* 200 125us cycles */
@@ -1034,7 +1027,6 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
 {
        struct fw_csr_iterator ci;
        int key, value;
-       unsigned int timeout;
 
        fw_csr_iterator_init(&ci, directory);
        while (fw_csr_iterator_next(&ci, &key, &value)) {
@@ -1059,17 +1051,7 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
 
                case SBP2_CSR_UNIT_CHARACTERISTICS:
                        /* the timeout value is stored in 500ms units */
-                       timeout = ((unsigned int) value >> 8 & 0xff) * 500;
-                       timeout = max(timeout, SBP2_MIN_LOGIN_ORB_TIMEOUT);
-                       tgt->mgt_orb_timeout =
-                                 min(timeout, SBP2_MAX_LOGIN_ORB_TIMEOUT);
-
-                       if (timeout > tgt->mgt_orb_timeout)
-                               fw_notify("%s: config rom contains %ds "
-                                         "management ORB timeout, limiting "
-                                         "to %ds\n", tgt->bus_id,
-                                         timeout / 1000,
-                                         tgt->mgt_orb_timeout / 1000);
+                       tgt->mgt_orb_timeout = (value >> 8 & 0xff) * 500;
                        break;
 
                case SBP2_CSR_LOGICAL_UNIT_NUMBER:
@@ -1087,6 +1069,22 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
        return 0;
 }
 
+/*
+ * Per section 7.4.8 of the SBP-2 spec, a mgt_ORB_timeout value can be
+ * provided in the config rom. Most devices do provide a value, which
+ * we'll use for login management orbs, but with some sane limits.
+ */
+static void sbp2_clamp_management_orb_timeout(struct sbp2_target *tgt)
+{
+       unsigned int timeout = tgt->mgt_orb_timeout;
+
+       if (timeout > 40000)
+               fw_notify("%s: %ds mgt_ORB_timeout limited to 40s\n",
+                         tgt->bus_id, timeout / 1000);
+
+       tgt->mgt_orb_timeout = clamp_val(timeout, 5000, 40000);
+}
+
 static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
                                  u32 firmware_revision)
 {
@@ -1171,6 +1169,7 @@ static int sbp2_probe(struct device *dev)
                               &firmware_revision) < 0)
                goto fail_tgt_put;
 
+       sbp2_clamp_management_orb_timeout(tgt);
        sbp2_init_workarounds(tgt, model, firmware_revision);
 
        /*
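
Moving the clamping out of the CSR parser into sbp2_clamp_management_orb_timeout() keeps the parser a straight transcription of the config ROM while the limits themselves stay at 5 to 40 seconds, per the comment above. A minimal sketch of the clamp-and-warn pattern; the CLAMP macro below is a stand-in for the kernel's clamp_val(), and the function name is invented:

    #include <stdio.h>

    /* Stand-in for clamp_val(): bound val to [lo, hi]. */
    #define CLAMP(val, lo, hi) ((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

    #define MIN_LOGIN_ORB_TIMEOUT_MS  5000u
    #define MAX_LOGIN_ORB_TIMEOUT_MS 40000u

    static unsigned int clamp_orb_timeout(unsigned int from_config_rom_ms)
    {
        if (from_config_rom_ms > MAX_LOGIN_ORB_TIMEOUT_MS)
            printf("warning: %us timeout limited to %us\n",
                   from_config_rom_ms / 1000, MAX_LOGIN_ORB_TIMEOUT_MS / 1000);

        return CLAMP(from_config_rom_ms, MIN_LOGIN_ORB_TIMEOUT_MS,
                     MAX_LOGIN_ORB_TIMEOUT_MS);
    }

    int main(void)
    {
        printf("%u\n", clamp_orb_timeout(500));    /*  5000: raised to the floor */
        printf("%u\n", clamp_orb_timeout(20000));  /* 20000: left alone */
        printf("%u\n", clamp_orb_timeout(120000)); /* 40000: capped, with a warning */
        return 0;
    }
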
index be34d32..7d05c4b 100644 (file)
@@ -1066,7 +1066,7 @@ EXPORT_SYMBOL_GPL(hid_report_raw_event);
  * @type: HID report type (HID_*_REPORT)
  * @data: report contents
  * @size: size of data parameter
- * @interrupt: called from atomic?
+ * @interrupt: distinguish between interrupt and control transfers
  *
  * This is data entry for lower layers.
  */
index b05f602..c40afc5 100644 (file)
@@ -132,12 +132,12 @@ static struct hid_driver twinhan_driver = {
        .input_mapping = twinhan_input_mapping,
 };
 
-static int twinhan_init(void)
+static int __init twinhan_init(void)
 {
        return hid_register_driver(&twinhan_driver);
 }
 
-static void twinhan_exit(void)
+static void __exit twinhan_exit(void)
 {
        hid_unregister_driver(&twinhan_driver);
 }
index ba05275..cdd1369 100644 (file)
@@ -48,10 +48,9 @@ static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count,
        char *report;
        DECLARE_WAITQUEUE(wait, current);
 
-       while (ret == 0) {
-
-               mutex_lock(&list->read_mutex);
+       mutex_lock(&list->read_mutex);
 
+       while (ret == 0) {
                if (list->head == list->tail) {
                        add_wait_queue(&list->hidraw->wait, &wait);
                        set_current_state(TASK_INTERRUPTIBLE);
index 23e76fe..376f1ab 100644 (file)
@@ -130,7 +130,7 @@ struct mapped_device {
        /*
         * A list of ios that arrived while we were suspended.
         */
-       atomic_t pending;
+       atomic_t pending[2];
        wait_queue_head_t wait;
        struct work_struct work;
        struct bio_list deferred;
@@ -453,13 +453,14 @@ static void start_io_acct(struct dm_io *io)
 {
        struct mapped_device *md = io->md;
        int cpu;
+       int rw = bio_data_dir(io->bio);
 
        io->start_time = jiffies;
 
        cpu = part_stat_lock();
        part_round_stats(cpu, &dm_disk(md)->part0);
        part_stat_unlock();
-       dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
+       dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
 }
 
 static void end_io_acct(struct dm_io *io)
@@ -479,8 +480,9 @@ static void end_io_acct(struct dm_io *io)
         * After this is decremented the bio must not be touched if it is
         * a barrier.
         */
-       dm_disk(md)->part0.in_flight = pending =
-               atomic_dec_return(&md->pending);
+       dm_disk(md)->part0.in_flight[rw] = pending =
+               atomic_dec_return(&md->pending[rw]);
+       pending += atomic_read(&md->pending[rw^0x1]);
 
        /* nudge anyone waiting on suspend queue */
        if (!pending)
@@ -1785,7 +1787,8 @@ static struct mapped_device *alloc_dev(int minor)
        if (!md->disk)
                goto bad_disk;
 
-       atomic_set(&md->pending, 0);
+       atomic_set(&md->pending[0], 0);
+       atomic_set(&md->pending[1], 0);
        init_waitqueue_head(&md->wait);
        INIT_WORK(&md->work, dm_wq_work);
        init_waitqueue_head(&md->eventq);
@@ -2088,7 +2091,8 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
                                break;
                        }
                        spin_unlock_irqrestore(q->queue_lock, flags);
-               } else if (!atomic_read(&md->pending))
+               } else if (!atomic_read(&md->pending[0]) &&
+                                       !atomic_read(&md->pending[1]))
                        break;
 
                if (interruptible == TASK_INTERRUPTIBLE &&
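
Splitting pending into pending[2] lets reads and writes be accounted separately, and "anything still in flight?" becomes the sum of both directions, which is exactly what end_io_acct() and the suspend wait above compute. A small sketch of the same bookkeeping with C11 atomics and invented names, not the dm structures:

    #include <stdatomic.h>
    #include <stdio.h>

    enum { READ_DIR = 0, WRITE_DIR = 1 };

    static atomic_int pending[2];

    /* Returns the in-flight count for that direction after the increment. */
    static int start_io(int rw)
    {
        return atomic_fetch_add(&pending[rw], 1) + 1;
    }

    /* Returns the total still in flight across both directions. */
    static int end_io(int rw)
    {
        int remaining = atomic_fetch_sub(&pending[rw], 1) - 1;

        return remaining + atomic_load(&pending[rw ^ 1]);
    }

    int main(void)
    {
        start_io(READ_DIR);
        start_io(WRITE_DIR);
        printf("after the read completes:  %d pending\n", end_io(READ_DIR));  /* 1 */
        printf("after the write completes: %d pending\n", end_io(WRITE_DIR)); /* 0 */
        return 0;
    }
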
index e424cf6..e832e97 100644 (file)
@@ -480,7 +480,6 @@ static int
 add_children(struct twl4030_platform_data *pdata, unsigned long features)
 {
        struct device   *child;
-       struct device   *usb_transceiver = NULL;
 
        if (twl_has_bci() && pdata->bci && !(features & TPS_SUBSET)) {
                child = add_child(3, "twl4030_bci",
@@ -532,16 +531,61 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
        }
 
        if (twl_has_usb() && pdata->usb) {
+
+               static struct regulator_consumer_supply usb1v5 = {
+                       .supply =       "usb1v5",
+               };
+               static struct regulator_consumer_supply usb1v8 = {
+                       .supply =       "usb1v8",
+               };
+               static struct regulator_consumer_supply usb3v1 = {
+                       .supply =       "usb3v1",
+               };
+
+       /* First add the regulators so that they can be used by transceiver */
+               if (twl_has_regulator()) {
+                       /* this is a template that gets copied */
+                       struct regulator_init_data usb_fixed = {
+                               .constraints.valid_modes_mask =
+                                       REGULATOR_MODE_NORMAL
+                                       | REGULATOR_MODE_STANDBY,
+                               .constraints.valid_ops_mask =
+                                       REGULATOR_CHANGE_MODE
+                                       | REGULATOR_CHANGE_STATUS,
+                       };
+
+                       child = add_regulator_linked(TWL4030_REG_VUSB1V5,
+                                                     &usb_fixed, &usb1v5, 1);
+                       if (IS_ERR(child))
+                               return PTR_ERR(child);
+
+                       child = add_regulator_linked(TWL4030_REG_VUSB1V8,
+                                                     &usb_fixed, &usb1v8, 1);
+                       if (IS_ERR(child))
+                               return PTR_ERR(child);
+
+                       child = add_regulator_linked(TWL4030_REG_VUSB3V1,
+                                                     &usb_fixed, &usb3v1, 1);
+                       if (IS_ERR(child))
+                               return PTR_ERR(child);
+
+               }
+
                child = add_child(0, "twl4030_usb",
                                pdata->usb, sizeof(*pdata->usb),
                                true,
                                /* irq0 = USB_PRES, irq1 = USB */
                                pdata->irq_base + 8 + 2, pdata->irq_base + 4);
+
                if (IS_ERR(child))
                        return PTR_ERR(child);
 
                /* we need to connect regulators to this transceiver */
-               usb_transceiver = child;
+               if (twl_has_regulator() && child) {
+                       usb1v5.dev = child;
+                       usb1v8.dev = child;
+                       usb3v1.dev = child;
+               }
        }
 
        if (twl_has_watchdog()) {
@@ -580,47 +624,6 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
                        return PTR_ERR(child);
        }
 
-       if (twl_has_regulator() && usb_transceiver) {
-               static struct regulator_consumer_supply usb1v5 = {
-                       .supply =       "usb1v5",
-               };
-               static struct regulator_consumer_supply usb1v8 = {
-                       .supply =       "usb1v8",
-               };
-               static struct regulator_consumer_supply usb3v1 = {
-                       .supply =       "usb3v1",
-               };
-
-               /* this is a template that gets copied */
-               struct regulator_init_data usb_fixed = {
-                       .constraints.valid_modes_mask =
-                                 REGULATOR_MODE_NORMAL
-                               | REGULATOR_MODE_STANDBY,
-                       .constraints.valid_ops_mask =
-                                 REGULATOR_CHANGE_MODE
-                               | REGULATOR_CHANGE_STATUS,
-               };
-
-               usb1v5.dev = usb_transceiver;
-               usb1v8.dev = usb_transceiver;
-               usb3v1.dev = usb_transceiver;
-
-               child = add_regulator_linked(TWL4030_REG_VUSB1V5, &usb_fixed,
-                               &usb1v5, 1);
-               if (IS_ERR(child))
-                       return PTR_ERR(child);
-
-               child = add_regulator_linked(TWL4030_REG_VUSB1V8, &usb_fixed,
-                               &usb1v8, 1);
-               if (IS_ERR(child))
-                       return PTR_ERR(child);
-
-               child = add_regulator_linked(TWL4030_REG_VUSB3V1, &usb_fixed,
-                               &usb3v1, 1);
-               if (IS_ERR(child))
-                       return PTR_ERR(child);
-       }
-
        /* maybe add LDOs that are omitted on cost-reduced parts */
        if (twl_has_regulator() && !(features & TPS_SUBSET)) {
                child = add_regulator(TWL4030_REG_VPLL2, pdata->vpll2);
index 5e0b152..b00d673 100644 (file)
@@ -693,7 +693,7 @@ static int pxamci_probe(struct platform_device *pdev)
        if (gpio_is_valid(gpio_ro)) {
                ret = gpio_request(gpio_ro, "mmc card read only");
                if (ret) {
-                       dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_power);
+                       dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
                        goto err_gpio_ro;
                }
                gpio_direction_input(gpio_ro);
@@ -701,7 +701,7 @@ static int pxamci_probe(struct platform_device *pdev)
        if (gpio_is_valid(gpio_cd)) {
                ret = gpio_request(gpio_cd, "mmc card detect");
                if (ret) {
-                       dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_power);
+                       dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd);
                        goto err_gpio_cd;
                }
                gpio_direction_input(gpio_cd);
index 9693b0f..0bd898c 100644 (file)
@@ -16,6 +16,7 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/capability.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/string.h>
index 83da596..58c6681 100644 (file)
@@ -18,6 +18,7 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/capability.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/fcntl.h>
index a52f29c..f1340fa 100644 (file)
@@ -16,6 +16,7 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/capability.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/fcntl.h>
index 2b7ae36..5df60a6 100644 (file)
@@ -35,12 +35,23 @@ static size_t buffer_pos;
 /* atomic_t because wait_event checks it outside of buffer_mutex */
 static atomic_t buffer_ready = ATOMIC_INIT(0);
 
-/* Add an entry to the event buffer. When we
- * get near to the end we wake up the process
- * sleeping on the read() of the file.
+/*
+ * Add an entry to the event buffer. When we get near to the end we
+ * wake up the process sleeping on the read() of the file. To protect
+ * the event_buffer this function may only be called when buffer_mutex
+ * is set.
  */
 void add_event_entry(unsigned long value)
 {
+       /*
+        * This shouldn't happen since all workqueues or handlers are
+        * canceled or flushed before the event buffer is freed.
+        */
+       if (!event_buffer) {
+               WARN_ON_ONCE(1);
+               return;
+       }
+
        if (buffer_pos == buffer_size) {
                atomic_inc(&oprofile_stats.event_lost_overflow);
                return;
@@ -69,7 +80,6 @@ void wake_up_buffer_waiter(void)
 
 int alloc_event_buffer(void)
 {
-       int err = -ENOMEM;
        unsigned long flags;
 
        spin_lock_irqsave(&oprofilefs_lock, flags);
@@ -80,21 +90,22 @@ int alloc_event_buffer(void)
        if (buffer_watershed >= buffer_size)
                return -EINVAL;
 
+       buffer_pos = 0;
        event_buffer = vmalloc(sizeof(unsigned long) * buffer_size);
        if (!event_buffer)
-               goto out;
+               return -ENOMEM;
 
-       err = 0;
-out:
-       return err;
+       return 0;
 }
 
 
 void free_event_buffer(void)
 {
+       mutex_lock(&buffer_mutex);
        vfree(event_buffer);
-
+       buffer_pos = 0;
        event_buffer = NULL;
+       mutex_unlock(&buffer_mutex);
 }
 
 
@@ -167,6 +178,12 @@ static ssize_t event_buffer_read(struct file *file, char __user *buf,
 
        mutex_lock(&buffer_mutex);
 
+       /* May happen if the buffer is freed during pending reads. */
+       if (!event_buffer) {
+               retval = -EINTR;
+               goto out;
+       }
+
        atomic_set(&buffer_ready, 0);
 
        retval = -EFAULT;
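
Both the free path and the read path now take buffer_mutex, and the reader re-checks the pointer after acquiring it, so a read racing with free_event_buffer() fails cleanly instead of touching freed memory. A hedged pthread sketch of the same check-under-lock pattern, with invented names:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t buf_mutex = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long *buffer;

    static void free_buffer(void)
    {
        pthread_mutex_lock(&buf_mutex);
        free(buffer);
        buffer = NULL;              /* readers re-check this under the lock */
        pthread_mutex_unlock(&buf_mutex);
    }

    static int read_buffer(unsigned long *out)
    {
        int ret = -1;

        pthread_mutex_lock(&buf_mutex);
        if (buffer) {               /* may have been freed while we waited */
            *out = buffer[0];
            ret = 0;
        }
        pthread_mutex_unlock(&buf_mutex);
        return ret;
    }

    int main(void)
    {
        unsigned long v;

        buffer = calloc(16, sizeof(*buffer));
        printf("read before free: %d\n", read_buffer(&v));  /*  0 */
        free_buffer();
        printf("read after free:  %d\n", read_buffer(&v));  /* -1, no use-after-free */
        return 0;
    }
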
index 14bbaa1..22b02c6 100644 (file)
@@ -354,6 +354,7 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_reserved_memory *rmrr;
        struct acpi_dmar_atsr *atsr;
+       struct acpi_dmar_rhsa *rhsa;
 
        switch (header->type) {
        case ACPI_DMAR_TYPE_HARDWARE_UNIT:
@@ -375,6 +376,12 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
                atsr = container_of(header, struct acpi_dmar_atsr, header);
                printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
                break;
+       case ACPI_DMAR_HARDWARE_AFFINITY:
+               rhsa = container_of(header, struct acpi_dmar_rhsa, header);
+               printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
+                      (unsigned long long)rhsa->base_address,
+                      rhsa->proximity_domain);
+               break;
        }
 }
 
@@ -459,9 +466,13 @@ parse_dmar_table(void)
                        ret = dmar_parse_one_atsr(entry_header);
 #endif
                        break;
+               case ACPI_DMAR_HARDWARE_AFFINITY:
+                       /* We don't do anything with RHSA (yet?) */
+                       break;
                default:
                        printk(KERN_WARNING PREFIX
-                               "Unknown DMAR structure type\n");
+                               "Unknown DMAR structure type %d\n",
+                               entry_header->type);
                        ret = 0; /* for forward compatibility */
                        break;
                }
index 5383600..9c6a9fd 100644 (file)
@@ -32,6 +32,7 @@
 #include <asm/io.h>            /* for read? and write? functions */
 #include <linux/delay.h>       /* for delays */
 #include <linux/mutex.h>
+#include <linux/sched.h>       /* for signal_pending() */
 
 #define MY_NAME        "cpqphp"
 
index 855dd7c..b1e97e6 100644 (file)
@@ -48,6 +48,7 @@
 
 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
+#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
 
 #define IOAPIC_RANGE_START     (0xfee00000)
 #define IOAPIC_RANGE_END       (0xfeefffff)
@@ -94,6 +95,7 @@ static inline unsigned long virt_to_dma_pfn(void *p)
 /* global iommu list, set NULL for ignored DMAR units */
 static struct intel_iommu **g_iommus;
 
+static void __init check_tylersburg_isoch(void);
 static int rwbf_quirk;
 
 /*
@@ -1934,6 +1936,9 @@ error:
 }
 
 static int iommu_identity_mapping;
+#define IDENTMAP_ALL           1
+#define IDENTMAP_GFX           2
+#define IDENTMAP_AZALIA                4
 
 static int iommu_domain_identity_map(struct dmar_domain *domain,
                                     unsigned long long start,
@@ -2151,8 +2156,14 @@ static int domain_add_dev_info(struct dmar_domain *domain,
 
 static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
 {
-       if (iommu_identity_mapping == 2)
-               return IS_GFX_DEVICE(pdev);
+       if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
+               return 1;
+
+       if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
+               return 1;
+
+       if (!(iommu_identity_mapping & IDENTMAP_ALL))
+               return 0;
 
        /*
         * We want to start off with all devices in the 1:1 domain, and
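
Turning the old iommu_identity_mapping values 1 and 2 into independent bits means "map everything", "map graphics devices" and "map the Azalia device" can be requested in any combination; the Tylersburg quirk further down sets the Azalia bit on its own. A tiny sketch of the flag test, reusing the bit values the patch defines:

    #include <stdbool.h>
    #include <stdio.h>

    #define IDENTMAP_ALL    1
    #define IDENTMAP_GFX    2
    #define IDENTMAP_AZALIA 4

    static bool should_identity_map(unsigned int policy, bool is_gfx, bool is_azalia)
    {
        if ((policy & IDENTMAP_AZALIA) && is_azalia)
            return true;
        if ((policy & IDENTMAP_GFX) && is_gfx)
            return true;
        return policy & IDENTMAP_ALL;
    }

    int main(void)
    {
        unsigned int policy = IDENTMAP_GFX | IDENTMAP_AZALIA;

        printf("gfx:    %d\n", should_identity_map(policy, true,  false)); /* 1 */
        printf("azalia: %d\n", should_identity_map(policy, false, true));  /* 1 */
        printf("other:  %d\n", should_identity_map(policy, false, false)); /* 0 */
        return 0;
    }
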
@@ -2332,11 +2343,14 @@ int __init init_dmars(void)
        }
 
        if (iommu_pass_through)
-               iommu_identity_mapping = 1;
+               iommu_identity_mapping |= IDENTMAP_ALL;
+
 #ifdef CONFIG_DMAR_BROKEN_GFX_WA
-       else
-               iommu_identity_mapping = 2;
+       iommu_identity_mapping |= IDENTMAP_GFX;
 #endif
+
+       check_tylersburg_isoch();
+
        /*
         * If pass through is not set or not enabled, setup context entries for
         * identity mappings for rmrr, gfx, and isa and may fall back to static
@@ -3670,3 +3684,61 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
 }
 
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
+
+/* On Tylersburg chipsets, some BIOSes have been known to enable the
+   ISOCH DMAR unit for the Azalia sound device, but not give it any
+   TLB entries, which causes it to deadlock. Check for that.  We do
+   this in a function called from init_dmars(), instead of in a PCI
+   quirk, because we don't want to print the obnoxious "BIOS broken"
+   message if VT-d is actually disabled.
+*/
+static void __init check_tylersburg_isoch(void)
+{
+       struct pci_dev *pdev;
+       uint32_t vtisochctrl;
+
+       /* If there's no Azalia in the system anyway, forget it. */
+       pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
+       if (!pdev)
+               return;
+       pci_dev_put(pdev);
+
+       /* System Management Registers. Might be hidden, in which case
+          we can't do the sanity check. But that's OK, because the
+          known-broken BIOSes _don't_ actually hide it, so far. */
+       pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
+       if (!pdev)
+               return;
+
+       if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
+               pci_dev_put(pdev);
+               return;
+       }
+
+       pci_dev_put(pdev);
+
+       /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
+       if (vtisochctrl & 1)
+               return;
+
+       /* Drop all bits other than the number of TLB entries */
+       vtisochctrl &= 0x1c;
+
+       /* If we have the recommended number of TLB entries (16), fine. */
+       if (vtisochctrl == 0x10)
+               return;
+
+       /* Zero TLB entries? You get to ride the short bus to school. */
+       if (!vtisochctrl) {
+               WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
+                    "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+                    dmi_get_system_info(DMI_BIOS_VENDOR),
+                    dmi_get_system_info(DMI_BIOS_VERSION),
+                    dmi_get_system_info(DMI_PRODUCT_VERSION));
+               iommu_identity_mapping |= IDENTMAP_AZALIA;
+               return;
+       }
+       
+       printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
+              vtisochctrl);
+}
index 6edecff..4e4c295 100644 (file)
@@ -513,7 +513,11 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
        else if (state == PCI_D2 || dev->current_state == PCI_D2)
                udelay(PCI_PM_D2_DELAY);
 
-       dev->current_state = state;
+       pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+       dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
+       if (dev->current_state != state && printk_ratelimit())
+               dev_info(&dev->dev, "Refused to change power state, "
+                       "currently in D%d\n", dev->current_state);
 
        /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
         * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
@@ -2542,10 +2546,10 @@ int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
 
 /**
  * pci_set_vga_state - set VGA decode state on device and parents if requested
- * @dev the PCI device
- * @decode - true = enable decoding, false = disable decoding
- * @command_bits PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
- * @change_bridge - traverse ancestors and change bridges
+ * @dev: the PCI device
+ * @decode: true = enable decoding, false = disable decoding
+ * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
+ * @change_bridge: traverse ancestors and change bridges
  */
 int pci_set_vga_state(struct pci_dev *dev, bool decode,
                      unsigned int command_bits, bool change_bridge)
@@ -2719,17 +2723,6 @@ int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
        return 1;
 }
 
-static int __devinit pci_init(void)
-{
-       struct pci_dev *dev = NULL;
-
-       while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
-               pci_fixup_device(pci_fixup_final, dev);
-       }
-
-       return 0;
-}
-
 static int __init pci_setup(char *str)
 {
        while (str) {
@@ -2767,8 +2760,6 @@ static int __init pci_setup(char *str)
 }
 early_param("pci", pci_setup);
 
-device_initcall(pci_init);
-
 EXPORT_SYMBOL(pci_reenable_device);
 EXPORT_SYMBOL(pci_enable_device_io);
 EXPORT_SYMBOL(pci_enable_device_mem);
index d49ecc9..40c3cc5 100644 (file)
@@ -53,7 +53,7 @@ static struct pci_error_handlers aer_error_handlers = {
 
 static struct pcie_port_service_driver aerdriver = {
        .name           = "aer",
-       .port_type      = PCIE_ANY_PORT,
+       .port_type      = PCIE_RC_PORT,
        .service        = PCIE_PORT_SERVICE_AER,
 
        .probe          = aer_probe,
index 6df5c98..f635e47 100644 (file)
@@ -30,7 +30,6 @@ MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL");
 
 /* global data */
-static const char device_name[] = "pcieport-driver";
 
 static int pcie_portdrv_restore_config(struct pci_dev *dev)
 {
@@ -262,7 +261,7 @@ static struct pci_error_handlers pcie_portdrv_err_handler = {
 };
 
 static struct pci_driver pcie_portdriver = {
-       .name           = (char *)device_name,
+       .name           = "pcieport",
        .id_table       = &port_pci_ids[0],
 
        .probe          = pcie_portdrv_probe,
index 6099fac..a790b17 100644 (file)
@@ -670,6 +670,25 @@ static void __devinit quirk_vt8235_acpi(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,    PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi);
 
+/*
+ * TI XIO2000a PCIe-PCI Bridge erroneously reports it supports fast back-to-back:
+ *     Disable fast back-to-back on the secondary bus segment
+ */
+static void __devinit quirk_xio2000a(struct pci_dev *dev)
+{
+       struct pci_dev *pdev;
+       u16 command;
+
+       dev_warn(&dev->dev, "TI XIO2000a quirk detected; "
+               "secondary bus fast back-to-back transfers disabled\n");
+       list_for_each_entry(pdev, &dev->subordinate->devices, bus_list) {
+               pci_read_config_word(pdev, PCI_COMMAND, &command);
+               if (command & PCI_COMMAND_FAST_BACK)
+                       pci_write_config_word(pdev, PCI_COMMAND, command & ~PCI_COMMAND_FAST_BACK);
+       }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A,
+                       quirk_xio2000a);
 
 #ifdef CONFIG_X86_IO_APIC 
 
@@ -2572,6 +2591,19 @@ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
        }
        pci_do_fixups(dev, start, end);
 }
+
+static int __init pci_apply_final_quirks(void)
+{
+       struct pci_dev *dev = NULL;
+
+       while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+               pci_fixup_device(pci_fixup_final, dev);
+       }
+
+       return 0;
+}
+
+fs_initcall_sync(pci_apply_final_quirks);
 #else
 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {}
 #endif
index cb1a027..0959430 100644 (file)
@@ -299,8 +299,17 @@ static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned lon
                r = bus->resource[i];
                if (r == &ioport_resource || r == &iomem_resource)
                        continue;
-               if (r && (r->flags & type_mask) == type && !r->parent)
-                       return r;
+               if (r && (r->flags & type_mask) == type) {
+                       if (!r->parent)
+                               return r;
+                       /*
+                        * if there is no child under that, we should release
+                        * and use it. don't need to reset it, pbus_size_* will
+                        * set it again
+                        */
+                       if (!r->child && !release_resource(r))
+                               return r;
+               }
        }
        return NULL;
 }
index 706f82d..c54526b 100644 (file)
@@ -205,43 +205,6 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
        return ret;
 }
 
-#if 0
-int pci_assign_resource_fixed(struct pci_dev *dev, int resno)
-{
-       struct pci_bus *bus = dev->bus;
-       struct resource *res = dev->resource + resno;
-       unsigned int type_mask;
-       int i, ret = -EBUSY;
-
-       type_mask = IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH;
-
-       for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
-               struct resource *r = bus->resource[i];
-               if (!r)
-                       continue;
-
-               /* type_mask must match */
-               if ((res->flags ^ r->flags) & type_mask)
-                       continue;
-
-               ret = request_resource(r, res);
-
-               if (ret == 0)
-                       break;
-       }
-
-       if (ret) {
-               dev_err(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
-                       resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
-       } else if (resno < PCI_BRIDGE_RESOURCES) {
-               pci_update_resource(dev, resno);
-       }
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(pci_assign_resource_fixed);
-#endif
-
 /* Sort resources by alignment */
 void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
 {
index 53b8c25..aaccc8e 100644 (file)
@@ -2533,6 +2533,7 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
 {
        struct dasd_ccw_req *cqr;
        struct ccw1 *ccw;
+       unsigned long *idaw;
 
        cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);
 
@@ -2546,9 +2547,17 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
 
        ccw = cqr->cpaddr;
        ccw->cmd_code = CCW_CMD_RDC;
-       ccw->cda = (__u32)(addr_t)rdc_buffer;
-       ccw->count = rdc_buffer_size;
+       if (idal_is_needed(rdc_buffer, rdc_buffer_size)) {
+               idaw = (unsigned long *) (cqr->data);
+               ccw->cda = (__u32)(addr_t) idaw;
+               ccw->flags = CCW_FLAG_IDA;
+               idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size);
+       } else {
+               ccw->cda = (__u32)(addr_t) rdc_buffer;
+               ccw->flags = 0;
+       }
 
+       ccw->count = rdc_buffer_size;
        cqr->startdev = device;
        cqr->memdev = device;
        cqr->expires = 10*HZ;
index 0be7c15..417b97c 100644 (file)
@@ -3216,6 +3216,7 @@ int dasd_eckd_restore_device(struct dasd_device *device)
        struct dasd_eckd_characteristics temp_rdc_data;
        int is_known, rc;
        struct dasd_uid temp_uid;
+       unsigned long flags;
 
        private = (struct dasd_eckd_private *) device->private;
 
@@ -3228,7 +3229,8 @@ int dasd_eckd_restore_device(struct dasd_device *device)
        rc = dasd_eckd_generate_uid(device, &private->uid);
        dasd_get_uid(device->cdev, &temp_uid);
        if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
-               dev_err(&device->cdev->dev, "The UID of the DASD has changed\n");
+               dev_err(&device->cdev->dev, "The UID of the DASD has "
+                       "changed\n");
        if (rc)
                goto out_err;
        dasd_set_uid(device->cdev, &private->uid);
@@ -3256,9 +3258,9 @@ int dasd_eckd_restore_device(struct dasd_device *device)
                          "device: %s", rc, dev_name(&device->cdev->dev));
                goto out_err;
        }
-       spin_lock(get_ccwdev_lock(device->cdev));
+       spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data));
-       spin_unlock(get_ccwdev_lock(device->cdev));
+       spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 
        /* add device to alias management */
        dasd_alias_add_device(device);
index daaec18..a4f68e5 100644 (file)
@@ -62,7 +62,7 @@ static struct notifier_block call_home_panic_nb = {
        .priority = INT_MAX,
 };
 
-static int proc_handler_callhome(ctl_table *ctl, int write, struct file *filp,
+static int proc_handler_callhome(struct ctl_table *ctl, int write,
                                 void __user *buffer, size_t *count,
                                 loff_t *ppos)
 {
@@ -100,7 +100,7 @@ static struct ctl_table callhome_table[] = {
        {
                .procname       = "callhome",
                .mode           = 0644,
-               .proc_handler   = &proc_handler_callhome,
+               .proc_handler   = proc_handler_callhome,
        },
        { .ctl_name = 0 }
 };
index 178724f..b9d2a00 100644 (file)
@@ -705,21 +705,6 @@ out_driver:
 }
 __initcall(sclp_vt220_tty_init);
 
-#ifdef CONFIG_SCLP_VT220_CONSOLE
-
-static void
-sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
-{
-       __sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0);
-}
-
-static struct tty_driver *
-sclp_vt220_con_device(struct console *c, int *index)
-{
-       *index = 0;
-       return sclp_vt220_driver;
-}
-
 static void __sclp_vt220_flush_buffer(void)
 {
        unsigned long flags;
@@ -776,6 +761,21 @@ static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
        }
 }
 
+#ifdef CONFIG_SCLP_VT220_CONSOLE
+
+static void
+sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
+{
+       __sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0);
+}
+
+static struct tty_driver *
+sclp_vt220_con_device(struct console *c, int *index)
+{
+       *index = 0;
+       return sclp_vt220_driver;
+}
+
 static int
 sclp_vt220_notify(struct notifier_block *self,
                          unsigned long event, void *data)
index 64f57ef..0c0705b 100644 (file)
@@ -162,9 +162,10 @@ tapeblock_requeue(struct work_struct *work) {
        spin_lock_irq(&device->blk_data.request_queue_lock);
        while (
                !blk_queue_plugged(queue) &&
-               (req = blk_fetch_request(queue)) &&
+               blk_peek_request(queue) &&
                nr_queued < TAPEBLOCK_MIN_REQUEUE
        ) {
+               req = blk_fetch_request(queue);
                if (rq_data_dir(req) == WRITE) {
                        DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
                        spin_unlock_irq(&device->blk_data.request_queue_lock);
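
With this change the requeue loop only dequeues a request once it has decided to process it: blk_peek_request() inspects the head of the queue without removing anything, and blk_fetch_request() dequeues it afterwards. A hedged sketch of the pattern (illustrative driver code; the queue lock is assumed to be held by the caller):

    #include <linux/blkdev.h>

    static void drain_a_few(struct request_queue *q, int budget)
    {
            struct request *req;

            while (budget-- > 0 && blk_peek_request(q)) {
                    /* Dequeue only after committing to handle the request. */
                    req = blk_fetch_request(q);
                    /* ... pass req to the device ... */
            }
    }
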
index 2ee093e..2490b74 100644 (file)
@@ -1250,8 +1250,7 @@ static int io_subchannel_probe(struct subchannel *sch)
        unsigned long flags;
        struct ccw_dev_id dev_id;
 
-       cdev = sch_get_cdev(sch);
-       if (cdev) {
+       if (cio_is_console(sch->schid)) {
                rc = sysfs_create_group(&sch->dev.kobj,
                                        &io_subchannel_attr_group);
                if (rc)
@@ -1260,13 +1259,13 @@ static int io_subchannel_probe(struct subchannel *sch)
                                      "0.%x.%04x (rc=%d)\n",
                                      sch->schid.ssid, sch->schid.sch_no, rc);
                /*
-                * This subchannel already has an associated ccw_device.
+                * The console subchannel already has an associated ccw_device.
                 * Throw the delayed uevent for the subchannel, register
-                * the ccw_device and exit. This happens for all early
-                * devices, e.g. the console.
+                * the ccw_device and exit.
                 */
                dev_set_uevent_suppress(&sch->dev, 0);
                kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+               cdev = sch_get_cdev(sch);
                cdev->dev.groups = ccwdev_attr_groups;
                device_initialize(&cdev->dev);
                ccw_device_register(cdev);
index 1689bda..dcc7244 100644 (file)
@@ -1270,6 +1270,9 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
 
        BUG_ON(!kernel_locked());
 
+       if (!state)
+               return;
+
        uport = state->uart_port;
        port = &state->port;
 
@@ -1316,9 +1319,9 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
         */
        if (port->flags & ASYNC_INITIALIZED) {
                unsigned long flags;
-               spin_lock_irqsave(&port->lock, flags);
+               spin_lock_irqsave(&uport->lock, flags);
                uport->ops->stop_rx(uport);
-               spin_unlock_irqrestore(&port->lock, flags);
+               spin_unlock_irqrestore(&uport->lock, flags);
                /*
                 * Before we drop DTR, make sure the UART transmitter
                 * has completely drained; this is especially
index 958a3ff..ff5bbb9 100644 (file)
@@ -1826,7 +1826,7 @@ static struct amba_id pl022_ids[] = {
                 * ST Micro derivative, this has 32bit wide
                 * and 32 locations deep TX/RX FIFO
                 */
-               .id     = 0x00108022,
+               .id     = 0x01080022,
                .mask   = 0xffffffff,
                .data   = &vendor_st,
        },
index 760e727..b84abd8 100644 (file)
@@ -9,7 +9,7 @@ comment "Enable Host or Gadget support to see Inventra options"
 # (M)HDRC = (Multipoint) Highspeed Dual-Role Controller
 config USB_MUSB_HDRC
        depends on (USB || USB_GADGET)
-       depends on !SUPERH
+       depends on (ARM || BLACKFIN)
        select NOP_USB_XCEIV if ARCH_DAVINCI
        select TWL4030_USB if MACH_OMAP_3430SDP
        select NOP_USB_XCEIV if MACH_OMAP3EVM
index 1e8f02f..d3c824d 100644 (file)
@@ -206,7 +206,7 @@ static int __devinit riowd_probe(struct of_device *op,
 
        dev_set_drvdata(&op->dev, p);
        riowd_device = p;
-       err = 0;
+       return 0;
 
 out_iounmap:
        of_iounmap(&op->resource[0], p->regs, 2);
index 72743d3..7a520a8 100644 (file)
@@ -2321,7 +2321,18 @@ static int ext3_commit_super(struct super_block *sb,
 
        if (!sbh)
                return error;
-       es->s_wtime = cpu_to_le32(get_seconds());
+       /*
+        * If the file system is mounted read-only, don't update the
+        * superblock write time.  This avoids updating the superblock
+        * write time when we are mounting the root file system
+        * read/only but we need to replay the journal; at that point,
+        * for people who are east of GMT and who make their clock
+        * tick in localtime for Windows bug-for-bug compatibility,
+        * the clock is set in the future, and this will cause e2fsck
+        * to complain and force a full file system check.
+        */
+       if (!(sb->s_flags & MS_RDONLY))
+               es->s_wtime = cpu_to_le32(get_seconds());
        es->s_free_blocks_count = cpu_to_le32(ext3_count_free_blocks(sb));
        es->s_free_inodes_count = cpu_to_le32(ext3_count_free_inodes(sb));
        BUFFER_TRACE(sbh, "marking dirty");
index 6dabf6f..a2c18ac 100644 (file)
@@ -1848,8 +1848,8 @@ nfs_compare_remount_data(struct nfs_server *nfss,
            data->timeo != (10U * nfss->client->cl_timeout->to_initval / HZ) ||
            data->nfs_server.port != nfss->port ||
            data->nfs_server.addrlen != nfss->nfs_client->cl_addrlen ||
-           !rpc_cmp_addr(&data->nfs_server.address,
-                   &nfss->nfs_client->cl_addr))
+           !rpc_cmp_addr((struct sockaddr *)&data->nfs_server.address,
+                         (struct sockaddr *)&nfss->nfs_client->cl_addr))
                return -EINVAL;
 
        return 0;
index f38fee0..7b685e1 100644 (file)
@@ -248,11 +248,19 @@ ssize_t part_stat_show(struct device *dev,
                part_stat_read(p, merges[WRITE]),
                (unsigned long long)part_stat_read(p, sectors[WRITE]),
                jiffies_to_msecs(part_stat_read(p, ticks[WRITE])),
-               p->in_flight,
+               part_in_flight(p),
                jiffies_to_msecs(part_stat_read(p, io_ticks)),
                jiffies_to_msecs(part_stat_read(p, time_in_queue)));
 }
 
+ssize_t part_inflight_show(struct device *dev,
+                       struct device_attribute *attr, char *buf)
+{
+       struct hd_struct *p = dev_to_part(dev);
+
+       return sprintf(buf, "%8u %8u\n", p->in_flight[0], p->in_flight[1]);
+}
+
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 ssize_t part_fail_show(struct device *dev,
                       struct device_attribute *attr, char *buf)
@@ -281,6 +289,7 @@ static DEVICE_ATTR(start, S_IRUGO, part_start_show, NULL);
 static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
 static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL);
 static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
+static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 static struct device_attribute dev_attr_fail =
        __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
@@ -292,6 +301,7 @@ static struct attribute *part_attrs[] = {
        &dev_attr_size.attr,
        &dev_attr_alignment_offset.attr,
        &dev_attr_stat.attr,
+       &dev_attr_inflight.attr,
 #ifdef CONFIG_FAIL_MAKE_REQUEST
        &dev_attr_fail.attr,
 #endif
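
The new per-partition attribute reports the read and write requests currently in flight as two counters, formatted "%8u %8u". A small userspace sketch that reads it; the device path is only an example, not something this patch defines:

    #include <stdio.h>

    int main(void)
    {
            unsigned int reads, writes;
            FILE *f = fopen("/sys/block/sda/sda1/inflight", "r");

            if (!f)
                    return 1;
            if (fscanf(f, "%u %u", &reads, &writes) == 2)
                    printf("in flight: %u reads, %u writes\n", reads, writes);
            fclose(f);
            return 0;
    }
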
index 2511904..221cecd 100644 (file)
@@ -1172,11 +1172,7 @@ static inline void put_dev_sector(Sector p)
 }
 
 struct work_struct;
-struct delayed_work;
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
-int kblockd_schedule_delayed_work(struct request_queue *q,
-                                       struct delayed_work *work,
-                                       unsigned long delay);
 
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
        MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
index 4ec5e67..d117704 100644 (file)
@@ -144,7 +144,7 @@ extern char                 *trace_profile_buf_nmi;
 #define MAX_FILTER_STR_VAL     256     /* Should handle KSYM_SYMBOL_LEN */
 
 extern void destroy_preds(struct ftrace_event_call *call);
-extern int filter_match_preds(struct ftrace_event_call *call, void *rec);
+extern int filter_match_preds(struct event_filter *filter, void *rec);
 extern int filter_current_check_discard(struct ring_buffer *buffer,
                                        struct ftrace_event_call *call,
                                        void *rec,
@@ -186,4 +186,13 @@ do {                                                                       \
                __trace_printk(ip, fmt, ##args);                        \
 } while (0)
 
+#ifdef CONFIG_EVENT_PROFILE
+struct perf_event;
+extern int ftrace_profile_enable(int event_id);
+extern void ftrace_profile_disable(int event_id);
+extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
+                                    char *filter_str);
+extern void ftrace_profile_free_filter(struct perf_event *event);
+#endif
+
 #endif /* _LINUX_FTRACE_EVENT_H */
index 7beaa21..297df45 100644 (file)
@@ -98,7 +98,7 @@ struct hd_struct {
        int make_it_fail;
 #endif
        unsigned long stamp;
-       int in_flight;
+       int in_flight[2];
 #ifdef CONFIG_SMP
        struct disk_stats *dkstats;
 #else
@@ -322,18 +322,23 @@ static inline void free_part_stats(struct hd_struct *part)
 #define part_stat_sub(cpu, gendiskp, field, subnd)                     \
        part_stat_add(cpu, gendiskp, field, -subnd)
 
-static inline void part_inc_in_flight(struct hd_struct *part)
+static inline void part_inc_in_flight(struct hd_struct *part, int rw)
 {
-       part->in_flight++;
+       part->in_flight[rw]++;
        if (part->partno)
-               part_to_disk(part)->part0.in_flight++;
+               part_to_disk(part)->part0.in_flight[rw]++;
 }
 
-static inline void part_dec_in_flight(struct hd_struct *part)
+static inline void part_dec_in_flight(struct hd_struct *part, int rw)
 {
-       part->in_flight--;
+       part->in_flight[rw]--;
        if (part->partno)
-               part_to_disk(part)->part0.in_flight--;
+               part_to_disk(part)->part0.in_flight[rw]--;
+}
+
+static inline int part_in_flight(struct hd_struct *part)
+{
+       return part->in_flight[0] + part->in_flight[1];
 }
 
 /* block/blk-core.c */
@@ -546,6 +551,8 @@ extern ssize_t part_size_show(struct device *dev,
                              struct device_attribute *attr, char *buf);
 extern ssize_t part_stat_show(struct device *dev,
                              struct device_attribute *attr, char *buf);
+extern ssize_t part_inflight_show(struct device *dev,
+                             struct device_attribute *attr, char *buf);
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 extern ssize_t part_fail_show(struct device *dev,
                              struct device_attribute *attr, char *buf);
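
Splitting hd_struct::in_flight into a two-element array means callers now say which direction they are accounting, while part_in_flight() preserves the combined count for the existing stat output. A hedged sketch of how block-layer accounting might use the new helpers (the function names here are illustrative, not from the patch):

    #include <linux/blkdev.h>
    #include <linux/genhd.h>

    static void sketch_account_start(struct hd_struct *part, struct request *rq)
    {
            /* rq_data_dir() is 0 for reads, 1 for writes. */
            part_inc_in_flight(part, rq_data_dir(rq));
    }

    static void sketch_account_done(struct hd_struct *part, struct request *rq)
    {
            part_dec_in_flight(part, rq_data_dir(rq));
    }
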
index d3cd23f..f4e3184 100644 (file)
@@ -659,6 +659,12 @@ extern int do_sysinfo(struct sysinfo *info);
 
 #endif /* __KERNEL__ */
 
+#ifndef __EXPORTED_HEADERS__
+#ifndef __KERNEL__
+#warning Attempt to use kernel headers from user space, see http://kernelnewbies.org/KernelHeaders
+#endif /* __KERNEL__ */
+#endif /* __EXPORTED_HEADERS__ */
+
 #define SI_LOAD_SHIFT  16
 struct sysinfo {
        long uptime;                    /* Seconds since boot */
index da1fda8..f490e7a 100644 (file)
 #define PCI_DEVICE_ID_TI_X515          0x8036
 #define PCI_DEVICE_ID_TI_XX12          0x8039
 #define PCI_DEVICE_ID_TI_XX12_FM       0x803b
+#define PCI_DEVICE_ID_TI_XIO2000A      0x8231
 #define PCI_DEVICE_ID_TI_1130          0xac12
 #define PCI_DEVICE_ID_TI_1031          0xac13
 #define PCI_DEVICE_ID_TI_1131          0xac15
index 7b7fbf4..91a2b43 100644 (file)
@@ -225,6 +225,7 @@ struct perf_counter_attr {
 #define PERF_COUNTER_IOC_RESET         _IO ('$', 3)
 #define PERF_COUNTER_IOC_PERIOD                _IOW('$', 4, u64)
 #define PERF_COUNTER_IOC_SET_OUTPUT    _IO ('$', 5)
+#define PERF_COUNTER_IOC_SET_FILTER    _IOW('$', 6, char *)
 
 enum perf_counter_ioc_flags {
        PERF_IOC_FLAG_GROUP             = 1U << 0,
index 2e6d95f..df9d964 100644 (file)
@@ -221,6 +221,7 @@ struct perf_event_attr {
 #define PERF_EVENT_IOC_RESET           _IO ('$', 3)
 #define PERF_EVENT_IOC_PERIOD          _IOW('$', 4, u64)
 #define PERF_EVENT_IOC_SET_OUTPUT      _IO ('$', 5)
+#define PERF_EVENT_IOC_SET_FILTER      _IOW('$', 6, char *)
 
 enum perf_event_ioc_flags {
        PERF_IOC_FLAG_GROUP             = 1U << 0,
@@ -633,7 +634,12 @@ struct perf_event {
 
        struct pid_namespace            *ns;
        u64                             id;
+
+#ifdef CONFIG_EVENT_PROFILE
+       struct event_filter             *filter;
 #endif
+
+#endif /* CONFIG_PERF_EVENTS */
 };
 
 /**
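
PERF_EVENT_IOC_SET_FILTER takes a pointer to a NUL-terminated filter expression and attaches it to an already-open event descriptor. A hedged userspace sketch; the descriptor is assumed to refer to a tracepoint event, and the expression is only an example of the ftrace-style filter syntax:

    #include <sys/ioctl.h>
    #include <linux/perf_event.h>

    static int set_example_filter(int perf_fd)
    {
            /* Only count events whose common_pid field equals 1. */
            return ioctl(perf_fd, PERF_EVENT_IOC_SET_FILTER, "common_pid == 1");
    }
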
index 813be59..2ea1dd1 100644 (file)
@@ -24,8 +24,21 @@ static inline int reacquire_kernel_lock(struct task_struct *task)
        return 0;
 }
 
-extern void __lockfunc lock_kernel(void)       __acquires(kernel_lock);
-extern void __lockfunc unlock_kernel(void)     __releases(kernel_lock);
+extern void __lockfunc
+_lock_kernel(const char *func, const char *file, int line)
+__acquires(kernel_lock);
+
+extern void __lockfunc
+_unlock_kernel(const char *func, const char *file, int line)
+__releases(kernel_lock);
+
+#define lock_kernel() do {                                     \
+       _lock_kernel(__func__, __FILE__, __LINE__);             \
+} while (0)
+
+#define unlock_kernel()        do {                                    \
+       _unlock_kernel(__func__, __FILE__, __LINE__);           \
+} while (0)
 
 /*
  * Various legacy drivers don't really need the BKL in a specific
@@ -41,8 +54,8 @@ static inline void cycle_kernel_lock(void)
 
 #else
 
-#define lock_kernel()                          do { } while(0)
-#define unlock_kernel()                                do { } while(0)
+#define lock_kernel()
+#define unlock_kernel()
 #define release_kernel_lock(task)              do { } while(0)
 #define cycle_kernel_lock()                    do { } while(0)
 #define reacquire_kernel_lock(task)            0
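
Turning lock_kernel()/unlock_kernel() into macros lets each call site pass its own __func__, __FILE__ and __LINE__ down to the out-of-line helpers, which is what the new bkl tracepoints report. A sketch of how the helper might forward that information; this is illustrative, not the actual lib/kernel_lock.c body:

    #include <linux/smp_lock.h>
    #include <trace/events/bkl.h>

    void __lockfunc _lock_kernel(const char *func, const char *file, int line)
    {
            trace_lock_kernel(func, file, line);
            /* ... acquire the BKL and bump current->lock_depth ... */
    }
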
index 7ef0c7b..cf24c20 100644 (file)
@@ -207,6 +207,7 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 
 extern void flush_workqueue(struct workqueue_struct *wq);
 extern void flush_scheduled_work(void);
+extern void flush_delayed_work(struct delayed_work *work);
 
 extern int schedule_work(struct work_struct *work);
 extern int schedule_work_on(int cpu, struct work_struct *work);
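
flush_delayed_work() is declared here so callers can wait until a specific delayed work item has actually executed. A hedged usage sketch with made-up names:

    #include <linux/workqueue.h>

    static void example_fn(struct work_struct *work)
    {
            /* deferred processing */
    }

    static DECLARE_DELAYED_WORK(example_work, example_fn);

    static void example_shutdown(void)
    {
            schedule_delayed_work(&example_work, HZ);
            /* ... */
            /* Make sure example_fn() has run before tearing anything down. */
            flush_delayed_work(&example_work);
    }
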
diff --git a/include/trace/events/bkl.h b/include/trace/events/bkl.h
new file mode 100644 (file)
index 0000000..1af72dc
--- /dev/null
@@ -0,0 +1,61 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM bkl
+
+#if !defined(_TRACE_BKL_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_BKL_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(lock_kernel,
+
+       TP_PROTO(const char *func, const char *file, int line),
+
+       TP_ARGS(func, file, line),
+
+       TP_STRUCT__entry(
+               __field(        int,            depth                   )
+               __field_ext(    const char *,   func, FILTER_PTR_STRING )
+               __field_ext(    const char *,   file, FILTER_PTR_STRING )
+               __field(        int,            line                    )
+       ),
+
+       TP_fast_assign(
+               /* We want to record the lock_depth after the lock is acquired */
+               __entry->depth = current->lock_depth + 1;
+               __entry->func = func;
+               __entry->file = file;
+               __entry->line = line;
+       ),
+
+       TP_printk("depth=%d file:line=%s:%d func=%s()", __entry->depth,
+                 __entry->file, __entry->line, __entry->func)
+);
+
+TRACE_EVENT(unlock_kernel,
+
+       TP_PROTO(const char *func, const char *file, int line),
+
+       TP_ARGS(func, file, line),
+
+       TP_STRUCT__entry(
+               __field(int,            depth           )
+               __field(const char *,   func            )
+               __field(const char *,   file            )
+               __field(int,            line            )
+       ),
+
+       TP_fast_assign(
+               __entry->depth = current->lock_depth;
+               __entry->func = func;
+               __entry->file = file;
+               __entry->line = line;
+       ),
+
+       TP_printk("depth=%d file:line=%s:%d func=%s()", __entry->depth,
+                 __entry->file, __entry->line, __entry->func)
+);
+
+#endif /* _TRACE_BKL_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
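
With this header in place the two events show up under the "bkl" system in the tracing debugfs, and each record carries the depth, file:line and function laid out by TP_printk above. A small sketch that enables them from userspace; the /sys/kernel/debug mount point is the usual default, not something this file defines:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/kernel/debug/tracing/events/bkl/enable", "w");

            if (!f)
                    return 1;
            fputs("1\n", f);
            fclose(f);
            return 0;
    }
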
index b89f9db..dcfcd44 100644 (file)
@@ -48,7 +48,7 @@ TRACE_EVENT(irq_handler_entry,
                __assign_str(name, action->name);
        ),
 
-       TP_printk("irq=%d handler=%s", __entry->irq, __get_str(name))
+       TP_printk("irq=%d name=%s", __entry->irq, __get_str(name))
 );
 
 /**
@@ -78,7 +78,7 @@ TRACE_EVENT(irq_handler_exit,
                __entry->ret    = ret;
        ),
 
-       TP_printk("irq=%d return=%s",
+       TP_printk("irq=%d ret=%s",
                  __entry->irq, __entry->ret ? "handled" : "unhandled")
 );
 
@@ -107,7 +107,7 @@ TRACE_EVENT(softirq_entry,
                __entry->vec = (int)(h - vec);
        ),
 
-       TP_printk("softirq=%d action=%s", __entry->vec,
+       TP_printk("vec=%d [action=%s]", __entry->vec,
                  show_softirq_name(__entry->vec))
 );
 
@@ -136,7 +136,7 @@ TRACE_EVENT(softirq_exit,
                __entry->vec = (int)(h - vec);
        ),
 
-       TP_printk("softirq=%d action=%s", __entry->vec,
+       TP_printk("vec=%d [action=%s]", __entry->vec,
                  show_softirq_name(__entry->vec))
 );
 
index ea6d579..9bb96e5 100644 (file)
@@ -16,8 +16,6 @@ enum {
 };
 #endif
 
-
-
 TRACE_EVENT(power_start,
 
        TP_PROTO(unsigned int type, unsigned int state),
index 4069c43..b50b985 100644 (file)
@@ -26,7 +26,7 @@ TRACE_EVENT(sched_kthread_stop,
                __entry->pid    = t->pid;
        ),
 
-       TP_printk("task %s:%d", __entry->comm, __entry->pid)
+       TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
 );
 
 /*
@@ -46,7 +46,7 @@ TRACE_EVENT(sched_kthread_stop_ret,
                __entry->ret    = ret;
        ),
 
-       TP_printk("ret %d", __entry->ret)
+       TP_printk("ret=%d", __entry->ret)
 );
 
 /*
@@ -73,7 +73,7 @@ TRACE_EVENT(sched_wait_task,
                __entry->prio   = p->prio;
        ),
 
-       TP_printk("task %s:%d [%d]",
+       TP_printk("comm=%s pid=%d prio=%d",
                  __entry->comm, __entry->pid, __entry->prio)
 );
 
@@ -94,7 +94,7 @@ TRACE_EVENT(sched_wakeup,
                __field(        pid_t,  pid                     )
                __field(        int,    prio                    )
                __field(        int,    success                 )
-               __field(        int,    cpu                     )
+               __field(        int,    target_cpu              )
        ),
 
        TP_fast_assign(
@@ -102,12 +102,12 @@ TRACE_EVENT(sched_wakeup,
                __entry->pid            = p->pid;
                __entry->prio           = p->prio;
                __entry->success        = success;
-               __entry->cpu            = task_cpu(p);
+               __entry->target_cpu     = task_cpu(p);
        ),
 
-       TP_printk("task %s:%d [%d] success=%d [%03d]",
+       TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
                  __entry->comm, __entry->pid, __entry->prio,
-                 __entry->success, __entry->cpu)
+                 __entry->success, __entry->target_cpu)
 );
 
 /*
@@ -127,7 +127,7 @@ TRACE_EVENT(sched_wakeup_new,
                __field(        pid_t,  pid                     )
                __field(        int,    prio                    )
                __field(        int,    success                 )
-               __field(        int,    cpu                     )
+               __field(        int,    target_cpu              )
        ),
 
        TP_fast_assign(
@@ -135,12 +135,12 @@ TRACE_EVENT(sched_wakeup_new,
                __entry->pid            = p->pid;
                __entry->prio           = p->prio;
                __entry->success        = success;
-               __entry->cpu            = task_cpu(p);
+               __entry->target_cpu     = task_cpu(p);
        ),
 
-       TP_printk("task %s:%d [%d] success=%d [%03d]",
+       TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
                  __entry->comm, __entry->pid, __entry->prio,
-                 __entry->success, __entry->cpu)
+                 __entry->success, __entry->target_cpu)
 );
 
 /*
@@ -176,7 +176,7 @@ TRACE_EVENT(sched_switch,
                __entry->next_prio      = next->prio;
        ),
 
-       TP_printk("task %s:%d [%d] (%s) ==> %s:%d [%d]",
+       TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_pid=%d next_prio=%d",
                __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
                __entry->prev_state ?
                  __print_flags(__entry->prev_state, "|",
@@ -211,7 +211,7 @@ TRACE_EVENT(sched_migrate_task,
                __entry->dest_cpu       = dest_cpu;
        ),
 
-       TP_printk("task %s:%d [%d] from: %d  to: %d",
+       TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
                  __entry->comm, __entry->pid, __entry->prio,
                  __entry->orig_cpu, __entry->dest_cpu)
 );
@@ -237,7 +237,7 @@ TRACE_EVENT(sched_process_free,
                __entry->prio           = p->prio;
        ),
 
-       TP_printk("task %s:%d [%d]",
+       TP_printk("comm=%s pid=%d prio=%d",
                  __entry->comm, __entry->pid, __entry->prio)
 );
 
@@ -262,7 +262,7 @@ TRACE_EVENT(sched_process_exit,
                __entry->prio           = p->prio;
        ),
 
-       TP_printk("task %s:%d [%d]",
+       TP_printk("comm=%s pid=%d prio=%d",
                  __entry->comm, __entry->pid, __entry->prio)
 );
 
@@ -287,7 +287,7 @@ TRACE_EVENT(sched_process_wait,
                __entry->prio           = current->prio;
        ),
 
-       TP_printk("task %s:%d [%d]",
+       TP_printk("comm=%s pid=%d prio=%d",
                  __entry->comm, __entry->pid, __entry->prio)
 );
 
@@ -314,7 +314,7 @@ TRACE_EVENT(sched_process_fork,
                __entry->child_pid      = child->pid;
        ),
 
-       TP_printk("parent %s:%d  child %s:%d",
+       TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
                __entry->parent_comm, __entry->parent_pid,
                __entry->child_comm, __entry->child_pid)
 );
@@ -340,7 +340,7 @@ TRACE_EVENT(sched_signal_send,
                __entry->sig    = sig;
        ),
 
-       TP_printk("sig: %d  task %s:%d",
+       TP_printk("sig=%d comm=%s pid=%d",
                  __entry->sig, __entry->comm, __entry->pid)
 );
 
@@ -374,7 +374,7 @@ TRACE_EVENT(sched_stat_wait,
                __perf_count(delay);
        ),
 
-       TP_printk("task: %s:%d wait: %Lu [ns]",
+       TP_printk("comm=%s pid=%d delay=%Lu [ns]",
                        __entry->comm, __entry->pid,
                        (unsigned long long)__entry->delay)
 );
@@ -406,7 +406,7 @@ TRACE_EVENT(sched_stat_runtime,
                __perf_count(runtime);
        ),
 
-       TP_printk("task: %s:%d runtime: %Lu [ns], vruntime: %Lu [ns]",
+       TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
                        __entry->comm, __entry->pid,
                        (unsigned long long)__entry->runtime,
                        (unsigned long long)__entry->vruntime)
@@ -437,7 +437,7 @@ TRACE_EVENT(sched_stat_sleep,
                __perf_count(delay);
        ),
 
-       TP_printk("task: %s:%d sleep: %Lu [ns]",
+       TP_printk("comm=%s pid=%d delay=%Lu [ns]",
                        __entry->comm, __entry->pid,
                        (unsigned long long)__entry->delay)
 );
@@ -467,7 +467,7 @@ TRACE_EVENT(sched_stat_iowait,
                __perf_count(delay);
        ),
 
-       TP_printk("task: %s:%d iowait: %Lu [ns]",
+       TP_printk("comm=%s pid=%d delay=%Lu [ns]",
                        __entry->comm, __entry->pid,
                        (unsigned long long)__entry->delay)
 );
index 1844c48..e5ce87a 100644 (file)
@@ -26,7 +26,7 @@ TRACE_EVENT(timer_init,
                __entry->timer  = timer;
        ),
 
-       TP_printk("timer %p", __entry->timer)
+       TP_printk("timer=%p", __entry->timer)
 );
 
 /**
@@ -54,7 +54,7 @@ TRACE_EVENT(timer_start,
                __entry->now            = jiffies;
        ),
 
-       TP_printk("timer %p: func %pf, expires %lu, timeout %ld",
+       TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld]",
                  __entry->timer, __entry->function, __entry->expires,
                  (long)__entry->expires - __entry->now)
 );
@@ -81,7 +81,7 @@ TRACE_EVENT(timer_expire_entry,
                __entry->now            = jiffies;
        ),
 
-       TP_printk("timer %p: now %lu", __entry->timer, __entry->now)
+       TP_printk("timer=%p now=%lu", __entry->timer, __entry->now)
 );
 
 /**
@@ -108,7 +108,7 @@ TRACE_EVENT(timer_expire_exit,
                __entry->timer  = timer;
        ),
 
-       TP_printk("timer %p", __entry->timer)
+       TP_printk("timer=%p", __entry->timer)
 );
 
 /**
@@ -129,7 +129,7 @@ TRACE_EVENT(timer_cancel,
                __entry->timer  = timer;
        ),
 
-       TP_printk("timer %p", __entry->timer)
+       TP_printk("timer=%p", __entry->timer)
 );
 
 /**
@@ -140,24 +140,24 @@ TRACE_EVENT(timer_cancel,
  */
 TRACE_EVENT(hrtimer_init,
 
-       TP_PROTO(struct hrtimer *timer, clockid_t clockid,
+       TP_PROTO(struct hrtimer *hrtimer, clockid_t clockid,
                 enum hrtimer_mode mode),
 
-       TP_ARGS(timer, clockid, mode),
+       TP_ARGS(hrtimer, clockid, mode),
 
        TP_STRUCT__entry(
-               __field( void *,                timer           )
+               __field( void *,                hrtimer         )
                __field( clockid_t,             clockid         )
                __field( enum hrtimer_mode,     mode            )
        ),
 
        TP_fast_assign(
-               __entry->timer          = timer;
+               __entry->hrtimer        = hrtimer;
                __entry->clockid        = clockid;
                __entry->mode           = mode;
        ),
 
-       TP_printk("hrtimer %p, clockid %s, mode %s", __entry->timer,
+       TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer,
                  __entry->clockid == CLOCK_REALTIME ?
                        "CLOCK_REALTIME" : "CLOCK_MONOTONIC",
                  __entry->mode == HRTIMER_MODE_ABS ?
@@ -170,26 +170,26 @@ TRACE_EVENT(hrtimer_init,
  */
 TRACE_EVENT(hrtimer_start,
 
-       TP_PROTO(struct hrtimer *timer),
+       TP_PROTO(struct hrtimer *hrtimer),
 
-       TP_ARGS(timer),
+       TP_ARGS(hrtimer),
 
        TP_STRUCT__entry(
-               __field( void *,        timer           )
+               __field( void *,        hrtimer         )
                __field( void *,        function        )
                __field( s64,           expires         )
                __field( s64,           softexpires     )
        ),
 
        TP_fast_assign(
-               __entry->timer          = timer;
-               __entry->function       = timer->function;
-               __entry->expires        = hrtimer_get_expires(timer).tv64;
-               __entry->softexpires    = hrtimer_get_softexpires(timer).tv64;
+               __entry->hrtimer        = hrtimer;
+               __entry->function       = hrtimer->function;
+               __entry->expires        = hrtimer_get_expires(hrtimer).tv64;
+               __entry->softexpires    = hrtimer_get_softexpires(hrtimer).tv64;
        ),
 
-       TP_printk("hrtimer %p, func %pf, expires %llu, softexpires %llu",
-                 __entry->timer, __entry->function,
+       TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu",
+                 __entry->hrtimer, __entry->function,
                  (unsigned long long)ktime_to_ns((ktime_t) {
                                  .tv64 = __entry->expires }),
                  (unsigned long long)ktime_to_ns((ktime_t) {
@@ -206,23 +206,22 @@ TRACE_EVENT(hrtimer_start,
  */
 TRACE_EVENT(hrtimer_expire_entry,
 
-       TP_PROTO(struct hrtimer *timer, ktime_t *now),
+       TP_PROTO(struct hrtimer *hrtimer, ktime_t *now),
 
-       TP_ARGS(timer, now),
+       TP_ARGS(hrtimer, now),
 
        TP_STRUCT__entry(
-               __field( void *,        timer   )
+               __field( void *,        hrtimer )
                __field( s64,           now     )
        ),
 
        TP_fast_assign(
-               __entry->timer  = timer;
-               __entry->now    = now->tv64;
+               __entry->hrtimer        = hrtimer;
+               __entry->now            = now->tv64;
        ),
 
-       TP_printk("hrtimer %p, now %llu", __entry->timer,
-                 (unsigned long long)ktime_to_ns((ktime_t) {
-                                 .tv64 = __entry->now }))
+       TP_printk("hrtimer=%p now=%llu", __entry->hrtimer,
+                 (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now }))
  );
 
 /**
@@ -234,40 +233,40 @@ TRACE_EVENT(hrtimer_expire_entry,
  */
 TRACE_EVENT(hrtimer_expire_exit,
 
-       TP_PROTO(struct hrtimer *timer),
+       TP_PROTO(struct hrtimer *hrtimer),
 
-       TP_ARGS(timer),
+       TP_ARGS(hrtimer),
 
        TP_STRUCT__entry(
-               __field( void *,        timer   )
+               __field( void *,        hrtimer )
        ),
 
        TP_fast_assign(
-               __entry->timer  = timer;
+               __entry->hrtimer        = hrtimer;
        ),
 
-       TP_printk("hrtimer %p", __entry->timer)
+       TP_printk("hrtimer=%p", __entry->hrtimer)
 );
 
 /**
  * hrtimer_cancel - called when the hrtimer is canceled
- * @timer:     pointer to struct hrtimer
+ * @hrtimer:   pointer to struct hrtimer
  */
 TRACE_EVENT(hrtimer_cancel,
 
-       TP_PROTO(struct hrtimer *timer),
+       TP_PROTO(struct hrtimer *hrtimer),
 
-       TP_ARGS(timer),
+       TP_ARGS(hrtimer),
 
        TP_STRUCT__entry(
-               __field( voi