Merge branch 'i2c-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jdelvare/staging
author     Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 10 Feb 2010 15:34:46 +0000 (07:34 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 10 Feb 2010 15:34:46 +0000 (07:34 -0800)
* 'i2c-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jdelvare/staging:
  i2c-tiny-usb: Fix on big-endian systems

162 files changed:
Documentation/ABI/testing/ima_policy
Documentation/cpu-freq/governors.txt
MAINTAINERS
Makefile
arch/avr32/mach-at32ap/at32ap700x.c
arch/microblaze/kernel/setup.c
arch/powerpc/mm/tlb_hash64.c
arch/powerpc/platforms/pseries/xics.c
arch/s390/include/asm/lowcore.h
arch/sh/kernel/cpu/sh3/entry.S
arch/sh/kernel/dwarf.c
arch/sh/kernel/entry-common.S
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
arch/x86/kvm/i8254.c
arch/x86/kvm/x86.c
drivers/ata/ahci.c
drivers/ata/libata-scsi.c
drivers/ata/libata-sff.c
drivers/bluetooth/btmrvl_sdio.c
drivers/char/tty_io.c
drivers/cpufreq/cpufreq_ondemand.c
drivers/gpu/drm/ati_pcigart.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/r420.c
drivers/gpu/drm/radeon/r520.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_audio.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_combios.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/rs400.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rs690.c
drivers/gpu/drm/radeon/rv515.c
drivers/gpu/drm/radeon/rv770.c
drivers/hwmon/adt7462.c
drivers/hwmon/lm78.c
drivers/hwmon/w83781d.c
drivers/md/md.c
drivers/md/raid5.c
drivers/media/dvb/dvb-core/dmxdev.c
drivers/media/dvb/dvb-core/dvb_demux.c
drivers/net/ax88796.c
drivers/net/cxgb3/sge.c
drivers/net/igb/igb_main.c
drivers/net/ixgbe/ixgbe_main.c
drivers/net/netxen/netxen_nic_main.c
drivers/net/sky2.c
drivers/pci/quirks.c
drivers/s390/cio/qdio_main.c
drivers/usb/host/r8a66597-hcd.c
fs/9p/v9fs.c
fs/9p/v9fs_vfs.h
fs/9p/vfs_file.c
fs/9p/vfs_inode.c
fs/befs/linuxvfs.c
fs/block_dev.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/relocation.c
fs/cifs/CHANGES
fs/cifs/cifsfs.h
fs/cifs/cifsglob.h
fs/cifs/connect.c
fs/cifs/inode.c
fs/cifs/readdir.c
fs/cifs/sess.c
fs/fcntl.c
fs/file_table.c
fs/namei.c
fs/nfsd/export.c
fs/nfsd/vfs.c
fs/ocfs2/aops.c
fs/ocfs2/buffer_head_io.c
fs/ocfs2/cluster/heartbeat.c
fs/ocfs2/cluster/tcp.c
fs/ocfs2/cluster/tcp_internal.h
fs/ocfs2/dlm/dlmapi.h
fs/ocfs2/dlm/dlmast.c
fs/ocfs2/dlm/dlmconvert.c
fs/ocfs2/dlm/dlmdebug.c
fs/ocfs2/dlm/dlmdomain.c
fs/ocfs2/dlm/dlmlock.c
fs/ocfs2/dlm/dlmmaster.c
fs/ocfs2/dlm/dlmrecovery.c
fs/ocfs2/dlm/dlmunlock.c
fs/ocfs2/dlmglue.c
fs/ocfs2/export.c
fs/ocfs2/extent_map.c
fs/ocfs2/file.c
fs/ocfs2/inode.c
fs/ocfs2/ioctl.c
fs/ocfs2/journal.c
fs/ocfs2/ocfs2.h
fs/ocfs2/ocfs2_fs.h
fs/ocfs2/refcounttree.c
fs/ocfs2/stack_o2cb.c
fs/ocfs2/super.c
fs/ocfs2/symlink.c
fs/ocfs2/uptodate.c
include/linux/ata.h
include/linux/compiler.h
include/linux/ima.h
include/net/netns/conntrack.h
include/net/netns/ipv4.h
init/main.c
kernel/time/timekeeping.c
mm/migrate.c
net/9p/client.c
net/9p/trans_fd.c
net/9p/trans_rdma.c
net/9p/trans_virtio.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_event.c
net/bluetooth/hidp/core.c
net/bluetooth/hidp/hidp.h
net/bluetooth/rfcomm/core.c
net/core/dst.c
net/core/pktgen.c
net/dccp/ccid.c
net/dccp/ccid.h
net/dccp/probe.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
net/ipv4/netfilter/nf_nat_core.c
net/ipv6/netfilter/ip6_tables.c
net/irda/irnet/irnet_ppp.c
net/key/af_key.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_expect.c
net/netfilter/nf_conntrack_helper.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_standalone.c
net/netlink/af_netlink.c
net/sched/Kconfig
security/integrity/ima/ima.h
security/integrity/ima/ima_api.c
security/integrity/ima/ima_iint.c
security/integrity/ima/ima_main.c
security/integrity/ima/ima_policy.c
security/security.c
sound/pci/ctxfi/ctatc.c
sound/pci/ctxfi/ctvmem.c
sound/pci/ctxfi/ctvmem.h
sound/pci/hda/hda_intel.c
sound/pci/ice1712/aureon.c
sound/soc/omap/omap3pandora.c

diff --git a/Documentation/ABI/testing/ima_policy b/Documentation/ABI/testing/ima_policy
index 6434f0df012e3cd72d8e373bc30c406b7f0b3873..6cd6daefaaedeb160a6f1ac1d616de7871b38965 100644
--- a/Documentation/ABI/testing/ima_policy
+++ b/Documentation/ABI/testing/ima_policy
@@ -20,7 +20,7 @@ Description:
                        lsm:    [[subj_user=] [subj_role=] [subj_type=]
                                 [obj_user=] [obj_role=] [obj_type=]]
 
-               base:   func:= [BPRM_CHECK][FILE_MMAP][INODE_PERMISSION]
+               base:   func:= [BPRM_CHECK][FILE_MMAP][FILE_CHECK]
                        mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC]
                        fsmagic:= hex value
                        uid:= decimal value
@@ -40,11 +40,11 @@ Description:
 
                        measure func=BPRM_CHECK
                        measure func=FILE_MMAP mask=MAY_EXEC
-                       measure func=INODE_PERM mask=MAY_READ uid=0
+                       measure func=FILE_CHECK mask=MAY_READ uid=0
 
                The default policy measures all executables in bprm_check,
                all files mmapped executable in file_mmap, and all files
-               open for read by root in inode_permission.
+               open for read by root in do_filp_open.
 
                Examples of LSM specific definitions:
 
@@ -54,8 +54,8 @@ Description:
 
                        dont_measure obj_type=var_log_t
                        dont_measure obj_type=auditd_log_t
-                       measure subj_user=system_u func=INODE_PERM mask=MAY_READ
-                       measure subj_role=system_r func=INODE_PERM mask=MAY_READ
+                       measure subj_user=system_u func=FILE_CHECK mask=MAY_READ
+                       measure subj_role=system_r func=FILE_CHECK mask=MAY_READ
 
                Smack:
-                       measure subj_user=_ func=INODE_PERM mask=MAY_READ
+                       measure subj_user=_ func=FILE_CHECK mask=MAY_READ
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index aed082f49d09894be277ebcbfef898cf75ee1074..737988fca64d37712a7b5c7dfc2d316a57be9082 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -145,8 +145,8 @@ show_sampling_rate_max: THIS INTERFACE IS DEPRECATED, DON'T USE IT.
 up_threshold: defines what the average CPU usage between the samplings
 of 'sampling_rate' needs to be for the kernel to make a decision on
 whether it should increase the frequency.  For example when it is set
-to its default value of '80' it means that between the checking
-intervals the CPU needs to be on average more than 80% in use to then
+to its default value of '95' it means that between the checking
+intervals the CPU needs to be on average more than 95% in use to then
 decide that the CPU frequency needs to be increased.  
 
 ignore_nice_load: this parameter takes a value of '0' or '1'. When
diff --git a/MAINTAINERS b/MAINTAINERS
index 03f38c18f3236718bf4bff40fa5ffaffce5bbcbe..602022d2c7a51597ae79a2f17835b8ff2f1f75cf 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3836,6 +3836,7 @@ NETWORKING DRIVERS
 L:     netdev@vger.kernel.org
 W:     http://www.linuxfoundation.org/en/Net
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6.git
 S:     Odd Fixes
 F:     drivers/net/
 F:     include/linux/if_*
diff --git a/Makefile b/Makefile
index 394aec712c7df254f6f9db139f80cc054eb99973..f8e02e9491d02a786f6c1284463b335909c47177 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 33
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Man-Eating Seals of Antiquity
 
 # *DOCUMENTATION*
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index 1aa1ea5e92127984428e06747289f3e2f055f1d1..b13d1879e51b9f0d961fdff8af8c15b9950dcd0e 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -1325,7 +1325,7 @@ struct platform_device *__init
 at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 {
        struct platform_device          *pdev;
-       struct mci_dma_slave            *slave;
+       struct mci_dma_data             *slave;
        u32                             pioa_mask;
        u32                             piob_mask;
 
@@ -1344,7 +1344,9 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
                                ARRAY_SIZE(atmel_mci0_resource)))
                goto fail;
 
-       slave = kzalloc(sizeof(struct mci_dma_slave), GFP_KERNEL);
+       slave = kzalloc(sizeof(struct mci_dma_data), GFP_KERNEL);
+       if (!slave)
+               goto fail;
 
        slave->sdata.dma_dev = &dw_dmac0_device.dev;
        slave->sdata.reg_width = DW_DMA_SLAVE_WIDTH_32BIT;
@@ -1357,7 +1359,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 
        if (platform_device_add_data(pdev, data,
                                sizeof(struct mci_platform_data)))
-               goto fail;
+               goto fail_free;
 
        /* CLK line is common to both slots */
        pioa_mask = 1 << 10;
@@ -1381,7 +1383,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
                /* Slot is unused */
                break;
        default:
-               goto fail;
+               goto fail_free;
        }
 
        select_peripheral(PIOA, pioa_mask, PERIPH_A, 0);
@@ -1408,7 +1410,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
                break;
        default:
                if (!data->slot[0].bus_width)
-                       goto fail;
+                       goto fail_free;
 
                data->slot[1].bus_width = 0;
                break;
@@ -1419,9 +1421,10 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
        platform_device_add(pdev);
        return pdev;
 
+fail_free:
+       kfree(slave);
 fail:
        data->dma_slave = NULL;
-       kfree(slave);
        platform_device_put(pdev);
        return NULL;
 }
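
The at32ap700x fix above is twofold: the kzalloc() result was never checked, and the old single fail label could kfree() a pointer that had not been assigned yet on early failures, while later failures needed the free. A minimal userspace sketch of the resulting two-stage unwind (hypothetical names; calloc/free stand in for kzalloc/kfree):

#include <stdlib.h>

struct slave_data { int reg_width; };

/* stand-in for platform_device_add_data() or any later setup step */
static int add_data(struct slave_data *s) { return s->reg_width ? 0 : -1; }

static int add_device(struct slave_data **out)
{
        struct slave_data *slave;

        slave = calloc(1, sizeof(*slave));      /* kzalloc() stand-in */
        if (!slave)
                goto fail;                      /* nothing to free yet */

        slave->reg_width = 32;
        if (add_data(slave))
                goto fail_free;                 /* slave exists: free it */

        *out = slave;
        return 0;

fail_free:
        free(slave);
fail:
        *out = NULL;
        return -1;
}
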
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 5372b24ad049cf92cfd881927c21f5e559a54089..bb8c4b9ccb8019bb54cb585a5e8d1f2e77f3acb9 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -54,6 +54,7 @@ void __init setup_arch(char **cmdline_p)
 
        microblaze_cache_init();
 
+       invalidate_dcache();
        enable_dcache();
 
        invalidate_icache();
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 282d9306361f58f3ba3443648f73dd6955a75fd4..1ec06576f619bc8e3e73fecc9d04cabeb7263240 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -63,15 +63,21 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
        if (huge) {
 #ifdef CONFIG_HUGETLB_PAGE
                psize = get_slice_psize(mm, addr);
+               /* Mask the address for the correct page size */
+               addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
 #else
                BUG();
                psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
 #endif
-       } else
+       } else {
                psize = pte_pagesize_index(mm, addr, pte);
+               /* Mask the address for the standard page size.  If we
+                * have a 64k page kernel, but the hardware does not
+                * support 64k pages, this might be different from the
+                * hardware page size encoded in the slice table. */
+               addr &= PAGE_MASK;
+       }
 
-       /* Mask the address for the correct page size */
-       addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
 
        /* Build full vaddr */
        if (!is_kernel_addr(addr)) {
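
The point of moving the masking into the branches is that the mask must match the page size actually in use for that mapping; `addr &= ~((1UL << shift) - 1)` rounds an address down to a `1 << shift` boundary. A quick standalone demo of the arithmetic, with illustrative shift values:

#include <stdio.h>

int main(void)
{
        unsigned long addr = 0x12345678UL;
        unsigned int shifts[] = { 12, 16, 24 };   /* 4K, 64K, 16M pages */

        for (int i = 0; i < 3; i++) {
                unsigned long mask = ~((1UL << shifts[i]) - 1);
                /* rounds addr down to the start of its page */
                printf("shift %2u: %#lx -> %#lx\n",
                       shifts[i], addr, addr & mask);
        }
        return 0;
}
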
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 1ee66db003bec6809b8446e8b46b89602d032550..f5f79196721c5d9b540ddee8eaade847d0026094 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -784,9 +784,13 @@ static void xics_set_cpu_priority(unsigned char cppr)
 {
        struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
 
-       BUG_ON(os_cppr->index != 0);
+       /*
+        * we only really want to set the priority when there's
+        * just one cppr value on the stack
+        */
+       WARN_ON(os_cppr->index != 0);
 
-       os_cppr->stack[os_cppr->index] = cppr;
+       os_cppr->stack[0] = cppr;
 
        if (firmware_has_feature(FW_FEATURE_LPAR))
                lpar_cppr_info(cppr);
@@ -821,8 +825,14 @@ void xics_setup_cpu(void)
 
 void xics_teardown_cpu(void)
 {
+       struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
        int cpu = smp_processor_id();
 
+       /*
+        * we have to reset the cppr index to 0 because we're
+        * not going to return from the IPI
+        */
+       os_cppr->index = 0;
        xics_set_cpu_priority(0);
 
        /* Clear any pending IPI request */
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index f2ef4b619ce1a7415966169b89b5cb3779b198ab..c25dfac7dd769cb4a8d3b8e38280bc7b173ee1f6 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -293,12 +293,12 @@ struct _lowcore
        __u64   clock_comparator;               /* 0x02d0 */
        __u32   machine_flags;                  /* 0x02d8 */
        __u32   ftrace_func;                    /* 0x02dc */
-       __u8    pad_0x02f0[0x0300-0x02f0];      /* 0x02f0 */
+       __u8    pad_0x02e0[0x0300-0x02e0];      /* 0x02e0 */
 
        /* Interrupt response block */
        __u8    irb[64];                        /* 0x0300 */
 
-       __u8    pad_0x0400[0x0e00-0x0400];      /* 0x0400 */
+       __u8    pad_0x0340[0x0e00-0x0340];      /* 0x0340 */
 
        /*
         * 0xe00 contains the address of the IPL Parameter Information
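
The s390 lowcore is an architecturally fixed layout, so each pad array is sized as `end - start` and named after the offset where it begins; the hunk above corrects pads whose names and extents had drifted out of sync with the real offsets of the surrounding fields. A hedged sketch of the idiom with a compile-time layout check (a hypothetical struct, not the real lowcore):

#include <stddef.h>
#include <stdint.h>

/* Each pad is sized end - start, so the following field lands at a
 * fixed, architecturally defined offset. */
struct lowcore_like {
        uint64_t clock_comparator;              /* 0x000 */
        uint32_t machine_flags;                 /* 0x008 */
        uint32_t ftrace_func;                   /* 0x00c */
        uint8_t  pad_0x010[0x040 - 0x010];      /* 0x010 */
        uint8_t  irb[64];                       /* 0x040 */
};

/* Fails to compile if the layout ever drifts. */
_Static_assert(offsetof(struct lowcore_like, irb) == 0x040,
               "irb must stay at offset 0x40");
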
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index 3f7e2a22c7c2a6bd1d4303ac5427b5461ff43d6c..f6a389c996cbbb16ce9b8cf5daaf45b34db16188 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -132,7 +132,6 @@ ENTRY(tlb_protection_violation_store)
         mov    #1, r5
 
 call_handle_tlbmiss:
-       setup_frame_reg
        mov.l   1f, r0
        mov     r5, r8
        mov.l   @r0, r6
@@ -365,6 +364,8 @@ handle_exception:
         mov.l  @k2, k2         ! read out vector and keep in k2
 
 handle_exception_special:
+       setup_frame_reg
+
        ! Setup return address and jump to exception handler
        mov.l   7f, r9          ! fetch return address
        stc     r2_bank, r0     ! k2 (vector)
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index 88d28ec3780a621ecfcb4dba2ea9f670fda86f3a..e51168064e5669b114d903f6917979e3f2786d4e 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -540,6 +540,8 @@ void dwarf_free_frame(struct dwarf_frame *frame)
        mempool_free(frame, dwarf_frame_pool);
 }
 
+extern void ret_from_irq(void);
+
 /**
  *     dwarf_unwind_stack - unwind the stack
  *
@@ -678,6 +680,24 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
        addr = frame->cfa + reg->addr;
        frame->return_addr = __raw_readl(addr);
 
+       /*
+        * Ah, the joys of unwinding through interrupts.
+        *
+        * Interrupts are tricky - the DWARF info needs to be _really_
+        * accurate and unfortunately I'm seeing a lot of bogus DWARF
+        * info. For example, I've seen interrupts occur in epilogues
+        * just after the frame pointer (r14) had been restored. The
+        * problem was that the DWARF info claimed that the CFA could be
+        * reached by using the value of the frame pointer before it was
+        * restored.
+        *
+        * So until the compiler can be trusted to produce reliable
+        * DWARF info when it really matters, let's stop unwinding once
+        * we've calculated the function that was interrupted.
+        */
+       if (prev && prev->pc == (unsigned long)ret_from_irq)
+               frame->return_addr = 0;
+
        return frame;
 
 bail:
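
Zeroing return_addr at ret_from_irq gives consumers of the unwinder a natural termination condition. A rough sketch of how a caller's unwind loop would stop there, modeled loosely on the two-argument dwarf_unwind_stack() form above (the accessor is hypothetical):

struct dwarf_frame;

/* assumed interface: returns the next outer frame, or NULL when no
 * more DWARF info is available */
extern struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
                                              struct dwarf_frame *prev);
/* hypothetical accessor for frame->return_addr */
extern unsigned long frame_return_addr(struct dwarf_frame *frame);

static void walk_stack(unsigned long pc)
{
        struct dwarf_frame *frame = NULL;

        while ((frame = dwarf_unwind_stack(pc, frame)) != NULL) {
                pc = frame_return_addr(frame);
                if (pc == 0)    /* sentinel planted at ret_from_irq */
                        break;
                /* ... record pc in the trace ... */
        }
}
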
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index f0abd58c3a69c4c439e485653f3b86a4e66b94fe..2b15ae60c3a0257ed1ea48b34da0044c6a4bfaf6 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -70,8 +70,14 @@ ret_from_exception:
        CFI_STARTPROC simple
        CFI_DEF_CFA r14, 0
        CFI_REL_OFFSET 17, 64
-       CFI_REL_OFFSET 15, 0
+       CFI_REL_OFFSET 15, 60
        CFI_REL_OFFSET 14, 56
+       CFI_REL_OFFSET 13, 52
+       CFI_REL_OFFSET 12, 48
+       CFI_REL_OFFSET 11, 44
+       CFI_REL_OFFSET 10, 40
+       CFI_REL_OFFSET 9, 36
+       CFI_REL_OFFSET 8, 32
        preempt_stop()
 ENTRY(ret_from_irq)
        !
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index f125e5c551c0db827d33110dfd0d25688053e3c7..6e44519960c8f6413c3f0f54ba85ed591f172e81 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1356,6 +1356,7 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol)
 
        kfree(data->powernow_table);
        kfree(data);
+       per_cpu(powernow_data, pol->cpu) = NULL;
 
        return 0;
 }
@@ -1375,7 +1376,7 @@ static unsigned int powernowk8_get(unsigned int cpu)
        int err;
 
        if (!data)
-               return -EINVAL;
+               return 0;
 
        smp_call_function_single(cpu, query_values_on_cpu, &err, true);
        if (err)
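
The second hunk matters because powernowk8_get() returns unsigned int: handing back -EINVAL gives callers an enormous bogus "frequency" rather than an error, so 0 ("unknown") is the only sane failure value. A small demo of the wraparound:

#include <errno.h>
#include <stdio.h>

/* cpufreq ->get() style callback: unsigned return, 0 means "unknown" */
static unsigned int bogus_get(void)
{
        return -EINVAL;         /* wraps around to a huge value */
}

int main(void)
{
        /* with 32-bit int and EINVAL == 22 this prints 4294967274,
         * which a caller would treat as a (nonsense) frequency */
        printf("-EINVAL as unsigned int: %u\n", bogus_get());
        return 0;
}
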
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 296aba49472ae3cdcf7580bc79d60d83ed48ce3e..15578f180e596bee481f10451ec375b6ceb37f26 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -467,6 +467,9 @@ static int pit_ioport_read(struct kvm_io_device *this,
                return -EOPNOTSUPP;
 
        addr &= KVM_PIT_CHANNEL_MASK;
+       if (addr == 3)
+               return 0;
+
        s = &pit_state->channels[addr];
 
        mutex_lock(&pit_state->lock);
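
KVM_PIT_CHANNEL_MASK keeps the port offset in the range 0-3, but the PIT has only three channel entries; offset 3 is the write-only control word register, so reads of it must bail out before indexing channels[]. The shape of the guard as a standalone demo (toy values; the mask is assumed to be 0x3 as its use above implies):

#include <stdio.h>

#define CHANNEL_MASK 0x3

static int channels[3] = { 10, 20, 30 };

static int read_port(unsigned int addr)
{
        addr &= CHANNEL_MASK;           /* yields 0, 1, 2 *or 3* */
        if (addr == 3)
                return 0;               /* control word: no readback */
        return channels[addr];          /* index now provably < 3 */
}

int main(void)
{
        for (unsigned int a = 0; a < 4; a++)
                printf("port %u -> %d\n", a, read_port(a));
        return 0;
}
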
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1ddcad452add0808b3c3aac39c291db32c3e620a..a1e1bc9d412dadca87e4a7be6c7903ab0768a8a8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -670,7 +670,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
 {
        static int version;
        struct pvclock_wall_clock wc;
-       struct timespec now, sys, boot;
+       struct timespec boot;
 
        if (!wall_clock)
                return;
@@ -685,9 +685,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
         * wall clock specified here.  guest system time equals host
         * system time for us, thus we must fill in host boot time here.
         */
-       now = current_kernel_time();
-       ktime_get_ts(&sys);
-       boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));
+       getboottime(&boot);
 
        wc.sec = boot.tv_sec;
        wc.nsec = boot.tv_nsec;
@@ -762,6 +760,7 @@ static void kvm_write_guest_time(struct kvm_vcpu *v)
        local_irq_save(flags);
        kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
        ktime_get_ts(&ts);
+       monotonic_to_bootbased(&ts);
        local_irq_restore(flags);
 
        /* With all the info we got, fill in the values */
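
getboottime() computes in one call what the removed open-coded version did - wall time minus monotonic time - without racing between two separate clock reads, and monotonic_to_bootbased() keeps the per-vcpu timestamp on the same basis. A hedged userspace analogue using POSIX clocks (CLOCK_BOOTTIME is Linux-specific):

#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec real, since_boot;

        clock_gettime(CLOCK_REALTIME, &real);       /* wall clock now */
        clock_gettime(CLOCK_BOOTTIME, &since_boot); /* elapsed since boot */

        /* boot time = wall time - time since boot; sampling the two
         * clocks separately still has a tiny race, which is exactly
         * what the kernel-side getboottime() avoids */
        printf("booted around epoch %ld\n",
               (long)(real.tv_sec - since_boot.tv_sec));
        return 0;
}
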
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index b8bea100a160439516b3ebbf662dfd7f0b690a2d..b34390347c1685b624f36c13cc864e1c03ccf8ee 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -2868,6 +2868,21 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
                        },
                        .driver_data = "F.23",  /* cutoff BIOS version */
                },
+               /*
+                * Acer eMachines G725 has the same problem.  BIOS
+                * V1.03 is known to be broken.  V3.04 is known to
+                * work.  In between, there are V1.06, V2.06 and V3.03
+                * that we don't have much idea about.  For now,
+                * blacklist anything older than V3.04.
+                */
+               {
+                       .ident = "G725",
+                       .matches = {
+                               DMI_MATCH(DMI_SYS_VENDOR, "eMachines"),
+                               DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"),
+                       },
+                       .driver_data = "V3.04", /* cutoff BIOS version */
+               },
                { }     /* terminate list */
        };
        const struct dmi_system_id *dmi = dmi_first_match(sysids);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index f4ea5a8c325bf5a618450b37ae229f603c30c8de..d096fbcbc771a61a0537e28842d04f1149dcc7a7 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2875,7 +2875,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
         * write indication (used for PIO/DMA setup), result TF is
         * copied back and we don't whine too much about its failure.
         */
-       tf->flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+       tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
        if (scmd->sc_data_direction == DMA_TO_DEVICE)
                tf->flags |= ATA_TFLAG_WRITE;
 
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 741065c9da6700245a2ddb70f19908e3589f85c1..730ef3c384ca190818857c89ab044006dccfe75d 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -893,6 +893,9 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
                                       do_write);
        }
 
+       if (!do_write)
+               flush_dcache_page(page);
+
        qc->curbytes += qc->sect_size;
        qc->cursg_ofs += qc->sect_size;
 
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index f36defa3776482ec291546e454b1676339b9afec..57d965b7f52199775b738c78139672b8598cab09 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -808,6 +808,7 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv,
 
 exit:
        sdio_release_host(card->func);
+       kfree(tmpbuf);
 
        return ret;
 }
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index c6f3b48be9dd68cc37c4847eb8cab4b268ec4c90..dcb9083ecde0982dd061720be8e2e324d3bde5ed 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1951,8 +1951,10 @@ static int tty_fasync(int fd, struct file *filp, int on)
                        pid = task_pid(current);
                        type = PIDTYPE_PID;
                }
-               retval = __f_setown(filp, pid, type, 0);
+               get_pid(pid);
                spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+               retval = __f_setown(filp, pid, type, 0);
+               put_pid(pid);
                if (retval)
                        goto out;
        } else {
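
The tty_fasync change is the standard recipe for using a refcounted object after dropping the spinlock that protects the pointer to it: take a reference while still locked, unlock, do the work that may block, then drop the reference. A minimal sketch with a hypothetical refcounted object:

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct obj {
        atomic_int refcount;
        /* ... payload ... */
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *current_obj;         /* pointer protected by lock */

static void put_obj(struct obj *o)
{
        if (atomic_fetch_sub(&o->refcount, 1) == 1)
                free(o);                /* last reference dropped */
}

static void use_current(void (*blocking_op)(struct obj *))
{
        struct obj *o;

        pthread_mutex_lock(&lock);
        o = current_obj;                /* assumed non-NULL here */
        atomic_fetch_add(&o->refcount, 1);      /* pin before unlocking */
        pthread_mutex_unlock(&lock);

        blocking_op(o);         /* safe: our reference keeps o alive */
        put_obj(o);
}
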
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 4b34ade2332baaa50bb1ca1af9c45e1a9893d321..bd444dc93cf2ebf55c3e8545be249c6f025b7308 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -554,6 +554,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
                                (dbs_tuners_ins.up_threshold -
                                 dbs_tuners_ins.down_differential);
 
+               if (freq_next < policy->min)
+                       freq_next = policy->min;
+
                if (!dbs_tuners_ins.powersave_bias) {
                        __cpufreq_driver_target(policy, freq_next,
                                        CPUFREQ_RELATION_L);
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
index a1fce68e3bbe65aded6adf7bb05f3c465b3e5d92..17be051b7aa3109c4aef78b320823da2eecb5fc8 100644
--- a/drivers/gpu/drm/ati_pcigart.c
+++ b/drivers/gpu/drm/ati_pcigart.c
@@ -113,7 +113,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
 
                if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
                        DRM_ERROR("fail to set dma mask to 0x%Lx\n",
-                                 gart_info->table_mask);
+                                 (unsigned long long)gart_info->table_mask);
                        ret = 1;
                        goto done;
                }
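
The DRM_ERROR fix is the usual portability rule for printing dma_addr_t-sized values: the typedef may be 32 or 64 bits depending on configuration, so a 64-bit conversion specifier needs an explicit cast to unsigned long long, or varargs promotion and the format string disagree on 32-bit builds. Demo:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t mask = 0xffffffffULL;  /* plays the role of table_mask */

        /* portable regardless of how the typedef is sized: cast to the
         * type the format specifier actually expects */
        printf("mask = 0x%llx\n", (unsigned long long)mask);
        return 0;
}
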
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 46d88965852af5d19001ba24d237bbe293552bf4..ecac882e1d54374bc9cead7812a91a5ded29b740 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -120,7 +120,7 @@ const static struct intel_device_info intel_gm45_info = {
 
 const static struct intel_device_info intel_pineview_info = {
        .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
-       .has_pipe_cxsr = 1,
+       .need_gfx_hws = 1,
        .has_hotplug = 1,
 };
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dda787aafcc626ce064c3d59daf183c588fb5b69..b4c8c0230689038ba4c841ea4ce48b038a83c585 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3564,6 +3564,9 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
        uint32_t reloc_count = 0, i;
        int ret = 0;
 
+       if (relocs == NULL)
+           return 0;
+
        for (i = 0; i < buffer_count; i++) {
                struct drm_i915_gem_relocation_entry __user *user_relocs;
                int unwritten;
@@ -3653,7 +3656,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        struct drm_gem_object *batch_obj;
        struct drm_i915_gem_object *obj_priv;
        struct drm_clip_rect *cliprects = NULL;
-       struct drm_i915_gem_relocation_entry *relocs;
+       struct drm_i915_gem_relocation_entry *relocs = NULL;
        int ret = 0, ret2, i, pinned = 0;
        uint64_t exec_offset;
        uint32_t seqno, flush_domains, reloc_index;
@@ -3722,6 +3725,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                if (object_list[i] == NULL) {
                        DRM_ERROR("Invalid object handle %d at index %d\n",
                                   exec_list[i].handle, i);
+                       /* prevent error path from reading uninitialized data */
+                       args->buffer_count = i + 1;
                        ret = -EBADF;
                        goto err;
                }
@@ -3730,6 +3735,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                if (obj_priv->in_execbuffer) {
                        DRM_ERROR("Object %p appears more than once in object list\n",
                                   object_list[i]);
+                       /* prevent error path from reading uninitialized data */
+                       args->buffer_count = i + 1;
                        ret = -EBADF;
                        goto err;
                }
@@ -3926,6 +3933,7 @@ err:
 
        mutex_unlock(&dev->struct_mutex);
 
+pre_mutex_err:
        /* Copy the updated relocations out regardless of current error
         * state.  Failure to update the relocs would mean that the next
         * time userland calls execbuf, it would do so with presumed offset
@@ -3940,7 +3948,6 @@ err:
                        ret = ret2;
        }
 
-pre_mutex_err:
        drm_free_large(object_list);
        kfree(cliprects);
 
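Initializing relocs to NULL and moving the pre_mutex_err label above the copy-back lets every failure, however early, funnel through one exit path, with i915_gem_put_relocs_to_user() now tolerating the NULL it may be handed. The same shape in plain C, where free(NULL) is likewise a no-op:

#include <stdlib.h>

static int do_execbuffer(size_t count)
{
        char *relocs = NULL;            /* NULL so early exits can free */
        char *cliprects = NULL;
        int ret = 0;

        if (count == 0) {
                ret = -1;
                goto pre_mutex_err;     /* nothing allocated yet */
        }

        relocs = malloc(count);
        cliprects = malloc(count);
        if (!relocs || !cliprects) {
                ret = -1;
                goto pre_mutex_err;
        }

        /* ... use the buffers ... */

pre_mutex_err:
        free(relocs);                   /* free(NULL) is a no-op */
        free(cliprects);
        return ret;
}
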
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 89a071a3e6fb83233ec6443f5bafe7030ef8aa96..50ddf4a95c5e0a587c62b9c1a74905ac61e850ec 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -309,6 +309,22 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
        if (de_iir & DE_GSE)
                ironlake_opregion_gse_intr(dev);
 
+       if (de_iir & DE_PLANEA_FLIP_DONE)
+               intel_prepare_page_flip(dev, 0);
+
+       if (de_iir & DE_PLANEB_FLIP_DONE)
+               intel_prepare_page_flip(dev, 1);
+
+       if (de_iir & DE_PIPEA_VBLANK) {
+               drm_handle_vblank(dev, 0);
+               intel_finish_page_flip(dev, 0);
+       }
+
+       if (de_iir & DE_PIPEB_VBLANK) {
+               drm_handle_vblank(dev, 1);
+               intel_finish_page_flip(dev, 1);
+       }
+
        /* check event from PCH */
        if ((de_iir & DE_PCH_EVENT) &&
            (pch_iir & SDE_HOTPLUG_MASK)) {
@@ -844,11 +860,11 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
        if (!(pipeconf & PIPEACONF_ENABLE))
                return -EINVAL;
 
-       if (IS_IRONLAKE(dev))
-               return 0;
-
        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-       if (IS_I965G(dev))
+       if (IS_IRONLAKE(dev))
+               ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 
+                                           DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
+       else if (IS_I965G(dev))
                i915_enable_pipestat(dev_priv, pipe,
                                     PIPE_START_VBLANK_INTERRUPT_ENABLE);
        else
@@ -866,13 +882,14 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
 
-       if (IS_IRONLAKE(dev))
-               return;
-
        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-       i915_disable_pipestat(dev_priv, pipe,
-                             PIPE_VBLANK_INTERRUPT_ENABLE |
-                             PIPE_START_VBLANK_INTERRUPT_ENABLE);
+       if (IS_IRONLAKE(dev))
+               ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 
+                                            DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
+       else
+               i915_disable_pipestat(dev_priv, pipe,
+                                     PIPE_VBLANK_INTERRUPT_ENABLE |
+                                     PIPE_START_VBLANK_INTERRUPT_ENABLE);
        spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 }
 
@@ -1015,13 +1032,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        /* enable kind of interrupts always enabled */
-       u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT;
+       u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+                          DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
        u32 render_mask = GT_USER_INTERRUPT;
        u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
                           SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
 
        dev_priv->irq_mask_reg = ~display_mask;
-       dev_priv->de_irq_enable_reg = display_mask;
+       dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
 
        /* should always be able to generate irqs */
        I915_WRITE(DEIIR, I915_READ(DEIIR));
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index ddefc871edfe4806714a07d44444f1e08e5422af..79dd4026586fa55f6072b05bb27a9fce2734099a 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -157,6 +157,9 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
        adpa = I915_READ(PCH_ADPA);
 
        adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+       /* disable HPD first */
+       I915_WRITE(PCH_ADPA, adpa);
+       (void)I915_READ(PCH_ADPA);
 
        adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
                        ADPA_CRT_HOTPLUG_WARMUP_10MS |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 45da78ef4a926c4f0d2e84c812bd6f870a2f425d..12775df1bbfd195716630d0146c99c03ab57a4ca 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1638,6 +1638,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
        case DRM_MODE_DPMS_OFF:
                DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
 
+               drm_vblank_off(dev, pipe);
                /* Disable display plane */
                temp = I915_READ(dspcntr_reg);
                if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
@@ -2519,6 +2520,10 @@ static void g4x_update_wm(struct drm_device *dev,  int planea_clock,
                sr_entries = roundup(sr_entries / cacheline_size, 1);
                DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
                I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+       } else {
+               /* Turn off self refresh if both pipes are enabled */
+               I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+                                       & ~FW_BLC_SELF_EN);
        }
 
        DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
@@ -2562,6 +2567,10 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
                        srwm = 1;
                srwm &= 0x3f;
                I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+       } else {
+               /* Turn off self refresh if both pipes are enabled */
+               I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+                                       & ~FW_BLC_SELF_EN);
        }
 
        DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -2630,6 +2639,10 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
                if (srwm < 0)
                        srwm = 1;
                I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
+       } else {
+               /* Turn off self refresh if both pipes are enabled */
+               I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+                                       & ~FW_BLC_SELF_EN);
        }
 
        DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
@@ -3984,6 +3997,12 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
        spin_lock_irqsave(&dev->event_lock, flags);
        work = intel_crtc->unpin_work;
        if (work == NULL || !work->pending) {
+               if (work && !work->pending) {
+                       obj_priv = work->obj->driver_private;
+                       DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
+                                        obj_priv,
+                                        atomic_read(&obj_priv->pending_flip));
+               }
                spin_unlock_irqrestore(&dev->event_lock, flags);
                return;
        }
@@ -4005,7 +4024,10 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
        spin_unlock_irqrestore(&dev->event_lock, flags);
 
        obj_priv = work->obj->driver_private;
-       if (atomic_dec_and_test(&obj_priv->pending_flip))
+
+       /* Initial scanout buffer will have a 0 pending flip count */
+       if ((atomic_read(&obj_priv->pending_flip) == 0) ||
+           atomic_dec_and_test(&obj_priv->pending_flip))
                DRM_WAKEUP(&dev_priv->pending_flip_queue);
        schedule_work(&work->work);
 }
@@ -4018,8 +4040,11 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
        unsigned long flags;
 
        spin_lock_irqsave(&dev->event_lock, flags);
-       if (intel_crtc->unpin_work)
+       if (intel_crtc->unpin_work) {
                intel_crtc->unpin_work->pending = 1;
+       } else {
+               DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
+       }
        spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
@@ -4053,6 +4078,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        /* We borrow the event spin lock for protecting unpin_work */
        spin_lock_irqsave(&dev->event_lock, flags);
        if (intel_crtc->unpin_work) {
+               DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
                spin_unlock_irqrestore(&dev->event_lock, flags);
                kfree(work);
                mutex_unlock(&dev->struct_mutex);
@@ -4066,7 +4092,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
        ret = intel_pin_and_fence_fb_obj(dev, obj);
        if (ret != 0) {
+               DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
+                         obj->driver_private);
                kfree(work);
+               intel_crtc->unpin_work = NULL;
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index aa74e59bec61b0f19c40817fce49bde445d461ae..b1d0acbae4e458270377f400691db4766384f5a5 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -611,7 +611,7 @@ static const struct dmi_system_id bad_lid_status[] = {
        {
                .ident = "Samsung SX20S",
                .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Phoenix Technologies LTD"),
+                       DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
                        DMI_MATCH(DMI_BOARD_NAME, "SX20S"),
                },
        },
@@ -622,6 +622,13 @@ static const struct dmi_system_id bad_lid_status[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"),
                },
        },
+       {
+               .ident = "Aspire 1810T",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"),
+               },
+       },
        {
                .ident = "PC-81005",
                .matches = {
@@ -643,7 +650,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
 {
        enum drm_connector_status status = connector_status_connected;
 
-       if (!acpi_lid_open() && !dmi_check_system(bad_lid_status))
+       if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
                status = connector_status_disconnected;
 
        return status;
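
Swapping the && operands is a short-circuit fix: on blacklisted machines dmi_check_system() matches, so acpi_lid_open() - whose answer those machines misreport - is never consulted at all. The general pattern, reliable test first:

#include <stdbool.h>
#include <stdio.h>

static bool on_blacklist(void)          /* cheap, trustworthy check */
{
        return true;                    /* pretend this machine lies */
}

static bool lid_open(void)              /* misreports on listed machines */
{
        printf("lid actually queried\n");
        return false;
}

int main(void)
{
        /* blacklist first: && short-circuits, lid_open() never runs */
        bool disconnected = !on_blacklist() && !lid_open();

        printf("disconnected: %d\n", disconnected);
        return 0;
}
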
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index eaacfd0920df7403f2e3d09b9479a411e9f581c8..82678d30ab06504ba51840f0d5c773c53f330ae8 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2345,6 +2345,14 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
                connector->connector_type = DRM_MODE_CONNECTOR_VGA;
                intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
                                        (1 << INTEL_ANALOG_CLONE_BIT);
+       } else if (flags & SDVO_OUTPUT_CVBS0) {
+
+               sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0;
+               encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+               connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+               sdvo_priv->is_tv = true;
+               intel_output->needs_tv_clock = true;
+               intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
        } else if (flags & SDVO_OUTPUT_LVDS0) {
 
                sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 11c9a3fe6810daeb40ea066c2d929b8edce3f5a8..c0d4650cdb794103a3fa7fe79d0a43c4806713c9 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -354,11 +354,17 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
                return RREG32(RADEON_CRTC2_CRNT_FRAME);
 }
 
+/* Whoever calls radeon_fence_emit should call ring_lock and ask
+ * for enough space (today the callers are ib schedule and buffer move) */
 void r100_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence)
 {
-       /* Who ever call radeon_fence_emit should call ring_lock and ask
-        * for enough space (today caller are ib schedule and buffer move) */
+       /* We have to make sure that caches are flushed before
+        * the CPU might read something from VRAM. */
+       radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
+       radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
+       radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
+       radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
        /* Wait until IDLE & CLEAN */
        radeon_ring_write(rdev, PACKET0(0x1720, 0));
        radeon_ring_write(rdev, (1 << 16) | (1 << 17));
@@ -3369,7 +3375,6 @@ int r100_suspend(struct radeon_device *rdev)
 
 void r100_fini(struct radeon_device *rdev)
 {
-       r100_suspend(rdev);
        r100_cp_fini(rdev);
        r100_wb_fini(rdev);
        r100_ib_fini(rdev);
@@ -3481,13 +3486,12 @@ int r100_init(struct radeon_device *rdev)
        if (r) {
                /* Something went wrong with the accel init; stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
-               r100_suspend(rdev);
                r100_cp_fini(rdev);
                r100_wb_fini(rdev);
                r100_ib_fini(rdev);
+               radeon_irq_kms_fini(rdev);
                if (rdev->flags & RADEON_IS_PCI)
                        r100_pci_gart_fini(rdev);
-               radeon_irq_kms_fini(rdev);
                rdev->accel_working = false;
        }
        return 0;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 0051d11b907c162e011e8ed7795507f4ca0e610f..43b55a030b4d5c35ec86e3b79cb86e01314ee67e 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -506,11 +506,14 @@ void r300_vram_info(struct radeon_device *rdev)
 
        /* DDR for all card after R300 & IGP */
        rdev->mc.vram_is_ddr = true;
+
        tmp = RREG32(RADEON_MEM_CNTL);
-       if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
-               rdev->mc.vram_width = 128;
-       } else {
-               rdev->mc.vram_width = 64;
+       tmp &= R300_MEM_NUM_CHANNELS_MASK;
+       switch (tmp) {
+       case 0: rdev->mc.vram_width = 64; break;
+       case 1: rdev->mc.vram_width = 128; break;
+       case 2: rdev->mc.vram_width = 256; break;
+       default:  rdev->mc.vram_width = 128; break;
        }
 
        r100_vram_init_sizes(rdev);
@@ -1327,7 +1330,6 @@ int r300_suspend(struct radeon_device *rdev)
 
 void r300_fini(struct radeon_device *rdev)
 {
-       r300_suspend(rdev);
        r100_cp_fini(rdev);
        r100_wb_fini(rdev);
        r100_ib_fini(rdev);
@@ -1418,15 +1420,15 @@ int r300_init(struct radeon_device *rdev)
        if (r) {
                /* Something went wrong with the accel init; stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
-               r300_suspend(rdev);
                r100_cp_fini(rdev);
                r100_wb_fini(rdev);
                r100_ib_fini(rdev);
+               radeon_irq_kms_fini(rdev);
                if (rdev->flags & RADEON_IS_PCIE)
                        rv370_pcie_gart_fini(rdev);
                if (rdev->flags & RADEON_IS_PCI)
                        r100_pci_gart_fini(rdev);
-               radeon_irq_kms_fini(rdev);
+               radeon_agp_fini(rdev);
                rdev->accel_working = false;
        }
        return 0;
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 4526faaacca87e06a56b47f404c96d992d482c35..d9373246c97f523513ee4a844ea3cb957b497944 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -389,16 +389,15 @@ int r420_init(struct radeon_device *rdev)
        if (r) {
                /* Something went wrong with the accel init; stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
-               r420_suspend(rdev);
                r100_cp_fini(rdev);
                r100_wb_fini(rdev);
                r100_ib_fini(rdev);
+               radeon_irq_kms_fini(rdev);
                if (rdev->flags & RADEON_IS_PCIE)
                        rv370_pcie_gart_fini(rdev);
                if (rdev->flags & RADEON_IS_PCI)
                        r100_pci_gart_fini(rdev);
                radeon_agp_fini(rdev);
-               radeon_irq_kms_fini(rdev);
                rdev->accel_working = false;
        }
        return 0;
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 9a189072f2b93e09299ff63b1464d32e260ba657..ddf5731eba0d8f294294a7a3bc4674ce2040a52e 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -294,13 +294,12 @@ int r520_init(struct radeon_device *rdev)
        if (r) {
                /* Something went wrong with the accel init; stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
-               rv515_suspend(rdev);
                r100_cp_fini(rdev);
                r100_wb_fini(rdev);
                r100_ib_fini(rdev);
+               radeon_irq_kms_fini(rdev);
                rv370_pcie_gart_fini(rdev);
                radeon_agp_fini(rdev);
-               radeon_irq_kms_fini(rdev);
                rdev->accel_working = false;
        }
        return 0;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 1b6d0001b20e86fad42fa10bd2b6483200088397..a1198d99cdf97ac7f99ea26f6a73a583e3859eb0 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1654,6 +1654,12 @@ void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
        rdev->cp.align_mask = 16 - 1;
 }
 
+void r600_cp_fini(struct radeon_device *rdev)
+{
+       r600_cp_stop(rdev);
+       radeon_ring_fini(rdev);
+}
+
 
 /*
  * GPU scratch registers helpers function.
@@ -1861,6 +1867,12 @@ int r600_startup(struct radeon_device *rdev)
                        return r;
        }
        r600_gpu_init(rdev);
+       r = r600_blit_init(rdev);
+       if (r) {
+               r600_blit_fini(rdev);
+               rdev->asic->copy = NULL;
+               dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+       }
        /* pin copy shader into vram */
        if (rdev->r600_blit.shader_obj) {
                r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
@@ -2045,19 +2057,15 @@ int r600_init(struct radeon_device *rdev)
        r = r600_pcie_gart_init(rdev);
        if (r)
                return r;
-       r = r600_blit_init(rdev);
-       if (r) {
-               r600_blit_fini(rdev);
-               rdev->asic->copy = NULL;
-               dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
-       }
 
        rdev->accel_working = true;
        r = r600_startup(rdev);
        if (r) {
-               r600_suspend(rdev);
+               dev_err(rdev->dev, "disabling GPU acceleration\n");
+               r600_cp_fini(rdev);
                r600_wb_fini(rdev);
-               radeon_ring_fini(rdev);
+               r600_irq_fini(rdev);
+               radeon_irq_kms_fini(rdev);
                r600_pcie_gart_fini(rdev);
                rdev->accel_working = false;
        }
@@ -2083,20 +2091,17 @@ int r600_init(struct radeon_device *rdev)
 
 void r600_fini(struct radeon_device *rdev)
 {
-       /* Suspend operations */
-       r600_suspend(rdev);
-
        r600_audio_fini(rdev);
        r600_blit_fini(rdev);
+       r600_cp_fini(rdev);
+       r600_wb_fini(rdev);
        r600_irq_fini(rdev);
        radeon_irq_kms_fini(rdev);
-       radeon_ring_fini(rdev);
-       r600_wb_fini(rdev);
        r600_pcie_gart_fini(rdev);
+       radeon_agp_fini(rdev);
        radeon_gem_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_clocks_fini(rdev);
-       radeon_agp_fini(rdev);
        radeon_bo_fini(rdev);
        radeon_atombios_fini(rdev);
        kfree(rdev->bios);
@@ -2900,3 +2905,18 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
        return 0;
 #endif
 }
+
+/**
+ * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
+ * @rdev: radeon device structure
+ * @bo: buffer object struct which userspace is waiting for idle
+ *
+ * Some R6XX/R7XX chips don't seem to take an HDP flush performed
+ * through the ring buffer into account, which leads to rendering
+ * corruption (see http://bugzilla.kernel.org/show_bug.cgi?id=15186).
+ * To avoid this we perform the HDP flush directly by writing the
+ * register through MMIO.
+ */
+void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
+{
+       WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+}
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 99e2c3891a7dea017c681f136b52e15dfda2ec6c..b1c1d343345464927942c4926338ddc300765935 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -35,7 +35,7 @@
  */
 static int r600_audio_chipset_supported(struct radeon_device *rdev)
 {
-       return rdev->family >= CHIP_R600
+       return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710)
                || rdev->family == CHIP_RS600
                || rdev->family == CHIP_RS690
                || rdev->family == CHIP_RS740;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 2d5f2bfa72016ccccacec110019b59dd9cc51ed2..f57480ba135521f7516790fc1e25d592d69f589a 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -661,6 +661,13 @@ struct radeon_asic {
        void (*hpd_fini)(struct radeon_device *rdev);
        bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
        void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+       /* ioctl hw specific callback. Some hw might want to perform a
+        * special operation on a specific ioctl. For instance, on wait
+        * idle some hw might want to perform an HDP flush through MMIO,
+        * as it seems some R6XX/R7XX hw doesn't take the HDP flush into
+        * account if it is programmed through the ring.
+        */
+       void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
 };
 
 /*
@@ -1143,6 +1150,7 @@ extern bool r600_card_posted(struct radeon_device *rdev);
 extern void r600_cp_stop(struct radeon_device *rdev);
 extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
 extern int r600_cp_resume(struct radeon_device *rdev);
+extern void r600_cp_fini(struct radeon_device *rdev);
 extern int r600_count_pipe_bits(uint32_t val);
 extern int r600_gart_clear_page(struct radeon_device *rdev, int i);
 extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index f2fbd2e4e9df59479660f26811d04a92b8955b95..05ee1aeac3fdce21cd72f3735c156be2c0ab2198 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -117,6 +117,7 @@ static struct radeon_asic r100_asic = {
        .hpd_fini = &r100_hpd_fini,
        .hpd_sense = &r100_hpd_sense,
        .hpd_set_polarity = &r100_hpd_set_polarity,
+       .ioctl_wait_idle = NULL,
 };
 
 
@@ -176,6 +177,7 @@ static struct radeon_asic r300_asic = {
        .hpd_fini = &r100_hpd_fini,
        .hpd_sense = &r100_hpd_sense,
        .hpd_set_polarity = &r100_hpd_set_polarity,
+       .ioctl_wait_idle = NULL,
 };
 
 /*
@@ -219,6 +221,7 @@ static struct radeon_asic r420_asic = {
        .hpd_fini = &r100_hpd_fini,
        .hpd_sense = &r100_hpd_sense,
        .hpd_set_polarity = &r100_hpd_set_polarity,
+       .ioctl_wait_idle = NULL,
 };
 
 
@@ -267,6 +270,7 @@ static struct radeon_asic rs400_asic = {
        .hpd_fini = &r100_hpd_fini,
        .hpd_sense = &r100_hpd_sense,
        .hpd_set_polarity = &r100_hpd_set_polarity,
+       .ioctl_wait_idle = NULL,
 };
 
 
@@ -323,6 +327,7 @@ static struct radeon_asic rs600_asic = {
        .hpd_fini = &rs600_hpd_fini,
        .hpd_sense = &rs600_hpd_sense,
        .hpd_set_polarity = &rs600_hpd_set_polarity,
+       .ioctl_wait_idle = NULL,
 };
 
 
@@ -370,6 +375,7 @@ static struct radeon_asic rs690_asic = {
        .hpd_fini = &rs600_hpd_fini,
        .hpd_sense = &rs600_hpd_sense,
        .hpd_set_polarity = &rs600_hpd_set_polarity,
+       .ioctl_wait_idle = NULL,
 };
 
 
@@ -421,6 +427,7 @@ static struct radeon_asic rv515_asic = {
        .hpd_fini = &rs600_hpd_fini,
        .hpd_sense = &rs600_hpd_sense,
        .hpd_set_polarity = &rs600_hpd_set_polarity,
+       .ioctl_wait_idle = NULL,
 };
 
 
@@ -463,6 +470,7 @@ static struct radeon_asic r520_asic = {
        .hpd_fini = &rs600_hpd_fini,
        .hpd_sense = &rs600_hpd_sense,
        .hpd_set_polarity = &rs600_hpd_set_polarity,
+       .ioctl_wait_idle = NULL,
 };
 
 /*
@@ -504,6 +512,7 @@ void r600_hpd_fini(struct radeon_device *rdev);
 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
 void r600_hpd_set_polarity(struct radeon_device *rdev,
                           enum radeon_hpd_id hpd);
+extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
 
 static struct radeon_asic r600_asic = {
        .init = &r600_init,
@@ -538,6 +547,7 @@ static struct radeon_asic r600_asic = {
        .hpd_fini = &r600_hpd_fini,
        .hpd_sense = &r600_hpd_sense,
        .hpd_set_polarity = &r600_hpd_set_polarity,
+       .ioctl_wait_idle = r600_ioctl_wait_idle,
 };
 
 /*
@@ -582,6 +592,7 @@ static struct radeon_asic rv770_asic = {
        .hpd_fini = &r600_hpd_fini,
        .hpd_sense = &r600_hpd_sense,
        .hpd_set_polarity = &r600_hpd_set_polarity,
+       .ioctl_wait_idle = r600_ioctl_wait_idle,
 };
 
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 579c8920e08144b4f6eea6311e706d0aa921ca5b..e7b19440102efc61f5ca99701840c7b0c0e15573 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -971,8 +971,7 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
                         lvds->native_mode.vdisplay);
 
                lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c);
-               if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0)
-                       lvds->panel_vcc_delay = 2000;
+               lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000);
 
                lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24);
                lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf;
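
panel_vcc_delay is unsigned (a u16, as the min_t() type argument shows), so the removed `< 0` half of the range check could never be true; min_t() states the intended upper clamp directly. A demo of both points (simplified min_t, not the kernel macro):

#include <stdio.h>

/* simplified stand-in for the kernel's type-safe min_t() */
#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
        unsigned short delay = 60000;   /* bogus BIOS value */

        /* an unsigned value is never negative, so this branch is dead
         * code - exactly what the old check relied on by mistake */
        if (delay < 0)
                printf("never reached\n");

        printf("clamped: %u\n", min_t(unsigned short, delay, 2000));
        return 0;
}
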
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 55266416fa478821db5f1b6360e285aa8138ca1c..2d8e5a70f284dac43baec6f59e7f4fef8d7ba152 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1343,7 +1343,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
                                radeon_connector->dac_load_detect = false;
                        drm_connector_attach_property(&radeon_connector->base,
                                                      rdev->mode_info.load_detect_property,
-                                                     1);
+                                                     radeon_connector->dac_load_detect);
                        drm_connector_attach_property(&radeon_connector->base,
                                                      rdev->mode_info.tv_std_property,
                                                      radeon_combios_get_tv_info(rdev));
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 0e1325e1853464d4e46552a2a6cfd523d406e4e6..db8e9a355a01624c32c16bb3ac002b90c97277b3 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -308,6 +308,9 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
        }
        robj = gobj->driver_private;
        r = radeon_bo_wait(robj, NULL, false);
+       /* callback hw specific functions if any */
+       if (robj->rdev->asic->ioctl_wait_idle)
+               robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
        mutex_lock(&dev->struct_mutex);
        drm_gem_object_unreference(gobj);
        mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 9f5418983e2a5f30af108287aaa18f19d6bc70b3..287fcebfb4e67733ff27aef939380b5b92f3a742 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -223,15 +223,31 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
        return 0;
 }
 
+int rs400_mc_wait_for_idle(struct radeon_device *rdev)
+{
+       unsigned i;
+       uint32_t tmp;
+
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               /* read MC_STATUS */
+               tmp = RREG32(0x0150);
+               if (tmp & (1 << 2)) {
+                       return 0;
+               }
+               DRM_UDELAY(1);
+       }
+       return -1;
+}
+
 void rs400_gpu_init(struct radeon_device *rdev)
 {
        /* FIXME: HDP same place on rs400 ? */
        r100_hdp_reset(rdev);
        /* FIXME: is this correct ? */
        r420_pipes_init(rdev);
-       if (r300_mc_wait_for_idle(rdev)) {
-               printk(KERN_WARNING "Failed to wait MC idle while "
-                      "programming pipes. Bad things might happen.\n");
+       if (rs400_mc_wait_for_idle(rdev)) {
+               printk(KERN_WARNING "rs400: Failed to wait MC idle while "
+                      "programming pipes. Bad things might happen. %08x\n", RREG32(0x150));
        }
 }
 
@@ -370,8 +386,8 @@ void rs400_mc_program(struct radeon_device *rdev)
        r100_mc_stop(rdev, &save);
 
        /* Wait for mc idle */
-       if (r300_mc_wait_for_idle(rdev))
-               dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
+       if (rs400_mc_wait_for_idle(rdev))
+               dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
        WREG32(R_000148_MC_FB_LOCATION,
                S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
                S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
@@ -448,7 +464,6 @@ int rs400_suspend(struct radeon_device *rdev)
 
 void rs400_fini(struct radeon_device *rdev)
 {
-       rs400_suspend(rdev);
        r100_cp_fini(rdev);
        r100_wb_fini(rdev);
        r100_ib_fini(rdev);
@@ -527,7 +542,6 @@ int rs400_init(struct radeon_device *rdev)
        if (r) {
                /* Something went wrong with the accel init; stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
-               rs400_suspend(rdev);
                r100_cp_fini(rdev);
                r100_wb_fini(rdev);
                r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index d5255751e7b365b5c987561f9f3bebb74e87a441..c3818562a13eb64f647ec74922605bf673bdf0e6 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -610,7 +610,6 @@ int rs600_suspend(struct radeon_device *rdev)
 
 void rs600_fini(struct radeon_device *rdev)
 {
-       rs600_suspend(rdev);
        r100_cp_fini(rdev);
        r100_wb_fini(rdev);
        r100_ib_fini(rdev);
@@ -689,7 +688,6 @@ int rs600_init(struct radeon_device *rdev)
        if (r) {
                /* Something went wrong with the accel init; stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
-               rs600_suspend(rdev);
                r100_cp_fini(rdev);
                r100_wb_fini(rdev);
                r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index cd31da913771d898c3d68946f0f8f3399becf382..06e2771aee5a8443b94e44e80da3c3b7928590b5 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -676,7 +676,6 @@ int rs690_suspend(struct radeon_device *rdev)
 
 void rs690_fini(struct radeon_device *rdev)
 {
-       rs690_suspend(rdev);
        r100_cp_fini(rdev);
        r100_wb_fini(rdev);
        r100_ib_fini(rdev);
@@ -756,7 +755,6 @@ int rs690_init(struct radeon_device *rdev)
        if (r) {
                /* Something went wrong with the accel init; stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
-               rs690_suspend(rdev);
                r100_cp_fini(rdev);
                r100_wb_fini(rdev);
                r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 62756717b0449bdbc3be45ec40f4cd2bc99e1058..0e1e6b8632b86c0e1b6d9665f831f684c11c1dcc 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -537,7 +537,6 @@ void rv515_set_safe_registers(struct radeon_device *rdev)
 
 void rv515_fini(struct radeon_device *rdev)
 {
-       rv515_suspend(rdev);
        r100_cp_fini(rdev);
        r100_wb_fini(rdev);
        r100_ib_fini(rdev);
@@ -615,13 +614,12 @@ int rv515_init(struct radeon_device *rdev)
        if (r) {
                /* Something went wrong with the accel init; stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
-               rv515_suspend(rdev);
                r100_cp_fini(rdev);
                r100_wb_fini(rdev);
                r100_ib_fini(rdev);
+               radeon_irq_kms_fini(rdev);
                rv370_pcie_gart_fini(rdev);
                radeon_agp_fini(rdev);
-               radeon_irq_kms_fini(rdev);
                rdev->accel_working = false;
        }
        return 0;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index afd9e8213c297adbe1ba5467bbf3b1e58b794708..5943d561fd1e2685803175c176c0624912ff9b99 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -887,6 +887,12 @@ static int rv770_startup(struct radeon_device *rdev)
                        return r;
        }
        rv770_gpu_init(rdev);
+       r = r600_blit_init(rdev);
+       if (r) {
+               r600_blit_fini(rdev);
+               rdev->asic->copy = NULL;
+               dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+       }
        /* pin copy shader into vram */
        if (rdev->r600_blit.shader_obj) {
                r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
@@ -1055,19 +1061,15 @@ int rv770_init(struct radeon_device *rdev)
        r = r600_pcie_gart_init(rdev);
        if (r)
                return r;
-       r = r600_blit_init(rdev);
-       if (r) {
-               r600_blit_fini(rdev);
-               rdev->asic->copy = NULL;
-               dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
-       }
 
        rdev->accel_working = true;
        r = rv770_startup(rdev);
        if (r) {
-               rv770_suspend(rdev);
+               dev_err(rdev->dev, "disabling GPU acceleration\n");
+               r600_cp_fini(rdev);
                r600_wb_fini(rdev);
-               radeon_ring_fini(rdev);
+               r600_irq_fini(rdev);
+               radeon_irq_kms_fini(rdev);
                rv770_pcie_gart_fini(rdev);
                rdev->accel_working = false;
        }
@@ -1089,13 +1091,11 @@ int rv770_init(struct radeon_device *rdev)
 
 void rv770_fini(struct radeon_device *rdev)
 {
-       rv770_suspend(rdev);
-
        r600_blit_fini(rdev);
+       r600_cp_fini(rdev);
+       r600_wb_fini(rdev);
        r600_irq_fini(rdev);
        radeon_irq_kms_fini(rdev);
-       radeon_ring_fini(rdev);
-       r600_wb_fini(rdev);
        rv770_pcie_gart_fini(rdev);
        radeon_gem_fini(rdev);
        radeon_fence_driver_fini(rdev);
index a31e77c776aeee3d0f14409710e5439de44f605b..b8156b4893bb51f107b83da463a5906f01641553 100644 (file)
@@ -179,7 +179,7 @@ static const unsigned short normal_i2c[] = { 0x58, 0x5C, I2C_CLIENT_END };
  *
  * Some, but not all, of these voltages have low/high limits.
  */
-#define ADT7462_VOLT_COUNT     12
+#define ADT7462_VOLT_COUNT     13
 
 #define ADT7462_VENDOR         0x41
 #define ADT7462_DEVICE         0x62
index cadcbd90ff3bc4f0c89db6cdd9c9da82becc46f1..72ff2c4e757d5255fd4c90daf72ec6f39c39745b 100644 (file)
@@ -851,17 +851,16 @@ static struct lm78_data *lm78_update_device(struct device *dev)
 static int __init lm78_isa_found(unsigned short address)
 {
        int val, save, found = 0;
-
-       /* We have to request the region in two parts because some
-          boards declare base+4 to base+7 as a PNP device */
-       if (!request_region(address, 4, "lm78")) {
-               pr_debug("lm78: Failed to request low part of region\n");
-               return 0;
-       }
-       if (!request_region(address + 4, 4, "lm78")) {
-               pr_debug("lm78: Failed to request high part of region\n");
-               release_region(address, 4);
-               return 0;
+       int port;
+
+       /* Some boards declare base+0 to base+7 as a PNP device, some base+4
+        * to base+7 and some base+5 to base+6. So we'd better request each port
+        * individually for the probing phase. */
+       for (port = address; port < address + LM78_EXTENT; port++) {
+               if (!request_region(port, 1, "lm78")) {
+                       pr_debug("lm78: Failed to request port 0x%x\n", port);
+                       goto release;
+               }
        }
 
 #define REALLY_SLOW_IO
@@ -925,8 +924,8 @@ static int __init lm78_isa_found(unsigned short address)
                        val & 0x80 ? "LM79" : "LM78", (int)address);
 
  release:
-       release_region(address + 4, 4);
-       release_region(address, 4);
+       for (port--; port >= address; port--)
+               release_region(port, 1);
        return found;
 }
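
The same per-port probing pattern is applied to the w83781d driver below. A condensed sketch of the acquire/roll-back logic (EXTENT and the "driver" string are placeholders): on a failed request_region(), port still names the port that was not acquired, so releasing starts at port - 1 and walks back to the base; after a successful probe, port equals address + EXTENT and the same loop releases every port.

        for (port = address; port < address + EXTENT; port++) {
                if (!request_region(port, 1, "driver"))
                        goto release;   /* ports [address, port) are held */
        }
        /* ... probe the chip's registers ... */
 release:
        for (port--; port >= address; port--)
                release_region(port, 1);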
 
index 05f9225b6f944dd7007c752eb9c15786f41c6740..32d4adee73db650fc5ba2b2cc42586b12b642cd3 100644 (file)
@@ -1793,17 +1793,17 @@ static int __init
 w83781d_isa_found(unsigned short address)
 {
        int val, save, found = 0;
-
-       /* We have to request the region in two parts because some
-          boards declare base+4 to base+7 as a PNP device */
-       if (!request_region(address, 4, "w83781d")) {
-               pr_debug("w83781d: Failed to request low part of region\n");
-               return 0;
-       }
-       if (!request_region(address + 4, 4, "w83781d")) {
-               pr_debug("w83781d: Failed to request high part of region\n");
-               release_region(address, 4);
-               return 0;
+       int port;
+
+       /* Some boards declare base+0 to base+7 as a PNP device, some base+4
+        * to base+7 and some base+5 to base+6. So we'd better request each port
+        * individually for the probing phase. */
+       for (port = address; port < address + W83781D_EXTENT; port++) {
+               if (!request_region(port, 1, "w83781d")) {
+                       pr_debug("w83781d: Failed to request port 0x%x\n",
+                                port);
+                       goto release;
+               }
        }
 
 #define REALLY_SLOW_IO
@@ -1877,8 +1877,8 @@ w83781d_isa_found(unsigned short address)
                        val == 0x30 ? "W83782D" : "W83781D", (int)address);
 
  release:
-       release_region(address + 4, 4);
-       release_region(address, 4);
+       for (port--; port >= address; port--)
+               release_region(port, 1);
        return found;
 }
 
index dd3dfe42d5a9d081c590018f1ac6325cfecdaf12..a20a71e5efd3ce0607993a84795e943a1706359d 100644 (file)
@@ -4075,8 +4075,10 @@ static void mddev_delayed_delete(struct work_struct *ws)
 {
        mddev_t *mddev = container_of(ws, mddev_t, del_work);
 
-       if (mddev->private == &md_redundancy_group) {
+       if (mddev->private) {
                sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
+               if (mddev->private != (void*)1)
+                       sysfs_remove_group(&mddev->kobj, mddev->private);
                if (mddev->sysfs_action)
                        sysfs_put(mddev->sysfs_action);
                mddev->sysfs_action = NULL;
@@ -4287,10 +4289,7 @@ static int do_md_run(mddev_t * mddev)
                sysfs_notify_dirent(rdev->sysfs_state);
        }
 
-       md_probe(mddev->unit, NULL, NULL);
        disk = mddev->gendisk;
-       if (!disk)
-               return -ENOMEM;
 
        spin_lock(&pers_lock);
        pers = find_pers(mddev->level, mddev->clevel);
@@ -4530,8 +4529,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
                        mddev->queue->unplug_fn = NULL;
                        mddev->queue->backing_dev_info.congested_fn = NULL;
                        module_put(mddev->pers->owner);
-                       if (mddev->pers->sync_request)
-                               mddev->private = &md_redundancy_group;
+                       if (mddev->pers->sync_request && mddev->private == NULL)
+                               mddev->private = (void*)1;
                        mddev->pers = NULL;
                        /* tell userspace to handle 'inactive' */
                        sysfs_notify_dirent(mddev->sysfs_state);
@@ -4578,9 +4577,6 @@ out:
                }
                mddev->bitmap_info.offset = 0;
 
-               /* make sure all md_delayed_delete calls have finished */
-               flush_scheduled_work();
-
                export_array(mddev);
 
                mddev->array_sectors = 0;
index e84204eb12dff9c87daaf8ecbdba4462eda8971f..ceb24afdc147aada31741b10213443375617257f 100644 (file)
@@ -5136,9 +5136,8 @@ static int stop(mddev_t *mddev)
        mddev->thread = NULL;
        mddev->queue->backing_dev_info.congested_fn = NULL;
        blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
-       sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
        free_conf(conf);
-       mddev->private = NULL;
+       mddev->private = &raid5_attrs_group;
        return 0;
 }
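
Taken together, the md.c and raid5.c hunks turn mddev->private into a small protocol for deferred sysfs cleanup. A sketch of how mddev_delayed_delete() (shown earlier) decodes it:

        /* mddev->private after stop():
         *   NULL      - no sysfs groups to remove
         *   (void *)1 - remove only md_redundancy_group
         *   otherwise - an attribute group to remove as well;
         *               raid5's stop() stores &raid5_attrs_group here
         */
        if (mddev->private) {
                sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
                if (mddev->private != (void *)1)
                        sysfs_remove_group(&mddev->kobj, mddev->private);
        }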
 
@@ -5464,11 +5463,11 @@ static int raid5_start_reshape(mddev_t *mddev)
                    !test_bit(Faulty, &rdev->flags)) {
                        if (raid5_add_disk(mddev, rdev) == 0) {
                                char nm[20];
-                               if (rdev->raid_disk >= conf->previous_raid_disks)
+                               if (rdev->raid_disk >= conf->previous_raid_disks) {
                                        set_bit(In_sync, &rdev->flags);
-                               else
+                                       added_devices++;
+                               } else
                                        rdev->recovery_offset = 0;
-                               added_devices++;
                                sprintf(nm, "rd%d", rdev->raid_disk);
                                if (sysfs_create_link(&mddev->kobj,
                                                      &rdev->kobj, nm))
@@ -5480,9 +5479,12 @@ static int raid5_start_reshape(mddev_t *mddev)
                                break;
                }
 
+       /* When a reshape changes the number of devices, ->degraded
+        * is measured against the larger of the pre- and post-reshape
+        * number of devices. */
        if (mddev->delta_disks > 0) {
                spin_lock_irqsave(&conf->device_lock, flags);
-               mddev->degraded = (conf->raid_disks - conf->previous_raid_disks)
+               mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
                        - added_devices;
                spin_unlock_irqrestore(&conf->device_lock, flags);
        }
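
A worked example of the accounting above, with illustrative numbers: growing a 4-disk array to 6 disks while successfully adding both new members gives degraded += (6 - 4) - 2 = 0, whereas adding only one gives degraded += (6 - 4) - 1 = 1, correctly recording one missing member of the enlarged array. Using += rather than = preserves any degradation that existed before the reshape started.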
index c37790ad92d0b501e470d4c0a6de0a1713fc592a..9ddc57909d492199097bdb184f2184dff7c1fbde 100644 (file)
@@ -761,7 +761,6 @@ static int dvb_demux_open(struct inode *inode, struct file *file)
        dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
        dmxdevfilter->type = DMXDEV_TYPE_NONE;
        dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
-       INIT_LIST_HEAD(&dmxdevfilter->feed.ts);
        init_timer(&dmxdevfilter->timer);
 
        dvbdev->users++;
@@ -887,6 +886,7 @@ static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev,
        dmxdevfilter->type = DMXDEV_TYPE_PES;
        memcpy(&dmxdevfilter->params, params,
               sizeof(struct dmx_pes_filter_params));
+       INIT_LIST_HEAD(&dmxdevfilter->feed.ts);
 
        dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);
 
index b78cfb7d1897dcb956508f034cdfffb1e5af3174..67f189b7aa1ffb79706d9bcd8fb10cd250dcd8c9 100644 (file)
@@ -426,16 +426,7 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
                };
        };
 
-       if (dvb_demux_tscheck) {
-               if (!demux->cnt_storage)
-                       demux->cnt_storage = vmalloc(MAX_PID + 1);
-
-               if (!demux->cnt_storage) {
-                       printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n");
-                       dvb_demux_tscheck = 0;
-                       goto no_dvb_demux_tscheck;
-               }
-
+       if (demux->cnt_storage) {
                /* check pkt counter */
                if (pid < MAX_PID) {
                        if (buf[1] & 0x80)
@@ -454,7 +445,6 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
                };
                /* end check */
        };
-no_dvb_demux_tscheck:
 
        list_for_each_entry(feed, &demux->feed_list, list_head) {
                if ((feed->pid != pid) && (feed->pid != 0x2000))
@@ -1246,6 +1236,7 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
        dvbdemux->feed = vmalloc(dvbdemux->feednum * sizeof(struct dvb_demux_feed));
        if (!dvbdemux->feed) {
                vfree(dvbdemux->filter);
+               dvbdemux->filter = NULL;
                return -ENOMEM;
        }
        for (i = 0; i < dvbdemux->filternum; i++) {
@@ -1257,6 +1248,13 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
                dvbdemux->feed[i].index = i;
        }
 
+       if (dvb_demux_tscheck) {
+               dvbdemux->cnt_storage = vmalloc(MAX_PID + 1);
+
+               if (!dvbdemux->cnt_storage)
+                       printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n");
+       }
+
        INIT_LIST_HEAD(&dvbdemux->frontend_list);
 
        for (i = 0; i < DMX_TS_PES_OTHER; i++) {
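
The TS/TEI counter storage now follows the usual allocate-once pattern: attempt the optional vmalloc() a single time at init, and reduce the per-packet hot path to a pointer test. Condensed from the hunks above:

        /* at init: allocation may fail; the check feature just stays off */
        if (dvb_demux_tscheck)
                dvbdemux->cnt_storage = vmalloc(MAX_PID + 1);

        /* per packet: no allocation attempt, just a NULL test */
        if (demux->cnt_storage) {
                /* packet-counter / TEI checks */
        }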
index 62d9c9cc5671d6a7ae4bd7de66806c018e599629..1dd4403247ca02d972fdd0f9e2a3651c862a5ce7 100644 (file)
@@ -921,7 +921,7 @@ static int ax_probe(struct platform_device *pdev)
                size = (res->end - res->start) + 1;
 
                ax->mem2 = request_mem_region(res->start, size, pdev->name);
-               if (ax->mem == NULL) {
+               if (ax->mem2 == NULL) {
                        dev_err(&pdev->dev, "cannot reserve registers\n");
                        ret = -ENXIO;
                        goto exit_mem1;
index bdbd14727e4b232987632fb19f114510e6068837..318a018ca7c58dda1b75cef9de3d8165995ebd12 100644 (file)
@@ -2079,6 +2079,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
                         struct sge_fl *fl, int len, int complete)
 {
        struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+       struct port_info *pi = netdev_priv(qs->netdev);
        struct sk_buff *skb = NULL;
        struct cpl_rx_pkt *cpl;
        struct skb_frag_struct *rx_frag;
@@ -2116,11 +2117,18 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
 
        if (!nr_frags) {
                offset = 2 + sizeof(struct cpl_rx_pkt);
-               qs->lro_va = sd->pg_chunk.va + 2;
-       }
-       len -= offset;
+               cpl = qs->lro_va = sd->pg_chunk.va + 2;
 
-       prefetch(qs->lro_va);
+               if ((pi->rx_offload & T3_RX_CSUM) &&
+                    cpl->csum_valid && cpl->csum == htons(0xffff)) {
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+                       qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
+               } else
+                       skb->ip_summed = CHECKSUM_NONE;
+       } else
+               cpl = qs->lro_va;
+
+       len -= offset;
 
        rx_frag += nr_frags;
        rx_frag->page = sd->pg_chunk.page;
@@ -2136,12 +2144,8 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
                return;
 
        skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
-       skb->ip_summed = CHECKSUM_UNNECESSARY;
-       cpl = qs->lro_va;
 
        if (unlikely(cpl->vlan_valid)) {
-               struct net_device *dev = qs->netdev;
-               struct port_info *pi = netdev_priv(dev);
                struct vlan_group *grp = pi->vlan_grp;
 
                if (likely(grp != NULL)) {
index 997124d2992a38e7817bc2ed7a97815e5fd7fe25..c881347cb26dfc1117dee51506e0e1104593a4b1 100644 (file)
@@ -421,6 +421,8 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
                        msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
                if (tx_queue > IGB_N0_QUEUE)
                        msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
+               if (!adapter->msix_entries && msix_vector == 0)
+                       msixbm |= E1000_EIMS_OTHER;
                array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
                q_vector->eims_value = msixbm;
                break;
@@ -877,7 +879,6 @@ static int igb_request_irq(struct igb_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
-       struct e1000_hw *hw = &adapter->hw;
        int err = 0;
 
        if (adapter->msix_entries) {
@@ -909,20 +910,7 @@ static int igb_request_irq(struct igb_adapter *adapter)
                igb_setup_all_tx_resources(adapter);
                igb_setup_all_rx_resources(adapter);
        } else {
-               switch (hw->mac.type) {
-               case e1000_82575:
-                       wr32(E1000_MSIXBM(0),
-                            (E1000_EICR_RX_QUEUE0 |
-                             E1000_EICR_TX_QUEUE0 |
-                             E1000_EIMS_OTHER));
-                       break;
-               case e1000_82580:
-               case e1000_82576:
-                       wr32(E1000_IVAR0, E1000_IVAR_VALID);
-                       break;
-               default:
-                       break;
-               }
+               igb_assign_vector(adapter->q_vector[0], 0);
        }
 
        if (adapter->flags & IGB_FLAG_HAS_MSI) {
@@ -1140,6 +1128,8 @@ int igb_up(struct igb_adapter *adapter)
        }
        if (adapter->msix_entries)
                igb_configure_msix(adapter);
+       else
+               igb_assign_vector(adapter->q_vector[0], 0);
 
        /* Clear any pending interrupts. */
        rd32(E1000_ICR);
index b5f64ad6797510fe872a2f62e9254b310d2a1ff4..7b7c8486c0bf8dee604ea17949a1d945abeabf9c 100644 (file)
@@ -5179,7 +5179,7 @@ dma_error:
                ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
        }
 
-       return count;
+       return 0;
 }
 
 static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
@@ -5329,8 +5329,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        int txq = smp_processor_id();
 
-       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
+       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+               while (unlikely(txq >= dev->real_num_tx_queues))
+                       txq -= dev->real_num_tx_queues;
                return txq;
+       }
 
 #ifdef IXGBE_FCOE
        if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
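
A worked example of the wrap-around above, with illustrative counts: with 16 CPUs and real_num_tx_queues = 8, smp_processor_id() = 13 yields txq = 13 - 8 = 5, keeping the selected queue inside the valid range instead of returning an out-of-bounds index.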
index 9f9d6081959b6f64ebfebba1384ec353f175ed64..24279e6e55f5046711a938cfeab782624d11718b 100644 (file)
@@ -1941,7 +1941,7 @@ static void netxen_tx_timeout_task(struct work_struct *work)
                netif_wake_queue(adapter->netdev);
 
                clear_bit(__NX_RESETTING, &adapter->state);
-
+               return;
        } else {
                clear_bit(__NX_RESETTING, &adapter->state);
                if (!netxen_nic_reset_context(adapter)) {
@@ -2240,7 +2240,9 @@ netxen_detach_work(struct work_struct *work)
 
        netxen_nic_down(adapter, netdev);
 
+       rtnl_lock();
        netxen_nic_detach(adapter);
+       rtnl_unlock();
 
        status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
 
index d760650c5c04678df33fb54edb0aa404eb002db4..67249c3c9f5046a471f1790745b2b0ccc83fb0c9 100644 (file)
@@ -1025,11 +1025,8 @@ static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
 static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot)
 {
        struct sky2_tx_le *le = sky2->tx_le + *slot;
-       struct tx_ring_info *re = sky2->tx_ring + *slot;
 
        *slot = RING_NEXT(*slot, sky2->tx_ring_size);
-       re->flags = 0;
-       re->skb = NULL;
        le->ctrl = 0;
        return le;
 }
@@ -1622,8 +1619,7 @@ static unsigned tx_le_req(const struct sk_buff *skb)
        return count;
 }
 
-static void sky2_tx_unmap(struct pci_dev *pdev,
-                         const struct tx_ring_info *re)
+static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
 {
        if (re->flags & TX_MAP_SINGLE)
                pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr),
@@ -1633,6 +1629,7 @@ static void sky2_tx_unmap(struct pci_dev *pdev,
                pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr),
                               pci_unmap_len(re, maplen),
                               PCI_DMA_TODEVICE);
+       re->flags = 0;
 }
 
 /*
@@ -1839,6 +1836,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
                        dev->stats.tx_packets++;
                        dev->stats.tx_bytes += skb->len;
 
+                       re->skb = NULL;
                        dev_kfree_skb_any(skb);
 
                        sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size);
index c74694345b6ea6321fe643c2720c86812220f363..d58b94030ef38ad8295388dc5fae6cf000fab0d7 100644 (file)
@@ -338,6 +338,23 @@ static void __devinit quirk_s3_64M(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3,     PCI_DEVICE_ID_S3_868,           quirk_s3_64M);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3,     PCI_DEVICE_ID_S3_968,           quirk_s3_64M);
 
+/*
+ * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS
+ * ver. 1.33  20070103) don't set the correct ISA PCI region header info.
+ * BAR0 should be 8 bytes; instead, it may be set to something like 8k
+ * (which conflicts w/ BAR1's memory range).
+ */
+static void __devinit quirk_cs5536_vsa(struct pci_dev *dev)
+{
+       if (pci_resource_len(dev, 0) != 8) {
+               struct resource *res = &dev->resource[0];
+               res->end = res->start + 8 - 1;
+               dev_info(&dev->dev, "CS5536 ISA bridge bug detected "
+                               "(incorrect header); workaround applied.\n");
+       }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
+
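
Concretely, with illustrative addresses: a BAR0 mis-reported as 8 KiB, say 0x1000-0x2fff, overlaps BAR1's memory range; the quirk trims it to 0x1000-0x1007, the 8 bytes the ISA bridge actually decodes. Header fixups such as this run early, while the device's config header is first read and before resources are assigned, so the allocator only ever sees the corrected size.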
 static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region,
        unsigned size, int nr, const char *name)
 {
index 999fe80c40516983d68a6435b5718c5433af1737..62b654af9237f17fd6aa2fb970072a366116e74e 100644 (file)
@@ -531,7 +531,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
        qdio_siga_sync_q(q);
        get_buf_state(q, q->first_to_check, &state, 0);
 
-       if (state == SLSB_P_INPUT_PRIMED)
+       if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
                /* more work coming */
                return 0;
 
@@ -960,6 +960,8 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
                        qdio_handle_activate_check(cdev, intparm, cstat,
                                                   dstat);
                break;
+       case QDIO_IRQ_STATE_STOPPED:
+               break;
        default:
                WARN_ON(1);
        }
index 0ceec123ddfd09928c86cc55fca0ef522937afae..bee558aed42778ce153b5a1db9c4c55376575431 100644 (file)
@@ -35,7 +35,9 @@
 #include <linux/usb.h>
 #include <linux/platform_device.h>
 #include <linux/io.h>
+#include <linux/mm.h>
 #include <linux/irq.h>
+#include <asm/cacheflush.h>
 
 #include "../core/hcd.h"
 #include "r8a66597.h"
@@ -820,6 +822,26 @@ static void enable_r8a66597_pipe(struct r8a66597 *r8a66597, struct urb *urb,
        enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb);
 }
 
+static void r8a66597_urb_done(struct r8a66597 *r8a66597, struct urb *urb,
+                             int status)
+__releases(r8a66597->lock)
+__acquires(r8a66597->lock)
+{
+       if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) {
+               void *ptr;
+
+               for (ptr = urb->transfer_buffer;
+                    ptr < urb->transfer_buffer + urb->transfer_buffer_length;
+                    ptr += PAGE_SIZE)
+                       flush_dcache_page(virt_to_page(ptr));
+       }
+
+       usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb);
+       spin_unlock(&r8a66597->lock);
+       usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, status);
+       spin_lock(&r8a66597->lock);
+}
+
 /* this function must be called with interrupt disabled */
 static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
 {
@@ -838,15 +860,9 @@ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
                list_del(&td->queue);
                kfree(td);
 
-               if (urb) {
-                       usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597),
-                                       urb);
+               if (urb)
+                       r8a66597_urb_done(r8a66597, urb, -ENODEV);
 
-                       spin_unlock(&r8a66597->lock);
-                       usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb,
-                                       -ENODEV);
-                       spin_lock(&r8a66597->lock);
-               }
                break;
        }
 }
@@ -1006,6 +1022,8 @@ static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port,
 /* this function must be called with interrupt disabled */
 static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port,
                                        u16 syssts)
+__releases(r8a66597->lock)
+__acquires(r8a66597->lock)
 {
        if (syssts == SE0) {
                r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port));
@@ -1023,7 +1041,9 @@ static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port,
                        usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597));
        }
 
+       spin_unlock(&r8a66597->lock);
        usb_hcd_poll_rh_status(r8a66597_to_hcd(r8a66597));
+       spin_lock(&r8a66597->lock);
 }
 
 /* this function must be called with interrupt disabled */
@@ -1283,10 +1303,7 @@ __releases(r8a66597->lock) __acquires(r8a66597->lock)
                if (usb_pipeisoc(urb->pipe))
                        urb->start_frame = r8a66597_get_frame(hcd);
 
-               usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb);
-               spin_unlock(&r8a66597->lock);
-               usb_hcd_giveback_urb(hcd, urb, status);
-               spin_lock(&r8a66597->lock);
+               r8a66597_urb_done(r8a66597, urb, status);
        }
 
        if (restart) {
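
Two conventions are worth noting in the new r8a66597_urb_done() helper above: inbound PIO data is written through the kernel mapping, so flush_dcache_page() is needed on D-cache-aliasing CPUs before user space reads the buffer through its own mapping, and usb_hcd_giveback_urb() runs the URB completion handler, which may resubmit and re-enter the driver, so the driver lock must be dropped around it. In sketch form (priv stands for the driver's private state):

        usb_hcd_unlink_urb_from_ep(hcd, urb);
        spin_unlock(&priv->lock);       /* completion may re-enter the HCD */
        usb_hcd_giveback_urb(hcd, urb, status);
        spin_lock(&priv->lock);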
index cf62b05e296a67af103178e0b86e673cb1d16031..7d6c2139891db0d04673c6078d6a3f85c446dbb6 100644 (file)
@@ -84,7 +84,7 @@ static const match_table_t tokens = {
 
 static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
 {
-       char *options;
+       char *options, *tmp_options;
        substring_t args[MAX_OPT_ARGS];
        char *p;
        int option = 0;
@@ -102,9 +102,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
        if (!opts)
                return 0;
 
-       options = kstrdup(opts, GFP_KERNEL);
-       if (!options)
+       tmp_options = kstrdup(opts, GFP_KERNEL);
+       if (!tmp_options) {
+               ret = -ENOMEM;
                goto fail_option_alloc;
+       }
+       options = tmp_options;
 
        while ((p = strsep(&options, ",")) != NULL) {
                int token;
@@ -159,8 +162,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
                        break;
                case Opt_cache:
                        s = match_strdup(&args[0]);
-                       if (!s)
-                               goto fail_option_alloc;
+                       if (!s) {
+                               ret = -ENOMEM;
+                               P9_DPRINTK(P9_DEBUG_ERROR,
+                                 "problem allocating copy of cache arg\n");
+                               goto free_and_return;
+                       }
 
                        if (strcmp(s, "loose") == 0)
                                v9ses->cache = CACHE_LOOSE;
@@ -173,8 +180,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
 
                case Opt_access:
                        s = match_strdup(&args[0]);
-                       if (!s)
-                               goto fail_option_alloc;
+                       if (!s) {
+                               ret = -ENOMEM;
+                               P9_DPRINTK(P9_DEBUG_ERROR,
+                                 "problem allocating copy of access arg\n");
+                               goto free_and_return;
+                       }
 
                        v9ses->flags &= ~V9FS_ACCESS_MASK;
                        if (strcmp(s, "user") == 0)
@@ -194,13 +205,11 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
                        continue;
                }
        }
-       kfree(options);
-       return ret;
 
+free_and_return:
+       kfree(tmp_options);
 fail_option_alloc:
-       P9_DPRINTK(P9_DEBUG_ERROR,
-                  "failed to allocate copy of option argument\n");
-       return -ENOMEM;
+       return ret;
 }
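
The tmp_options copy exists because strsep() advances the pointer it is given: after the loop, options is NULL, so the old kfree(options) was a no-op and the copy leaked. A minimal illustration with a hypothetical do_token() consumer:

        char *tmp = kstrdup(opts, GFP_KERNEL);  /* keep for kfree() */
        char *cursor = tmp;
        char *p;

        while ((p = strsep(&cursor, ",")) != NULL)
                do_token(p);            /* cursor is NULL when done */

        kfree(tmp);                     /* must free the saved pointer */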
 
 /**
index 3a7560e358657f059897e6f27aaa3ce4a20c0953..ed835836e0dc35f9eab51e2a86d611d8c5d6808f 100644 (file)
@@ -60,3 +60,4 @@ void v9fs_dentry_release(struct dentry *);
 int v9fs_uflags2omode(int uflags, int extended);
 
 ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, u64);
+void v9fs_blank_wstat(struct p9_wstat *wstat);
index 3902bf43a0883bfe4b92d384170362e53c910e0d..74a0461a9ac0dfabd95ca360fa2fa58e32d2e779 100644 (file)
@@ -257,6 +257,23 @@ v9fs_file_write(struct file *filp, const char __user * data,
        return total;
 }
 
+static int v9fs_file_fsync(struct file *filp, struct dentry *dentry,
+                                       int datasync)
+{
+       struct p9_fid *fid;
+       struct p9_wstat wstat;
+       int retval;
+
+       P9_DPRINTK(P9_DEBUG_VFS, "filp %p dentry %p datasync %x\n", filp,
+                                               dentry, datasync);
+
+       fid = filp->private_data;
+       v9fs_blank_wstat(&wstat);
+
+       retval = p9_client_wstat(fid, &wstat);
+       return retval;
+}
+
 static const struct file_operations v9fs_cached_file_operations = {
        .llseek = generic_file_llseek,
        .read = do_sync_read,
@@ -266,6 +283,7 @@ static const struct file_operations v9fs_cached_file_operations = {
        .release = v9fs_dir_release,
        .lock = v9fs_file_lock,
        .mmap = generic_file_readonly_mmap,
+       .fsync = v9fs_file_fsync,
 };
 
 const struct file_operations v9fs_file_operations = {
@@ -276,4 +294,5 @@ const struct file_operations v9fs_file_operations = {
        .release = v9fs_dir_release,
        .lock = v9fs_file_lock,
        .mmap = generic_file_readonly_mmap,
+       .fsync = v9fs_file_fsync,
 };
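
v9fs_file_fsync() implements fsync by sending a wstat with every field blanked to "don't touch" values; 9P servers conventionally treat such a wstat as a request to commit the file's contents to stable storage. A hypothetical user-space view (path and buffer are illustrative):

        int fd = open("/mnt/9p/file", O_WRONLY | O_CREAT, 0644);
        write(fd, buf, sizeof(buf));
        fsync(fd);      /* v9fs_file_fsync -> p9_client_wstat(fid, blank) */
        close(fd);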
index 9d03d1ebca6ffc226234b9fa354cab806f514998..a407fa3388c0c560a7ff754066dfa9606380bef5 100644 (file)
@@ -176,7 +176,7 @@ int v9fs_uflags2omode(int uflags, int extended)
  *
  */
 
-static void
+void
 v9fs_blank_wstat(struct p9_wstat *wstat)
 {
        wstat->type = ~0;
index 33baf27fac78e5c4fbc36e12e3a4a16b2322afe4..34ddda888e631e8ad338b1271a25efe10e93477d 100644 (file)
@@ -873,6 +873,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
        brelse(bh);
 
       unacquire_priv_sbp:
+       kfree(befs_sb->mount_opts.iocharset);
        kfree(sb->s_fs_info);
 
       unacquire_none:
index 73d6a735b8f311cc62fe5b22c626a7a4baa9c452..d11d0289f3d24bf9489acba6059c5d8ada9eda52 100644 (file)
@@ -246,7 +246,8 @@ struct super_block *freeze_bdev(struct block_device *bdev)
        if (!sb)
                goto out;
        if (sb->s_flags & MS_RDONLY) {
-               deactivate_locked_super(sb);
+               sb->s_frozen = SB_FREEZE_TRANS;
+               up_write(&sb->s_umount);
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
                return sb;
        }
@@ -307,7 +308,7 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb)
        BUG_ON(sb->s_bdev != bdev);
        down_write(&sb->s_umount);
        if (sb->s_flags & MS_RDONLY)
-               goto out_deactivate;
+               goto out_unfrozen;
 
        if (sb->s_op->unfreeze_fs) {
                error = sb->s_op->unfreeze_fs(sb);
@@ -321,11 +322,11 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb)
                }
        }
 
+out_unfrozen:
        sb->s_frozen = SB_UNFROZEN;
        smp_wmb();
        wake_up(&sb->s_wait_unfrozen);
 
-out_deactivate:
        if (sb)
                deactivate_locked_super(sb);
 out_unlock:
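
The read-only fix keeps freeze_bdev() and thaw_bdev() symmetric: instead of deactivating the superblock (which thaw_bdev() could not undo), a read-only filesystem is now marked SB_FREEZE_TRANS and s_umount is released, so the usual caller pairing works unchanged. A sketch of that pairing, with error handling elided:

        struct super_block *sb = freeze_bdev(bdev);     /* quiesce writes */
        /* ... take a block-level snapshot of bdev ... */
        thaw_bdev(bdev, sb);            /* unfreezes, wakes s_wait_unfrozen */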
index 87b25543d7d145fb90b8f986c9f9f124547430b4..2b59201b955ca533bcb10a251df07ff2d7c549d2 100644 (file)
@@ -1982,7 +1982,12 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 
        if (!(sb->s_flags & MS_RDONLY)) {
                ret = btrfs_recover_relocation(tree_root);
-               BUG_ON(ret);
+               if (ret < 0) {
+                       printk(KERN_WARNING
+                              "btrfs: failed to recover relocation\n");
+                       err = -EINVAL;
+                       goto fail_trans_kthread;
+               }
        }
 
        location.objectid = BTRFS_FS_TREE_OBJECTID;
index 432a2da4641ec55d0d2f963b2f9b276beb61aba1..559f72489b3bf02b4477369da854bf371cbbd0e4 100644 (file)
@@ -5402,10 +5402,6 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
        int ret;
 
        while (level >= 0) {
-               if (path->slots[level] >=
-                   btrfs_header_nritems(path->nodes[level]))
-                       break;
-
                ret = walk_down_proc(trans, root, path, wc, lookup_info);
                if (ret > 0)
                        break;
@@ -5413,6 +5409,10 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
                if (level == 0)
                        break;
 
+               if (path->slots[level] >=
+                   btrfs_header_nritems(path->nodes[level]))
+                       break;
+
                ret = do_walk_down(trans, root, path, wc, &lookup_info);
                if (ret > 0) {
                        path->slots[level]++;
index 96577e8bf9fdb62819ab2dbd5f9da91200624596..b177ed3196126d9fe8e5d0f1507075a421e535b9 100644 (file)
@@ -3165,10 +3165,9 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
                spin_unlock(&tree->buffer_lock);
                goto free_eb;
        }
-       spin_unlock(&tree->buffer_lock);
-
        /* add one reference for the tree */
        atomic_inc(&eb->refs);
+       spin_unlock(&tree->buffer_lock);
        return eb;
 
 free_eb:
index c02033596f02237aaeaabb2795960c2b29bd0e7b..9d08096299675548651069e4aa613712b827ba67 100644 (file)
@@ -1133,7 +1133,7 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
        }
        mutex_lock(&dentry->d_inode->i_mutex);
 out:
-       return ret > 0 ? EIO : ret;
+       return ret > 0 ? -EIO : ret;
 }
 
 static const struct vm_operations_struct btrfs_file_vm_ops = {
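
The sign matters because the kernel reports errors as negative errno values; a bare positive EIO from ->fsync would not be recognized as an error by callers that test for ret < 0.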
index 8cd109972fa6ba48d0a0909eb0362ccd96f85c4e..4deb280f8969b78e373257d394106895d0ea82a6 100644 (file)
@@ -1681,24 +1681,6 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
  * before we start the transaction.  It limits the number of btree
  * reads required while inside the transaction.
  */
-static noinline void reada_csum(struct btrfs_root *root,
-                               struct btrfs_path *path,
-                               struct btrfs_ordered_extent *ordered_extent)
-{
-       struct btrfs_ordered_sum *sum;
-       u64 bytenr;
-
-       sum = list_entry(ordered_extent->list.next, struct btrfs_ordered_sum,
-                        list);
-       bytenr = sum->sums[0].bytenr;
-
-       /*
-        * we don't care about the results, the point of this search is
-        * just to get the btree leaves into ram
-        */
-       btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, bytenr, 0);
-}
-
 /* as ordered data IO finishes, this gets called so we can finish
  * an ordered extent if the range of bytes in the file it covers are
  * fully written.
@@ -1709,7 +1691,6 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
        struct btrfs_trans_handle *trans;
        struct btrfs_ordered_extent *ordered_extent = NULL;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
-       struct btrfs_path *path;
        int compressed = 0;
        int ret;
 
@@ -1717,32 +1698,9 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
        if (!ret)
                return 0;
 
-       /*
-        * before we join the transaction, try to do some of our IO.
-        * This will limit the amount of IO that we have to do with
-        * the transaction running.  We're unlikely to need to do any
-        * IO if the file extents are new, the disk_i_size checks
-        * covers the most common case.
-        */
-       if (start < BTRFS_I(inode)->disk_i_size) {
-               path = btrfs_alloc_path();
-               if (path) {
-                       ret = btrfs_lookup_file_extent(NULL, root, path,
-                                                      inode->i_ino,
-                                                      start, 0);
-                       ordered_extent = btrfs_lookup_ordered_extent(inode,
-                                                                    start);
-                       if (!list_empty(&ordered_extent->list)) {
-                               btrfs_release_path(root, path);
-                               reada_csum(root, path, ordered_extent);
-                       }
-                       btrfs_free_path(path);
-               }
-       }
-
-       if (!ordered_extent)
-               ordered_extent = btrfs_lookup_ordered_extent(inode, start);
+       ordered_extent = btrfs_lookup_ordered_extent(inode, start);
        BUG_ON(!ordered_extent);
+
        if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
                BUG_ON(!list_empty(&ordered_extent->list));
                ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
@@ -5841,7 +5799,9 @@ static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
                inode->i_ctime = CURRENT_TIME;
                BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
                if (!(mode & FALLOC_FL_KEEP_SIZE) &&
-                   cur_offset > inode->i_size) {
+                       (actual_len > inode->i_size) &&
+                       (cur_offset > inode->i_size)) {
+
                        if (cur_offset > actual_len)
                                i_size  = actual_len;
                        else
index ed3e4a2ec2c833b3aac4faf6143c4c3f22584b3e..ab7ab53187452aa7794bfcdfb5596385d3cc77b2 100644 (file)
@@ -3764,7 +3764,8 @@ out:
                                       BTRFS_DATA_RELOC_TREE_OBJECTID);
                if (IS_ERR(fs_root))
                        err = PTR_ERR(fs_root);
-               btrfs_orphan_cleanup(fs_root);
+               else
+                       btrfs_orphan_cleanup(fs_root);
        }
        return err;
 }
index 7b2600b380d7c74f4aa398378012cf1e8bccfed7..49503d2edc7eb74ff8277f946979c7b183ec1def 100644 (file)
@@ -1,3 +1,7 @@
+Version 1.62
+------------
+Add sockopt=TCP_NODELAY mount option.
+
 Version 1.61
 ------------
 Fix append problem to Samba servers (files opened with O_APPEND could
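
For reference, the new option is passed like any other cifs mount option (server, share, and mountpoint are illustrative):

        mount -t cifs //server/share /mnt/cifs -o sockopt=TCP_NODELAY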
index ac2b24c192f881d229ccf5fb64e99dfe07b2a76a..78c1b86d55f6fcf3a42eeec70968e9f7efc69b3b 100644 (file)
@@ -113,5 +113,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
 extern const struct export_operations cifs_export_ops;
 #endif /* EXPERIMENTAL */
 
-#define CIFS_VERSION   "1.61"
+#define CIFS_VERSION   "1.62"
 #endif                         /* _CIFSFS_H */
index 4b35f7ec0583148ac0fe00c3ebf388c83dbb3ebf..ed751bb657db1a1961a0c590a56f0386c926b8b0 100644 (file)
@@ -149,6 +149,7 @@ struct TCP_Server_Info {
        bool svlocal:1;                 /* local server or remote */
        bool noblocksnd;                /* use blocking sendmsg */
        bool noautotune;                /* do not autotune send buf sizes */
+       bool tcp_nodelay;
        atomic_t inFlight;  /* number of requests on the wire to server */
 #ifdef CONFIG_CIFS_STATS2
        atomic_t inSend; /* requests trying to send */
index 3bbcaa716b3c7afb18ee5826261ed11e5cb32aed..2e9e09ca0e30ef8eb2555cffd8298276e0bf04f0 100644 (file)
@@ -98,7 +98,7 @@ struct smb_vol {
        bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
        unsigned int rsize;
        unsigned int wsize;
-       unsigned int sockopt;
+       bool sockopt_tcp_nodelay:1;
        unsigned short int port;
        char *prepath;
 };
@@ -1142,9 +1142,11 @@ cifs_parse_mount_options(char *options, const char *devname,
                                        simple_strtoul(value, &value, 0);
                        }
                } else if (strnicmp(data, "sockopt", 5) == 0) {
-                       if (value && *value) {
-                               vol->sockopt =
-                                       simple_strtoul(value, &value, 0);
+                       if (!value || !*value) {
+                               cERROR(1, ("no socket option specified"));
+                               continue;
+                       } else if (strnicmp(value, "TCP_NODELAY", 11) == 0) {
+                               vol->sockopt_tcp_nodelay = 1;
                        }
                } else if (strnicmp(data, "netbiosname", 4) == 0) {
                        if (!value || !*value || (*value == ' ')) {
@@ -1514,6 +1516,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
 
        tcp_ses->noblocksnd = volume_info->noblocksnd;
        tcp_ses->noautotune = volume_info->noautotune;
+       tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay;
        atomic_set(&tcp_ses->inFlight, 0);
        init_waitqueue_head(&tcp_ses->response_q);
        init_waitqueue_head(&tcp_ses->request_q);
@@ -1764,6 +1767,7 @@ static int
 ipv4_connect(struct TCP_Server_Info *server)
 {
        int rc = 0;
+       int val;
        bool connected = false;
        __be16 orig_port = 0;
        struct socket *socket = server->ssocket;
@@ -1845,6 +1849,14 @@ ipv4_connect(struct TCP_Server_Info *server)
                        socket->sk->sk_rcvbuf = 140 * 1024;
        }
 
+       if (server->tcp_nodelay) {
+               val = 1;
+               rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY,
+                               (char *)&val, sizeof(val));
+               if (rc)
+                       cFYI(1, ("set TCP_NODELAY socket option error %d", rc));
+       }
+
         cFYI(1, ("sndbuf %d rcvbuf %d rcvtimeo 0x%lx",
                 socket->sk->sk_sndbuf,
                 socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo));
@@ -1916,6 +1928,7 @@ static int
 ipv6_connect(struct TCP_Server_Info *server)
 {
        int rc = 0;
+       int val;
        bool connected = false;
        __be16 orig_port = 0;
        struct socket *socket = server->ssocket;
@@ -1987,6 +2000,15 @@ ipv6_connect(struct TCP_Server_Info *server)
         */
        socket->sk->sk_rcvtimeo = 7 * HZ;
        socket->sk->sk_sndtimeo = 5 * HZ;
+
+       if (server->tcp_nodelay) {
+               val = 1;
+               rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY,
+                               (char *)&val, sizeof(val));
+               if (rc)
+                       cFYI(1, ("set TCP_NODELAY socket option error %d", rc));
+       }
+
        server->ssocket = socket;
 
        return rc;
index cf18ee7655902210240ea0c5281e546370b9d22d..e3fda978f4816eca17b9f5e4194fd161a224c5fe 100644 (file)
@@ -1762,8 +1762,18 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
                                        CIFS_MOUNT_MAP_SPECIAL_CHR);
        }
 
-       if (!rc)
+       if (!rc) {
                rc = inode_setattr(inode, attrs);
+
+               /* force revalidate when any of these times are set since some
+                  of the fs types (eg ext3, fat) do not have fine enough
+                  time granularity to match protocol, and we do not have a
+                  way (yet) to query the server fs's time granularity (and
+                  whether it rounds times down).
+               */
+               if (!rc && (attrs->ia_valid & (ATTR_MTIME | ATTR_CTIME)))
+                       cifsInode->time = 0;
+       }
 out:
        kfree(args);
        kfree(full_path);
index f84062f9a9850a9fa4f38a5b4ec9f657c42ae474..c343b14ba2d3eea99e7de45c7eed87311668e873 100644 (file)
@@ -77,6 +77,11 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
 
        cFYI(1, ("For %s", name->name));
 
+       if (parent->d_op && parent->d_op->d_hash)
+               parent->d_op->d_hash(parent, name);
+       else
+               name->hash = full_name_hash(name->name, name->len);
+
        dentry = d_lookup(parent, name);
        if (dentry) {
                /* FIXME: check for inode number changes? */
@@ -666,12 +671,11 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst,
                                           min(len, max_len), nlt,
                                           cifs_sb->mnt_cifs_flags &
                                                CIFS_MOUNT_MAP_SPECIAL_CHR);
+               pqst->len -= nls_nullsize(nlt);
        } else {
                pqst->name = filename;
                pqst->len = len;
        }
-       pqst->hash = full_name_hash(pqst->name, pqst->len);
-/*     cFYI(1, ("filldir on %s",pqst->name));  */
        return rc;
 }
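
The hash must be computed before d_lookup() because the dcache compares qstr hashes before comparing names, and on mounts with a custom d_hash (for example, case-insensitive handling) it must be the parent's own hash function; hashing with full_name_hash() unconditionally could miss existing dentries. Condensed, the required ordering is:

        if (parent->d_op && parent->d_op->d_hash)
                parent->d_op->d_hash(parent, name);     /* filesystem hash */
        else
                name->hash = full_name_hash(name->name, name->len);
        dentry = d_lookup(parent, name);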
 
index 7085a6275c4c9f0635383033dde2ee572bb046b0..aaa9c1c5a5bd243e398b96fe6a278c85595c64c9 100644 (file)
@@ -223,9 +223,9 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
                /* null user mount */
                *bcc_ptr = 0;
                *(bcc_ptr+1) = 0;
-       } else { /* 300 should be long enough for any conceivable user name */
+       } else {
                bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName,
-                                         300, nls_cp);
+                                         MAX_USERNAME_SIZE, nls_cp);
        }
        bcc_ptr += 2 * bytes_ret;
        bcc_ptr += 2; /* account for null termination */
@@ -246,11 +246,10 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
        /* copy user */
        if (ses->userName == NULL) {
                /* BB what about null user mounts - check that we do this BB */
-       } else { /* 300 should be long enough for any conceivable user name */
-               strncpy(bcc_ptr, ses->userName, 300);
+       } else {
+               strncpy(bcc_ptr, ses->userName, MAX_USERNAME_SIZE);
        }
-       /* BB improve check for overflow */
-       bcc_ptr += strnlen(ses->userName, 300);
+       bcc_ptr += strnlen(ses->userName, MAX_USERNAME_SIZE);
        *bcc_ptr = 0;
        bcc_ptr++; /* account for null termination */
 
index 5ef953e6f908ea2d6f68061310a106fd0179d15a..97e01dc0d95fc4fe8464d729ce94d32b888b567d 100644 (file)
@@ -199,9 +199,7 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
 static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
                      int force)
 {
-       unsigned long flags;
-
-       write_lock_irqsave(&filp->f_owner.lock, flags);
+       write_lock_irq(&filp->f_owner.lock);
        if (force || !filp->f_owner.pid) {
                put_pid(filp->f_owner.pid);
                filp->f_owner.pid = get_pid(pid);
@@ -213,7 +211,7 @@ static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
                        filp->f_owner.euid = cred->euid;
                }
        }
-       write_unlock_irqrestore(&filp->f_owner.lock, flags);
+       write_unlock_irq(&filp->f_owner.lock);
 }
 
 int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
index 69652c5bd5f0ad83b983324d4dc2d31b7a1cd689..b98404b5438385dc803498ba5e0c710a7244ed60 100644 (file)
@@ -253,6 +253,7 @@ void __fput(struct file *file)
        if (file->f_op && file->f_op->release)
                file->f_op->release(inode, file);
        security_file_free(file);
+       ima_file_free(file);
        if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
                cdev_put(inode->i_cdev);
        fops_put(file->f_op);
index 94a5e60779f917eb9336dc9d6e4bc8bde09482e4..d62fdc875f22800a3d5c1a8c0a2532b0b6d51cc6 100644 (file)
@@ -1736,8 +1736,7 @@ do_last:
                if (nd.root.mnt)
                        path_put(&nd.root);
                if (!IS_ERR(filp)) {
-                       error = ima_path_check(&filp->f_path, filp->f_mode &
-                                      (MAY_READ | MAY_WRITE | MAY_EXEC));
+                       error = ima_file_check(filp, acc_mode);
                        if (error) {
                                fput(filp);
                                filp = ERR_PTR(error);
@@ -1797,8 +1796,7 @@ ok:
        }
        filp = nameidata_to_filp(&nd);
        if (!IS_ERR(filp)) {
-               error = ima_path_check(&filp->f_path, filp->f_mode &
-                              (MAY_READ | MAY_WRITE | MAY_EXEC));
+               error = ima_file_check(filp, acc_mode);
                if (error) {
                        fput(filp);
                        filp = ERR_PTR(error);
index c487810a2366feed1d7018085296ccf371ab0947..a0c4016413f16117c1c5141fd01d3b27eabf871e 100644 (file)
@@ -1316,19 +1316,11 @@ rqst_exp_parent(struct svc_rqst *rqstp, struct path *path)
 
 static struct svc_export *find_fsidzero_export(struct svc_rqst *rqstp)
 {
-       struct svc_export *exp;
        u32 fsidv[2];
 
        mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL);
 
-       exp = rqst_exp_find(rqstp, FSID_NUM, fsidv);
-       /*
-        * We shouldn't have accepting an nfsv4 request at all if we
-        * don't have a pseudoexport!:
-        */
-       if (IS_ERR(exp) && PTR_ERR(exp) == -ENOENT)
-               exp = ERR_PTR(-ESERVERFAULT);
-       return exp;
+       return rqst_exp_find(rqstp, FSID_NUM, fsidv);
 }
 
 /*
index c194793b642b8f5e35175ac52fed9c104606d274..97d79eff6b7f65af66688d52e96ed6b6a9217ec3 100644 (file)
@@ -752,6 +752,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
                            flags, current_cred());
        if (IS_ERR(*filp))
                host_err = PTR_ERR(*filp);
+       host_err = ima_file_check(*filp, access);
 out_nfserr:
        err = nfserrno(host_err);
 out:
@@ -2127,7 +2128,6 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
         */
        path.mnt = exp->ex_path.mnt;
        path.dentry = dentry;
-       err = ima_path_check(&path, acc & (MAY_READ | MAY_WRITE | MAY_EXEC));
 nfsd_out:
        return err? nfserrno(err) : 0;
 }
index 3dae4a13f6e48c968dcad4ddcf28672c807068c6..7e9df11260f40b4326947c96e65d5a5e2c44cc1c 100644 (file)
@@ -599,7 +599,7 @@ bail:
        return ret;
 }
 
-/* 
+/*
  * ocfs2_dio_end_io is called by the dio core when a dio is finished.  We're
  * particularly interested in the aio/dio case.  Like the core uses
  * i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from
@@ -670,7 +670,7 @@ static ssize_t ocfs2_direct_IO(int rw,
 
        ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
                                            inode->i_sb->s_bdev, iov, offset,
-                                           nr_segs, 
+                                           nr_segs,
                                            ocfs2_direct_IO_get_blocks,
                                            ocfs2_dio_end_io);
 
index d43d34a1dd31aa3225673cdbcaf7ed07620a0388..21c808f752d8dc42381ce556547d7ae60c4c82bb 100644 (file)
@@ -368,7 +368,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
        }
        ocfs2_metadata_cache_io_unlock(ci);
 
-       mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n", 
+       mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n",
             (unsigned long long)block, nr,
             ((flags & OCFS2_BH_IGNORE_CACHE) || ignore_cache) ? "no" : "yes",
             flags);
index eda5b8bcddd5db3a43a56a7965b9dffeeb193e16..5c98900067082d6066b26e50cc33dec31e7a769d 100644 (file)
@@ -78,7 +78,7 @@ static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type);
 
 unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD;
 
-/* Only sets a new threshold if there are no active regions. 
+/* Only sets a new threshold if there are no active regions.
  *
  * No locking or otherwise interesting code is required for reading
  * o2hb_dead_threshold as it can't change once regions are active and
@@ -170,7 +170,7 @@ static void o2hb_write_timeout(struct work_struct *work)
 
        mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u "
             "milliseconds\n", reg->hr_dev_name,
-            jiffies_to_msecs(jiffies - reg->hr_last_timeout_start)); 
+            jiffies_to_msecs(jiffies - reg->hr_last_timeout_start));
        o2quo_disk_timeout();
 }
 
@@ -624,7 +624,7 @@ static int o2hb_check_slot(struct o2hb_region *reg,
             "seq %llu last %llu changed %u equal %u\n",
             slot->ds_node_num, (long long)slot->ds_last_generation,
             le32_to_cpu(hb_block->hb_cksum),
-            (unsigned long long)le64_to_cpu(hb_block->hb_seq), 
+            (unsigned long long)le64_to_cpu(hb_block->hb_seq),
             (unsigned long long)slot->ds_last_time, slot->ds_changed_samples,
             slot->ds_equal_samples);
 
index 334f231a422c732db5a9364596f9d9655f5dee79..d8d0c65ac03cc98feaefdd8a9df941613b482837 100644 (file)
@@ -485,7 +485,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
        }
 
        if (was_valid && !valid) {
-               printk(KERN_INFO "o2net: no longer connected to "
+               printk(KERN_NOTICE "o2net: no longer connected to "
                       SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc));
                o2net_complete_nodes_nsw(nn);
        }
@@ -493,7 +493,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
        if (!was_valid && valid) {
                o2quo_conn_up(o2net_num_from_nn(nn));
                cancel_delayed_work(&nn->nn_connect_expired);
-               printk(KERN_INFO "o2net: %s " SC_NODEF_FMT "\n",
+               printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n",
                       o2nm_this_node() > sc->sc_node->nd_num ?
                                "connected to" : "accepted connection from",
                       SC_NODEF_ARGS(sc));
@@ -930,7 +930,7 @@ static void o2net_sendpage(struct o2net_sock_container *sc,
                        cond_resched();
                        continue;
                }
-               mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT 
+               mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT
                     " failed with %zd\n", size, SC_NODEF_ARGS(sc), ret);
                o2net_ensure_shutdown(nn, sc, 0);
                break;
@@ -1476,14 +1476,14 @@ static void o2net_idle_timer(unsigned long data)
 
        do_gettimeofday(&now);
 
-       printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
+       printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
             "seconds, shutting it down.\n", SC_NODEF_ARGS(sc),
                     o2net_idle_timeout() / 1000,
                     o2net_idle_timeout() % 1000);
        mlog(ML_NOTICE, "here are some times that might help debug the "
             "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv "
             "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n",
-            sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec, 
+            sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec,
             now.tv_sec, (long) now.tv_usec,
             sc->sc_tv_data_ready.tv_sec, (long) sc->sc_tv_data_ready.tv_usec,
             sc->sc_tv_advance_start.tv_sec,
index 8d58cfe410b13babe15d68c35b3f62a6be7331c3..96fa7ebc530cf8fce66f4ccae2aa1fcc53f5f340 100644 (file)
  * on their number */
 #define O2NET_QUORUM_DELAY_MS  ((o2hb_dead_threshold + 2) * O2HB_REGION_TIMEOUT_MS)
 
-/* 
+/*
  * This version number represents quite a lot, unfortunately.  It not
  * only represents the raw network message protocol on the wire but also
- * locking semantics of the file system using the protocol.  It should 
+ * locking semantics of the file system using the protocol.  It should
  * be somewhere else, I'm sure, but right now it isn't.
  *
  * With version 11, we separate out the filesystem locking portion.  The
index b5786a787fabe7d2a46930c278456f91d05343f8..3cfa114aa3910479f844648d92546878fff23c2d 100644 (file)
@@ -95,7 +95,7 @@ const char *dlm_errname(enum dlm_status err);
                mlog(ML_ERROR, "dlm status = %s\n", dlm_errname((st))); \
 } while (0)
 
-#define DLM_LKSB_UNUSED1           0x01  
+#define DLM_LKSB_UNUSED1           0x01
 #define DLM_LKSB_PUT_LVB           0x02
 #define DLM_LKSB_GET_LVB           0x04
 #define DLM_LKSB_UNUSED2           0x08
index 01cf8cc3d286f483d5c405d35dc574ec0fb095b7..dccc439fa087ce65a202ed7a5aa994dd79fa6e9e 100644 (file)
@@ -123,7 +123,7 @@ static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
                dlm_lock_put(lock);
                /* free up the reserved bast that we are cancelling.
                 * guaranteed that this will not be the last reserved
-                * ast because *both* an ast and a bast were reserved 
+                * ast because *both* an ast and a bast were reserved
                 * to get to this point.  the res->spinlock will not be
                 * taken here */
                dlm_lockres_release_ast(dlm, res);
index ca96bce50e18cb6629343f6a0436b8bec2aa20a6..f283bce776b48e4d916cd4a6b4d84c7b38775c69 100644 (file)
@@ -396,7 +396,7 @@ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
                        /* instead of logging the same network error over
                         * and over, sleep here and wait for the heartbeat
                         * to notice the node is dead.  times out after 5s. */
-                       dlm_wait_for_node_death(dlm, res->owner, 
+                       dlm_wait_for_node_death(dlm, res->owner,
                                                DLM_NODE_DEATH_WAIT_MAX);
                        ret = DLM_RECOVERING;
                        mlog(0, "node %u died so returning DLM_RECOVERING "
index 42b0bad7a612459c8f3e6857cbf829cab964bd57..0cd24cf543962dc5251622c531fb3ad9ad2d0324 100644 (file)
@@ -102,7 +102,7 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
        assert_spin_locked(&res->spinlock);
 
        stringify_lockname(res->lockname.name, res->lockname.len,
-                          buf, sizeof(buf) - 1);
+                          buf, sizeof(buf));
        printk("lockres: %s, owner=%u, state=%u\n",
               buf, res->owner, res->state);
        printk("  last used: %lu, refcnt: %u, on purge list: %s\n",
index 0334000676d3ad7f48765ff1fcde2e6d8e1f3a03..988c9055fd4e6c98888ab3a53072eed309f88b6d 100644 (file)
@@ -816,7 +816,7 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
        }
 
        /* Once the dlm ctxt is marked as leaving then we don't want
-        * to be put in someone's domain map. 
+        * to be put in someone's domain map.
         * Also, explicitly disallow joining at certain troublesome
         * times (ie. during recovery). */
        if (dlm && dlm->dlm_state != DLM_CTXT_LEAVING) {
index 437698e9465fd01114fd117228cd39095445f795..73333777267154750e5f505a0272f328e8d23455 100644 (file)
@@ -269,7 +269,7 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
                }
                dlm_revert_pending_lock(res, lock);
                dlm_lock_put(lock);
-       } else if (dlm_is_recovery_lock(res->lockname.name, 
+       } else if (dlm_is_recovery_lock(res->lockname.name,
                                        res->lockname.len)) {
                /* special case for the $RECOVERY lock.
                 * there will never be an AST delivered to put
index 03ccf9a7b1f48506fd659a46903f826bd1adcfbe..a659606dcb9592f1d0c719ed7167b1fe158296f2 100644 (file)
@@ -366,7 +366,7 @@ void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
        struct dlm_master_list_entry *mle;
 
        assert_spin_locked(&dlm->spinlock);
-       
+
        list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
                if (node_up)
                        dlm_mle_node_up(dlm, mle, NULL, idx);
@@ -833,7 +833,7 @@ lookup:
                __dlm_insert_mle(dlm, mle);
 
                /* still holding the dlm spinlock, check the recovery map
-                * to see if there are any nodes that still need to be 
+                * to see if there are any nodes that still need to be
                 * considered.  these will not appear in the mle nodemap
                 * but they might own this lockres.  wait on them. */
                bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
@@ -883,7 +883,7 @@ redo_request:
                                msleep(500);
                        }
                        continue;
-               } 
+               }
 
                dlm_kick_recovery_thread(dlm);
                msleep(1000);
@@ -939,8 +939,8 @@ wait:
                     res->lockname.name, blocked);
                if (++tries > 20) {
                        mlog(ML_ERROR, "%s:%.*s: spinning on "
-                            "dlm_wait_for_lock_mastery, blocked=%d\n", 
-                            dlm->name, res->lockname.len, 
+                            "dlm_wait_for_lock_mastery, blocked=%d\n",
+                            dlm->name, res->lockname.len,
                             res->lockname.name, blocked);
                        dlm_print_one_lock_resource(res);
                        dlm_print_one_mle(mle);
@@ -1029,7 +1029,7 @@ recheck:
                ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
                b = (mle->type == DLM_MLE_BLOCK);
                if ((*blocked && !b) || (!*blocked && b)) {
-                       mlog(0, "%s:%.*s: status change: old=%d new=%d\n", 
+                       mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
                             dlm->name, res->lockname.len, res->lockname.name,
                             *blocked, b);
                        *blocked = b;
@@ -1602,7 +1602,7 @@ send_response:
                }
                mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
                             dlm->node_num, res->lockname.len, res->lockname.name);
-               ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, 
+               ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
                                                 DLM_ASSERT_MASTER_MLE_CLEANUP);
                if (ret < 0) {
                        mlog(ML_ERROR, "failed to dispatch assert master work\n");
@@ -1701,7 +1701,7 @@ again:
 
                if (r & DLM_ASSERT_RESPONSE_REASSERT) {
                        mlog(0, "%.*s: node %u create mles on other "
-                            "nodes and requests a re-assert\n", 
+                            "nodes and requests a re-assert\n",
                             namelen, lockname, to);
                        reassert = 1;
                }
@@ -1812,7 +1812,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
                                spin_unlock(&dlm->master_lock);
                                spin_unlock(&dlm->spinlock);
                                goto done;
-                       }       
+                       }
                }
        }
        spin_unlock(&dlm->master_lock);
@@ -1883,7 +1883,7 @@ ok:
                int extra_ref = 0;
                int nn = -1;
                int rr, err = 0;
-               
+
                spin_lock(&mle->spinlock);
                if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
                        extra_ref = 1;
@@ -1891,7 +1891,7 @@ ok:
                        /* MASTER mle: if any bits set in the response map
                         * then the calling node needs to re-assert to clear
                         * up nodes that this node contacted */
-                       while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, 
+                       while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
                                                    nn+1)) < O2NM_MAX_NODES) {
                                if (nn != dlm->node_num && nn != assert->node_idx)
                                        master_request = 1;
@@ -2002,7 +2002,7 @@ kill:
        __dlm_print_one_lock_resource(res);
        spin_unlock(&res->spinlock);
        spin_unlock(&dlm->spinlock);
-       *ret_data = (void *)res; 
+       *ret_data = (void *)res;
        dlm_put(dlm);
        return -EINVAL;
 }
@@ -2040,10 +2040,10 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
        item->u.am.request_from = request_from;
        item->u.am.flags = flags;
 
-       if (ignore_higher) 
-               mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len, 
+       if (ignore_higher)
+               mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
                     res->lockname.name);
-               
+
        spin_lock(&dlm->work_lock);
        list_add_tail(&item->list, &dlm->work_list);
        spin_unlock(&dlm->work_lock);
@@ -2133,7 +2133,7 @@ put:
  * think that $RECOVERY is currently mastered by a dead node.  If so,
  * we wait a short time to allow that node to get notified by its own
  * heartbeat stack, then check again.  All $RECOVERY lock resources
- * mastered by dead nodes are purged when the hearbeat callback is 
+ * mastered by dead nodes are purged when the heartbeat callback is
  * fired, so we can know for sure that it is safe to continue once
  * the node returns a live node or no node.  */
 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
@@ -2174,7 +2174,7 @@ static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
                                ret = -EAGAIN;
                        }
                        spin_unlock(&dlm->spinlock);
-                       mlog(0, "%s: reco lock master is %u\n", dlm->name, 
+                       mlog(0, "%s: reco lock master is %u\n", dlm->name,
                             master);
                        break;
                }
@@ -2602,7 +2602,7 @@ fail:
 
                        mlog(0, "%s:%.*s: timed out during migration\n",
                             dlm->name, res->lockname.len, res->lockname.name);
-                       /* avoid hang during shutdown when migrating lockres 
+                       /* avoid hang during shutdown when migrating lockres
                         * to a node which also goes down */
                        if (dlm_is_node_dead(dlm, target)) {
                                mlog(0, "%s:%.*s: expected migration "
@@ -2738,7 +2738,7 @@ static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
        can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
        spin_unlock(&res->spinlock);
 
-       /* target has died, so make the caller break out of the 
+       /* target has died, so make the caller break out of the
         * wait_event, but caller must recheck the domain_map */
        spin_lock(&dlm->spinlock);
        if (!test_bit(mig_target, dlm->domain_map))
index 2f9e4e19a4f2a3ca95bc82607ce369fd1dd43b59..344bcf90cbf4966af4821accc0e61183e6b4be56 100644 (file)
@@ -1050,7 +1050,7 @@ static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
                                if (lock->ml.node == dead_node) {
                                        mlog(0, "AHA! there was "
                                             "a $RECOVERY lock for dead "
-                                            "node %u (%s)!\n", 
+                                            "node %u (%s)!\n",
                                             dead_node, dlm->name);
                                        list_del_init(&lock->list);
                                        dlm_lock_put(lock);
@@ -1164,6 +1164,39 @@ static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
        mres->master = master;
 }
 
+static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
+                                         struct dlm_migratable_lockres *mres,
+                                         int queue)
+{
+       if (!lock->lksb)
+               return;
+
+       /* Ignore lvb in all locks in the blocked list */
+       if (queue == DLM_BLOCKED_LIST)
+               return;
+
+       /* Only consider lvbs in locks with granted EX or PR lock levels */
+       if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
+               return;
+
+       if (dlm_lvb_is_empty(mres->lvb)) {
+               memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
+               return;
+       }
+
+       /* Ensure the lvb copied for migration matches in other valid locks */
+       if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
+               return;
+
+       mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
+            "node=%u\n",
+            dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+            dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+            lock->lockres->lockname.len, lock->lockres->lockname.name,
+            lock->ml.node);
+       dlm_print_one_lock_resource(lock->lockres);
+       BUG();
+}
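
/*
 * Sketch of the intended call pattern (illustrative, not part of the
 * commit; dlm_add_lock_to_array() below is the real caller): the
 * helper runs once per lock while a lockres is packed for migration,
 * copying the lvb from the first granted EX/PR lock it sees and
 * merely cross-checking every later one.
 */
static void dlm_pack_queue_lvbs(struct dlm_lock_resource *res,
                                struct dlm_migratable_lockres *mres)
{
        struct dlm_lock *lock;
        int i;

        for (i = DLM_GRANTED_LIST; i <= DLM_BLOCKED_LIST; i++)
                list_for_each_entry(lock, dlm_list_idx_to_ptr(res, i), list)
                        dlm_prepare_lvb_for_migration(lock, mres, i);
}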
 
 /* returns 1 if this lock fills the network structure,
  * 0 otherwise */
@@ -1181,20 +1214,7 @@ static int dlm_add_lock_to_array(struct dlm_lock *lock,
        ml->list = queue;
        if (lock->lksb) {
                ml->flags = lock->lksb->flags;
-               /* send our current lvb */
-               if (ml->type == LKM_EXMODE ||
-                   ml->type == LKM_PRMODE) {
-                       /* if it is already set, this had better be a PR
-                        * and it has to match */
-                       if (!dlm_lvb_is_empty(mres->lvb) &&
-                           (ml->type == LKM_EXMODE ||
-                            memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) {
-                               mlog(ML_ERROR, "mismatched lvbs!\n");
-                               dlm_print_one_lock_resource(lock->lockres);
-                               BUG();
-                       }
-                       memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
-               }
+               dlm_prepare_lvb_for_migration(lock, mres, queue);
        }
        ml->node = lock->ml.node;
        mres->num_locks++;
@@ -1730,6 +1750,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
        struct dlm_lock *lock = NULL;
        u8 from = O2NM_MAX_NODES;
        unsigned int added = 0;
+       __be64 c;
 
        mlog(0, "running %d locks for this lockres\n", mres->num_locks);
        for (i=0; i<mres->num_locks; i++) {
@@ -1777,19 +1798,48 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
                        /* lock is always created locally first, and
                         * destroyed locally last.  it must be on the list */
                        if (!lock) {
-                               __be64 c = ml->cookie;
-                               mlog(ML_ERROR, "could not find local lock "
-                                              "with cookie %u:%llu!\n",
+                               c = ml->cookie;
+                               mlog(ML_ERROR, "Could not find local lock "
+                                              "with cookie %u:%llu, node %u, "
+                                              "list %u, flags 0x%x, type %d, "
+                                              "conv %d, highest blocked %d\n",
                                     dlm_get_lock_cookie_node(be64_to_cpu(c)),
-                                    dlm_get_lock_cookie_seq(be64_to_cpu(c)));
+                                    dlm_get_lock_cookie_seq(be64_to_cpu(c)),
+                                    ml->node, ml->list, ml->flags, ml->type,
+                                    ml->convert_type, ml->highest_blocked);
+                               __dlm_print_one_lock_resource(res);
+                               BUG();
+                       }
+
+                       if (lock->ml.node != ml->node) {
+                               c = lock->ml.cookie;
+                               mlog(ML_ERROR, "Mismatched node# in lock "
+                                    "cookie %u:%llu, name %.*s, node %u\n",
+                                    dlm_get_lock_cookie_node(be64_to_cpu(c)),
+                                    dlm_get_lock_cookie_seq(be64_to_cpu(c)),
+                                    res->lockname.len, res->lockname.name,
+                                    lock->ml.node);
+                               c = ml->cookie;
+                               mlog(ML_ERROR, "Migrate lock cookie %u:%llu, "
+                                    "node %u, list %u, flags 0x%x, type %d, "
+                                    "conv %d, highest blocked %d\n",
+                                    dlm_get_lock_cookie_node(be64_to_cpu(c)),
+                                    dlm_get_lock_cookie_seq(be64_to_cpu(c)),
+                                    ml->node, ml->list, ml->flags, ml->type,
+                                    ml->convert_type, ml->highest_blocked);
                                __dlm_print_one_lock_resource(res);
                                BUG();
                        }
-                       BUG_ON(lock->ml.node != ml->node);
 
                        if (tmpq != queue) {
-                               mlog(0, "lock was on %u instead of %u for %.*s\n",
-                                    j, ml->list, res->lockname.len, res->lockname.name);
+                               c = ml->cookie;
+                               mlog(0, "Lock cookie %u:%llu was on list %u "
+                                    "instead of list %u for %.*s\n",
+                                    dlm_get_lock_cookie_node(be64_to_cpu(c)),
+                                    dlm_get_lock_cookie_seq(be64_to_cpu(c)),
+                                    j, ml->list, res->lockname.len,
+                                    res->lockname.name);
+                               __dlm_print_one_lock_resource(res);
                                spin_unlock(&res->spinlock);
                                continue;
                        }
@@ -1839,7 +1889,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
                                 * the lvb. */
                                memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
                        } else {
-                               /* otherwise, the node is sending its 
+                               /* otherwise, the node is sending its
                                 * most recent valid lvb info */
                                BUG_ON(ml->type != LKM_EXMODE &&
                                       ml->type != LKM_PRMODE);
@@ -1886,7 +1936,7 @@ skip_lvb:
                spin_lock(&res->spinlock);
                list_for_each_entry(lock, queue, list) {
                        if (lock->ml.cookie == ml->cookie) {
-                               __be64 c = lock->ml.cookie;
+                               c = lock->ml.cookie;
                                mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
                                     "exists on this lockres!\n", dlm->name,
                                     res->lockname.len, res->lockname.name,
@@ -2114,7 +2164,7 @@ static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
        assert_spin_locked(&res->spinlock);
 
        if (res->owner == dlm->node_num)
-               /* if this node owned the lockres, and if the dead node 
+               /* if this node owned the lockres, and if the dead node
                 * had an EX when he died, blank out the lvb */
                search_node = dead_node;
        else {
@@ -2152,7 +2202,7 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
 
        /* this node is the lockres master:
         * 1) remove any stale locks for the dead node
-        * 2) if the dead node had an EX when he died, blank out the lvb 
+        * 2) if the dead node had an EX when he died, blank out the lvb
         */
        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&res->spinlock);
@@ -2193,7 +2243,12 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
                mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
                     "dropping ref from lockres\n", dlm->name,
                     res->lockname.len, res->lockname.name, freed, dead_node);
-               BUG_ON(!test_bit(dead_node, res->refmap));
+               if (!test_bit(dead_node, res->refmap)) {
+                       mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, "
+                            "but ref was not set\n", dlm->name,
+                            res->lockname.len, res->lockname.name, freed, dead_node);
+                       __dlm_print_one_lock_resource(res);
+               }
                dlm_lockres_clear_refmap_bit(dead_node, res);
        } else if (test_bit(dead_node, res->refmap)) {
                mlog(0, "%s:%.*s: dead node %u had a ref, but had "
@@ -2260,7 +2315,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
                                }
                                spin_unlock(&res->spinlock);
                                continue;
-                       }                       
+                       }
                        spin_lock(&res->spinlock);
                        /* zero the lvb if necessary */
                        dlm_revalidate_lvb(dlm, res, dead_node);
@@ -2411,7 +2466,7 @@ static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
  * this function on each node racing to become the recovery
  * master will not stop attempting this until either:
  * a) this node gets the EX (and becomes the recovery master),
- * or b) dlm->reco.new_master gets set to some nodenum 
+ * or b) dlm->reco.new_master gets set to some nodenum
  * != O2NM_INVALID_NODE_NUM (another node will do the reco).
  * so each time a recovery master is needed, the entire cluster
  * will sync at this point.  if the new master dies, that will
@@ -2424,7 +2479,7 @@ static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
 
        mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
             dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
-again: 
+again:
        memset(&lksb, 0, sizeof(lksb));
 
        ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
@@ -2437,8 +2492,8 @@ again:
        if (ret == DLM_NORMAL) {
                mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
                     dlm->name, dlm->node_num);
-               
-               /* got the EX lock.  check to see if another node 
+
+               /* got the EX lock.  check to see if another node
                 * just became the reco master */
                if (dlm_reco_master_ready(dlm)) {
                        mlog(0, "%s: got reco EX lock, but %u will "
@@ -2451,12 +2506,12 @@ again:
                        /* see if recovery was already finished elsewhere */
                        spin_lock(&dlm->spinlock);
                        if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
-                               status = -EINVAL;       
+                               status = -EINVAL;
                                mlog(0, "%s: got reco EX lock, but "
                                     "node got recovered already\n", dlm->name);
                                if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
                                        mlog(ML_ERROR, "%s: new master is %u "
-                                            "but no dead node!\n", 
+                                            "but no dead node!\n",
                                             dlm->name, dlm->reco.new_master);
                                        BUG();
                                }
@@ -2468,7 +2523,7 @@ again:
                 * set the master and send the messages to begin recovery */
                if (!status) {
                        mlog(0, "%s: dead=%u, this=%u, sending "
-                            "begin_reco now\n", dlm->name, 
+                            "begin_reco now\n", dlm->name,
                             dlm->reco.dead_node, dlm->node_num);
                        status = dlm_send_begin_reco_message(dlm,
                                      dlm->reco.dead_node);
@@ -2501,7 +2556,7 @@ again:
                mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
                     dlm->name, dlm->node_num);
                /* another node is master. wait on
-                * reco.new_master != O2NM_INVALID_NODE_NUM 
+                * reco.new_master != O2NM_INVALID_NODE_NUM
                 * for at most one second */
                wait_event_timeout(dlm->dlm_reco_thread_wq,
                                         dlm_reco_master_ready(dlm),
@@ -2589,7 +2644,13 @@ retry:
                             "begin reco msg (%d)\n", dlm->name, nodenum, ret);
                        ret = 0;
                }
-               if (ret == -EAGAIN) {
+
+               /*
+                * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8,
+                * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN.
+                * We are handling both for compatibility reasons.
+                */
+               if (ret == -EAGAIN || ret == EAGAIN) {
                        mlog(0, "%s: trying to start recovery of node "
                             "%u, but node %u is waiting for last recovery "
                             "to complete, backoff for a bit\n", dlm->name,
@@ -2599,7 +2660,7 @@ retry:
                }
                if (ret < 0) {
                        struct dlm_lock_resource *res;
-                       /* this is now a serious problem, possibly ENOMEM 
+                       /* this is now a serious problem, possibly ENOMEM
                         * in the network stack.  must retry */
                        mlog_errno(ret);
                        mlog(ML_ERROR, "begin reco of dlm %s to node %u "
@@ -2612,7 +2673,7 @@ retry:
                        } else {
                                mlog(ML_ERROR, "recovery lock not found\n");
                        }
-                       /* sleep for a bit in hopes that we can avoid 
+                       /* sleep for a bit in hopes that we can avoid
                         * another ENOMEM */
                        msleep(100);
                        goto retry;
@@ -2664,7 +2725,7 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
        }
        if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
                mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
-                    "node %u changing it to %u\n", dlm->name, 
+                    "node %u changing it to %u\n", dlm->name,
                     dlm->reco.dead_node, br->node_idx, br->dead_node);
        }
        dlm_set_reco_master(dlm, br->node_idx);
@@ -2730,8 +2791,8 @@ stage2:
                if (ret < 0) {
                        mlog_errno(ret);
                        if (dlm_is_host_down(ret)) {
-                               /* this has no effect on this recovery 
-                                * session, so set the status to zero to 
+                               /* this has no effect on this recovery
+                                * session, so set the status to zero to
                                 * finish out the last recovery */
                                mlog(ML_ERROR, "node %u went down after this "
                                     "node finished recovery.\n", nodenum);
@@ -2768,7 +2829,7 @@ int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
        mlog(0, "%s: node %u finalizing recovery stage%d of "
             "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
             fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);
- 
+
        spin_lock(&dlm->spinlock);
 
        if (dlm->reco.new_master != fr->node_idx) {
index 00f53b2aea76a21a955eeac0c403a81c954947e8..49e29ecd02017be2144c2f50df76b8b0f761ad2d 100644 (file)
@@ -190,8 +190,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
                        actions &= ~(DLM_UNLOCK_REMOVE_LOCK|
                                     DLM_UNLOCK_REGRANT_LOCK|
                                     DLM_UNLOCK_CLEAR_CONVERT_TYPE);
-               } else if (status == DLM_RECOVERING || 
-                          status == DLM_MIGRATING || 
+               } else if (status == DLM_RECOVERING ||
+                          status == DLM_MIGRATING ||
                           status == DLM_FORWARD) {
                        /* must clear the actions because this unlock
                         * is about to be retried.  cannot free or do
@@ -661,14 +661,14 @@ retry:
        if (call_ast) {
                mlog(0, "calling unlockast(%p, %d)\n", data, status);
                if (is_master) {
-                       /* it is possible that there is one last bast 
+                       /* it is possible that there is one last bast
                         * pending.  make sure it is flushed, then
                         * call the unlockast.
                         * not an issue if this is a mastered remotely,
                         * since this lock has been removed from the
                         * lockres queues and cannot be found. */
                        dlm_kick_thread(dlm, NULL);
-                       wait_event(dlm->ast_wq, 
+                       wait_event(dlm->ast_wq,
                                   dlm_lock_basts_flushed(dlm, lock));
                }
                (*unlockast)(data, status);
index c5e4a49e3a1257b48a08abe717236cd713b4dd1f..e044019cb3b12e41da5ca6139d3bd79a778bbf85 100644 (file)
@@ -875,6 +875,14 @@ static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lo
                lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
 
        lockres->l_level = lockres->l_requested;
+
+       /*
+        * We set the OCFS2_LOCK_UPCONVERT_FINISHING flag before clearing
+        * the OCFS2_LOCK_BUSY flag to prevent the dc thread from
+        * downconverting the lock before the upconvert has fully completed.
+        */
+       lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
+
        lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
 
        mlog_exit_void();
@@ -907,8 +915,6 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
 
        assert_spin_locked(&lockres->l_lock);
 
-       lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
-
        if (level > lockres->l_blocking) {
                /* only schedule a downconvert if we haven't already scheduled
                 * one that goes low enough to satisfy the level we're
@@ -921,6 +927,9 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
                lockres->l_blocking = level;
        }
 
+       if (needs_downconvert)
+               lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
+
        mlog_exit(needs_downconvert);
        return needs_downconvert;
 }
@@ -1133,6 +1142,7 @@ static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
        mlog_entry_void();
        spin_lock_irqsave(&lockres->l_lock, flags);
        lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
+       lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
        if (convert)
                lockres->l_action = OCFS2_AST_INVALID;
        else
@@ -1323,13 +1333,13 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
 again:
        wait = 0;
 
+       spin_lock_irqsave(&lockres->l_lock, flags);
+
        if (catch_signals && signal_pending(current)) {
                ret = -ERESTARTSYS;
-               goto out;
+               goto unlock;
        }
 
-       spin_lock_irqsave(&lockres->l_lock, flags);
-
        mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
                        "Cluster lock called on freeing lockres %s! flags "
                        "0x%lx\n", lockres->l_name, lockres->l_flags);
@@ -1346,6 +1356,25 @@ again:
                goto unlock;
        }
 
+       if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) {
+               /*
+                * We've upconverted. If the lock now has a level we can
+                * work with, we take it. If, however, the lock is not at the
+                * required level, we go thru the full cycle. One way this could
+                * happen is if a process requesting an upconvert to PR is
+                * closely followed by another requesting upconvert to an EX.
+                * If the process requesting EX lands here, we want it to
+                * continue attempting to upconvert and let the process
+                * requesting PR take the lock.
+                * If multiple processes request upconvert to PR, the first one
+                * here will take the lock. The others will have to go thru the
+                * OCFS2_LOCK_BLOCKED check to ensure that there is no pending
+                * downconvert request.
+                */
+               if (level <= lockres->l_level)
+                       goto update_holders;
+       }
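
/*
 * Hypothetical interleaving this check resolves (illustrative trace,
 * not from the commit):
 *
 *   P1: cluster_lock(PR)  -> dlm_lock() sent, OCFS2_LOCK_BUSY set
 *   P2: cluster_lock(EX)  -> waits on BUSY
 *   ast: l_level = PR, UPCONVERT_FINISHING set, BUSY cleared
 *   P1: level PR <= l_level -> jumps to update_holders, takes it
 *   P2: level EX >  l_level -> falls through, retries the full
 *                              upconvert cycle
 */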
+
        if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
            !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
                /* is the lock is currently blocked on behalf of
@@ -1416,11 +1445,14 @@ again:
                goto again;
        }
 
+update_holders:
        /* Ok, if we get here then we're good to go. */
        ocfs2_inc_holders(lockres, level);
 
        ret = 0;
 unlock:
+       lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
+
        spin_unlock_irqrestore(&lockres->l_lock, flags);
 out:
        /*
@@ -3155,7 +3187,7 @@ out:
 /* Mark the lockres as being dropped. It will no longer be
  * queued if blocking, but we still may have to wait on it
  * being dequeued from the downconvert thread before we can consider
- * it safe to drop. 
+ * it safe to drop.
  *
  * You can *not* attempt to call cluster_lock on this lockres anymore. */
 void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres)
@@ -3352,6 +3384,7 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb,
        unsigned long flags;
        int blocking;
        int new_level;
+       int level;
        int ret = 0;
        int set_lvb = 0;
        unsigned int gen;
@@ -3360,9 +3393,17 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb,
 
        spin_lock_irqsave(&lockres->l_lock, flags);
 
-       BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
-
 recheck:
+       /*
+        * Is it still blocking? If not, we have no more work to do.
+        */
+       if (!(lockres->l_flags & OCFS2_LOCK_BLOCKED)) {
+               BUG_ON(lockres->l_blocking != DLM_LOCK_NL);
+               spin_unlock_irqrestore(&lockres->l_lock, flags);
+               ret = 0;
+               goto leave;
+       }
+
        if (lockres->l_flags & OCFS2_LOCK_BUSY) {
                /* XXX
                 * This is a *big* race.  The OCFS2_LOCK_PENDING flag
@@ -3401,6 +3442,31 @@ recheck:
                goto leave;
        }
 
+       /*
+        * This prevents livelocks. OCFS2_LOCK_UPCONVERT_FINISHING flag is
+        * set when the ast is received for an upconvert just before the
+        * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast
+        * on the heels of the ast, we want to delay the downconvert just
+        * enough to allow the upconvert requestor to do its task. Because this
+        * lock is in the blocked queue, the lock will be downconverted
+        * as soon as the requestor is done with the lock.
+        */
+       if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING)
+               goto leave_requeue;
+
+       /*
+        * How can we block and yet be at NL?  We were trying to upconvert
+        * from NL and got canceled.  The code comes back here, and now
+        * we notice and clear BLOCKING.
+        */
+       if (lockres->l_level == DLM_LOCK_NL) {
+               BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders);
+               lockres->l_blocking = DLM_LOCK_NL;
+               lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
+               spin_unlock_irqrestore(&lockres->l_lock, flags);
+               goto leave;
+       }
+
        /* if we're blocking an exclusive and we have *any* holders,
         * then requeue. */
        if ((lockres->l_blocking == DLM_LOCK_EX)
@@ -3438,6 +3504,7 @@ recheck:
         * may sleep, so we save off a copy of what we're blocking as
         * it may change while we're not holding the spin lock. */
        blocking = lockres->l_blocking;
+       level = lockres->l_level;
        spin_unlock_irqrestore(&lockres->l_lock, flags);
 
        ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
@@ -3446,7 +3513,7 @@ recheck:
                goto leave;
 
        spin_lock_irqsave(&lockres->l_lock, flags);
-       if (blocking != lockres->l_blocking) {
+       if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) {
                /* If this changed underneath us, then we can't drop
                 * it just yet. */
                goto recheck;
index 15713cbb865c8ee66cde272a2e95bfb47ceb3429..19ad145d2af3171aaab2be2580dc89549ac37eeb 100644 (file)
@@ -239,7 +239,7 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
                mlog(0, "Encoding parent: blkno: %llu, generation: %u\n",
                     (unsigned long long)blkno, generation);
        }
-       
+
        *max_len = len;
 
 bail:
index d35a27f4523e13e2ec00411e08e6ebb2e1afbf76..5328529e7fd289e963da8cfa7b66d5706babb00c 100644 (file)
@@ -192,7 +192,7 @@ static int ocfs2_try_to_merge_extent_map(struct ocfs2_extent_map_item *emi,
                emi->ei_clusters += ins->ei_clusters;
                return 1;
        } else if ((ins->ei_phys + ins->ei_clusters) == emi->ei_phys &&
-                  (ins->ei_cpos + ins->ei_clusters) == emi->ei_phys &&
+                  (ins->ei_cpos + ins->ei_clusters) == emi->ei_cpos &&
                   ins->ei_flags == emi->ei_flags) {
                emi->ei_phys = ins->ei_phys;
                emi->ei_cpos = ins->ei_cpos;
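
/*
 * Worked example (numbers illustrative): emi spans cpos 100-109 at
 * phys 500-509.  An insert spanning cpos 90-99 at phys 490-499 may
 * merge in front, since 90 + 10 == 100 (cpos contiguous) and
 * 490 + 10 == 500 (phys contiguous).  The old test compared the cpos
 * sum against emi->ei_phys, so legitimate front-merges were missed
 * and bogus ones could match.
 */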
index 06ccf6a86d35ca01a8f80e5a997a30ea8820f30a..558ce03124210049742360210efff968232c668f 100644 (file)
@@ -749,7 +749,7 @@ static int ocfs2_write_zero_page(struct inode *inode,
        int ret;
 
        offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
-       /* ugh.  in prepare/commit_write, if from==to==start of block, we 
+       /* ugh.  in prepare/commit_write, if from==to==start of block, we
        ** skip the prepare.  make sure we never send an offset for the start
        ** of a block
        */
@@ -1779,7 +1779,7 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
        struct inode *inode = dentry->d_inode;
        loff_t saved_pos, end;
 
-       /* 
+       /*
         * We start with a read level meta lock and only jump to an ex
         * if we need to make modifications here.
         */
@@ -2013,8 +2013,8 @@ out_dio:
        /* buffered aio wouldn't have proper lock coverage today */
        BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
 
-       if ((file->f_flags & O_DSYNC && !direct_io) || IS_SYNC(inode) ||
-           (file->f_flags & O_DIRECT && has_refcount)) {
+       if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
+           ((file->f_flags & O_DIRECT) && has_refcount)) {
                ret = filemap_fdatawrite_range(file->f_mapping, pos,
                                               pos + count - 1);
                if (ret < 0)
@@ -2033,7 +2033,7 @@ out_dio:
                                                      pos + count - 1);
        }
 
-       /* 
+       /*
         * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
         * function pointer which is called when o_direct io completes so that
         * it can unlock our rw lock.  (it's the clustered equivalent of
@@ -2198,7 +2198,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
                goto bail;
        }
 
-       /* 
+       /*
         * buffered reads protect themselves in ->readpage().  O_DIRECT reads
         * need locks to protect pending reads from racing with truncate.
         */
@@ -2220,10 +2220,10 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
         * We're fine letting folks race truncates and extending
         * writes with read across the cluster, just like they can
         * locally. Hence no rw_lock during read.
-        * 
+        *
         * Take and drop the meta data lock to update inode fields
         * like i_size. This allows the checks down below
-        * generic_file_aio_read() a chance of actually working. 
+        * generic_file_aio_read() a chance of actually working.
         */
        ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level);
        if (ret < 0) {
@@ -2248,7 +2248,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
 bail:
        if (have_alloc_sem)
                up_read(&inode->i_alloc_sem);
-       if (rw_level != -1) 
+       if (rw_level != -1)
                ocfs2_rw_unlock(inode, rw_level);
        mlog_exit(ret);
 
index 0297fb8982b861afdae1d974e9fdae96bf0a1f13..88459bdd1ff37eb538485b796174086ffd421138 100644 (file)
@@ -475,7 +475,7 @@ static int ocfs2_read_locked_inode(struct inode *inode,
        if (args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY) {
                status = ocfs2_try_open_lock(inode, 0);
                if (status) {
-                       make_bad_inode(inode);  
+                       make_bad_inode(inode);
                        return status;
                }
        }
@@ -684,7 +684,7 @@ bail:
        return status;
 }
 
-/* 
+/*
  * Serialize with orphan dir recovery. If the process doing
  * recovery on this orphan dir does an iget() with the dir
  * i_mutex held, we'll deadlock here. Instead we detect this
index 31fbb061951035d3211e1041e5b39bb6ed8d6db9..7d9d9c132cef3a5d59a412aa4d0d3c26c10a5fb0 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <linux/fs.h>
 #include <linux/mount.h>
+#include <linux/compat.h>
 
 #define MLOG_MASK_PREFIX ML_INODE
 #include <cluster/masklog.h>
@@ -181,6 +182,10 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 #ifdef CONFIG_COMPAT
 long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 {
+       bool preserve;
+       struct reflink_arguments args;
+       struct inode *inode = file->f_path.dentry->d_inode;
+
        switch (cmd) {
        case OCFS2_IOC32_GETFLAGS:
                cmd = OCFS2_IOC_GETFLAGS;
@@ -195,8 +200,15 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
        case OCFS2_IOC_GROUP_EXTEND:
        case OCFS2_IOC_GROUP_ADD:
        case OCFS2_IOC_GROUP_ADD64:
-       case OCFS2_IOC_REFLINK:
                break;
+       case OCFS2_IOC_REFLINK:
+               if (copy_from_user(&args, (struct reflink_arguments *)arg,
+                                  sizeof(args)))
+                       return -EFAULT;
+               preserve = (args.preserve != 0);
+
+               return ocfs2_reflink_ioctl(inode, compat_ptr(args.old_path),
+                                          compat_ptr(args.new_path), preserve);
        default:
                return -ENOIOCTLCMD;
        }
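
/*
 * Why compat_ptr() (illustrative note): old_path and new_path arrive
 * from a 32-bit process as 32-bit user addresses embedded in
 * reflink_arguments, so they must be widened with compat_ptr()
 * rather than cast directly; on s390 this also clears the high bit
 * of the 31-bit address space.
 */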
index bf34c491ae966601f61f05897db6567fc216e6c8..9336c60e3a36a9bd72b155b5cf68ba8b20b4aa90 100644 (file)
@@ -2034,7 +2034,7 @@ static int ocfs2_queue_orphans(struct ocfs2_super *osb,
                status = -ENOENT;
                mlog_errno(status);
                return status;
-       }       
+       }
 
        mutex_lock(&orphan_dir_inode->i_mutex);
        status = ocfs2_inode_lock(orphan_dir_inode, NULL, 0);
index 9362eea7424be2421b461b76032d85733573c731..740f448041e2e66bef6792166f93c0df838a6785 100644 (file)
@@ -136,6 +136,10 @@ enum ocfs2_unlock_action {
 #define OCFS2_LOCK_PENDING       (0x00000400) /* This lockres is pending a
                                                 call to dlm_lock.  Only
                                                 exists with BUSY set. */
+#define OCFS2_LOCK_UPCONVERT_FINISHING (0x00000800) /* blocks the dc thread
+                                                    * from downconverting
+                                                    * before the upconvert
+                                                    * has completed */
 
 struct ocfs2_lock_res_ops;
 
index 1a1a679e51b5dd235a8bef0d45212fb8626eae9b..7638a38c32bc61659995bb7c1d476e54f7b08963 100644 (file)
@@ -1417,9 +1417,16 @@ static inline int ocfs2_fast_symlink_chars(int blocksize)
        return blocksize - offsetof(struct ocfs2_dinode, id2.i_symlink);
 }
 
-static inline int ocfs2_max_inline_data(int blocksize)
+static inline int ocfs2_max_inline_data_with_xattr(int blocksize,
+                                                  struct ocfs2_dinode *di)
 {
-       return blocksize - offsetof(struct ocfs2_dinode, id2.i_data.id_data);
+       if (di && (di->i_dyn_features & OCFS2_INLINE_XATTR_FL))
+               return blocksize -
+                       offsetof(struct ocfs2_dinode, id2.i_data.id_data) -
+                       di->i_xattr_inline_size;
+       else
+               return blocksize -
+                       offsetof(struct ocfs2_dinode, id2.i_data.id_data);
 }
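
/*
 * Hypothetical caller sketch (not part of the commit): deciding
 * whether a write can stay inline must now account for any inline
 * xattr area sharing the same block.
 */
static inline int ocfs2_size_fits_inline_data(struct super_block *sb,
                                              struct ocfs2_dinode *di,
                                              u64 new_size)
{
        return new_size <= ocfs2_max_inline_data_with_xattr(sb->s_blocksize,
                                                            di);
}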
 
 static inline int ocfs2_extent_recs_per_inode(int blocksize)
index 74db2be75dd686220913207662dd0c04ea7c755d..8ae65c9c020c7f2769472a09668aad6737d933df 100644 (file)
@@ -2945,7 +2945,7 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
 
        while (offset < end) {
                page_index = offset >> PAGE_CACHE_SHIFT;
-               map_end = (page_index + 1) << PAGE_CACHE_SHIFT;
+               map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
                if (map_end > end)
                        map_end = end;
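
/*
 * Why the loff_t cast matters (illustrative): page_index is a
 * pgoff_t, i.e. unsigned long, so on a 32-bit kernel an index of
 * 0x100000 (offset 4 GiB with 4 KiB pages) would make
 * (page_index + 1) << PAGE_CACHE_SHIFT wrap to 0x1000 before being
 * widened; casting to loff_t first performs the shift in 64 bits.
 */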
 
@@ -2957,8 +2957,12 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
 
                page = grab_cache_page(mapping, page_index);
 
-               /* This page can't be dirtied before we CoW it out. */
-               BUG_ON(PageDirty(page));
+               /*
+                * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, this page
+                * can't be dirtied before we CoW it out.
+                */
+               if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
+                       BUG_ON(PageDirty(page));
 
                if (!PageUptodate(page)) {
                        ret = block_read_full_page(page, ocfs2_get_block);
@@ -3170,7 +3174,7 @@ static int ocfs2_cow_sync_writeback(struct super_block *sb,
 
        while (offset < end) {
                page_index = offset >> PAGE_CACHE_SHIFT;
-               map_end = (page_index + 1) << PAGE_CACHE_SHIFT;
+               map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
                if (map_end > end)
                        map_end = end;
 
index e49c41050264dfa9d36afc48e742b626ee9d2755..3038c92af4939438ab065f9f8cb123bc3c66a8b6 100644 (file)
@@ -277,7 +277,7 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
        u32 dlm_key;
        struct dlm_ctxt *dlm;
        struct o2dlm_private *priv;
-       struct dlm_protocol_version dlm_version;
+       struct dlm_protocol_version fs_version;
 
        BUG_ON(conn == NULL);
        BUG_ON(o2cb_stack.sp_proto == NULL);
@@ -304,18 +304,18 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
        /* used by the dlm code to make message headers unique, each
         * node in this domain must agree on this. */
        dlm_key = crc32_le(0, conn->cc_name, conn->cc_namelen);
-       dlm_version.pv_major = conn->cc_version.pv_major;
-       dlm_version.pv_minor = conn->cc_version.pv_minor;
+       fs_version.pv_major = conn->cc_version.pv_major;
+       fs_version.pv_minor = conn->cc_version.pv_minor;
 
-       dlm = dlm_register_domain(conn->cc_name, dlm_key, &dlm_version);
+       dlm = dlm_register_domain(conn->cc_name, dlm_key, &fs_version);
        if (IS_ERR(dlm)) {
                rc = PTR_ERR(dlm);
                mlog_errno(rc);
                goto out_free;
        }
 
-       conn->cc_version.pv_major = dlm_version.pv_major;
-       conn->cc_version.pv_minor = dlm_version.pv_minor;
+       conn->cc_version.pv_major = fs_version.pv_major;
+       conn->cc_version.pv_minor = fs_version.pv_minor;
        conn->cc_lockspace = dlm;
 
        dlm_register_eviction_cb(dlm, &priv->op_eviction_cb);
index 26069917a9f51752a18c8dea60940d0b00ca1054..755cd49a5ef3c41e535de82a36d4d534f1f06dfa 100644 (file)
@@ -1062,7 +1062,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
                                     "file system, but write access is "
                                     "unavailable.\n");
                        else
-                               mlog_errno(status);                     
+                               mlog_errno(status);
                        goto read_super_error;
                }
 
index 49b133ccbf113f79a62b58298b2e65405f3e4601..32499d213fc4f80f37efde6f71196283236896bf 100644 (file)
@@ -137,20 +137,20 @@ static void *ocfs2_fast_follow_link(struct dentry *dentry,
        }
 
        memcpy(link, target, len);
-       nd_set_link(nd, link);
 
 bail:
+       nd_set_link(nd, status ? ERR_PTR(status) : link);
        brelse(bh);
 
        mlog_exit(status);
-       return status ? ERR_PTR(status) : link;
+       return NULL;
 }
 
 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
 {
-       char *link = cookie;
-
-       kfree(link);
+       char *link = nd_get_link(nd);
+       if (!IS_ERR(link))
+               kfree(link);
 }
 
 const struct inode_operations ocfs2_symlink_inode_operations = {
index c61369342a276e17a6e35fcc79df544a2bd4419c..a0a120e82b9712fce6d128a4ab536be2b9dd2d0a 100644 (file)
@@ -267,8 +267,8 @@ static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci,
 }
 
 /* Warning: even if it returns true, this does *not* guarantee that
- * the block is stored in our inode metadata cache. 
- * 
+ * the block is stored in our inode metadata cache.
+ *
  * This can be called under lock_buffer()
  */
 int ocfs2_buffer_uptodate(struct ocfs2_caching_info *ci,
index 38a6948ce0c2cd7a50917c1a82570d851196f053..20f31567ccee324d4218dc65527412c4bf032c9a 100644 (file)
@@ -647,9 +647,9 @@ static inline int ata_id_has_large_logical_sectors(const u16 *id)
        return id[ATA_ID_SECTOR_SIZE] & (1 << 13);
 }
 
-static inline u8 ata_id_logical_per_physical_sectors(const u16 *id)
+static inline u16 ata_id_logical_per_physical_sectors(const u16 *id)
 {
-       return id[ATA_ID_SECTOR_SIZE] & 0xf;
+       return 1 << (id[ATA_ID_SECTOR_SIZE] & 0xf);
 }
 
 static inline int ata_id_has_lba48(const u16 *id)
index 5be3dab4a69547bf6bb378a45d73e553166c5b12..188fcae10a995f3509fe54eb4035e21259359692 100644 (file)
@@ -15,6 +15,7 @@
 # define __acquire(x)  __context__(x,1)
 # define __release(x)  __context__(x,-1)
 # define __cond_lock(x,c)      ((c) ? ({ __acquire(x); 1; }) : 0)
+# define __percpu      __attribute__((noderef, address_space(3)))
 extern void __chk_user_ptr(const volatile void __user *);
 extern void __chk_io_ptr(const volatile void __iomem *);
 #else
@@ -32,6 +33,7 @@ extern void __chk_io_ptr(const volatile void __iomem *);
 # define __acquire(x) (void)0
 # define __release(x) (void)0
 # define __cond_lock(x,c) (c)
+# define __percpu
 #endif
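
/*
 * Usage sketch (illustrative, not part of the commit): sparse now
 * flags direct dereferences of a __percpu pointer as an address
 * space violation; per_cpu_ptr() is the sanctioned conversion.
 */
int __percpu *counters;         /* e.g. counters = alloc_percpu(int); */

static void demo_bump(int cpu)
{
        (*per_cpu_ptr(counters, cpu))++;  /* ok: converts the pointer */
        /* *counters = 0;  <-- sparse: incorrect address space */
}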
 
 #ifdef __KERNEL__
index 99dc6d5cf7e59bc0412fbbd7afad788f04db933f..975837e7d6c093392d57e93b2374d669d245eeca 100644 (file)
@@ -17,7 +17,7 @@ struct linux_binprm;
 extern int ima_bprm_check(struct linux_binprm *bprm);
 extern int ima_inode_alloc(struct inode *inode);
 extern void ima_inode_free(struct inode *inode);
-extern int ima_path_check(struct path *path, int mask);
+extern int ima_file_check(struct file *file, int mask);
 extern void ima_file_free(struct file *file);
 extern int ima_file_mmap(struct file *file, unsigned long prot);
 extern void ima_counts_get(struct file *file);
@@ -38,7 +38,7 @@ static inline void ima_inode_free(struct inode *inode)
        return;
 }
 
-static inline int ima_path_check(struct path *path, int mask)
+static inline int ima_file_check(struct file *file, int mask)
 {
        return 0;
 }
index ba1ba0c5efd1b5047d9251acee2211043e4d0805..63d449807d9b0aaa7a65c743f518cf180761fd59 100644 (file)
@@ -11,6 +11,8 @@ struct nf_conntrack_ecache;
 struct netns_ct {
        atomic_t                count;
        unsigned int            expect_count;
+       unsigned int            htable_size;
+       struct kmem_cache       *nf_conntrack_cachep;
        struct hlist_nulls_head *hash;
        struct hlist_head       *expect_hash;
        struct hlist_nulls_head unconfirmed;
@@ -28,5 +30,6 @@ struct netns_ct {
 #endif
        int                     hash_vmalloc;
        int                     expect_vmalloc;
+       char                    *slabname;
 };
 #endif
index 2eb3814d6258e8d0f9da0be8c81560b20af5b9cf..9a4b8b7140794f3aab802d4a5d8264b53da169bc 100644 (file)
@@ -40,6 +40,7 @@ struct netns_ipv4 {
        struct xt_table         *iptable_security;
        struct xt_table         *nat_table;
        struct hlist_head       *nat_bysource;
+       unsigned int            nat_htable_size;
        int                     nat_vmalloced;
 #endif
 
index dac44a9356a52637e56669989c303f0b792aaa11..4cb47a159f028fa48a114efbf74cf79a9d828883 100644 (file)
@@ -657,9 +657,9 @@ asmlinkage void __init start_kernel(void)
        proc_caches_init();
        buffer_init();
        key_init();
+       radix_tree_init();
        security_init();
        vfs_caches_init(totalram_pages);
-       radix_tree_init();
        signals_init();
        /* rootfs populating might need page-writeback */
        page_writeback_init();
index 7faaa32fbf4f37d1d2efdedf9a3087a98455f7b6..e2ab064c6d418b21cc5467339f9a2a2e1909eb17 100644 (file)
@@ -880,6 +880,7 @@ void getboottime(struct timespec *ts)
 
        set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
 }
+EXPORT_SYMBOL_GPL(getboottime);
 
 /**
  * monotonic_to_bootbased - Convert the monotonic time to boot based.
@@ -889,6 +890,7 @@ void monotonic_to_bootbased(struct timespec *ts)
 {
        *ts = timespec_add_safe(*ts, total_sleep_time);
 }
+EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
 
 unsigned long get_seconds(void)
 {
index efddbf0926b283ae5ef292e076ff9900842c4dd5..9a0db5bbabe4c93c0fce3ee7c24f13f2fbf84dc0 100644 (file)
@@ -912,6 +912,9 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
                                goto out_pm;
 
                        err = -ENODEV;
+                       if (node < 0 || node >= MAX_NUMNODES)
+                               goto out_pm;
+
                        if (!node_state(node, N_HIGH_MEMORY))
                                goto out_pm;
 
index 8af95b2dddd62eb4baa6205c776681bf09b91a64..09d4f1e2e4a8cef1974929198e8df452597c40f6 100644 (file)
@@ -69,7 +69,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
 
 static int parse_opts(char *opts, struct p9_client *clnt)
 {
-       char *options;
+       char *options, *tmp_options;
        char *p;
        substring_t args[MAX_OPT_ARGS];
        int option;
@@ -81,12 +81,13 @@ static int parse_opts(char *opts, struct p9_client *clnt)
        if (!opts)
                return 0;
 
-       options = kstrdup(opts, GFP_KERNEL);
-       if (!options) {
+       tmp_options = kstrdup(opts, GFP_KERNEL);
+       if (!tmp_options) {
                P9_DPRINTK(P9_DEBUG_ERROR,
                                "failed to allocate copy of option string\n");
                return -ENOMEM;
        }
+       options = tmp_options;
 
        while ((p = strsep(&options, ",")) != NULL) {
                int token;
@@ -108,6 +109,13 @@ static int parse_opts(char *opts, struct p9_client *clnt)
                        break;
                case Opt_trans:
                        clnt->trans_mod = v9fs_get_trans_by_name(&args[0]);
+                       if (clnt->trans_mod == NULL) {
+                               P9_DPRINTK(P9_DEBUG_ERROR,
+                                  "Could not find request transport: %s\n",
+                                  (char *) &args[0]);
+                               ret = -EINVAL;
+                               goto free_and_return;
+                       }
                        break;
                case Opt_legacy:
                        clnt->dotu = 0;
@@ -117,7 +125,8 @@ static int parse_opts(char *opts, struct p9_client *clnt)
                }
        }
 
-       kfree(options);
+free_and_return:
+       kfree(tmp_options);
        return ret;
 }
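
/*
 * The bug being fixed, in miniature (illustrative, not from the
 * commit): strsep() advances the pointer it is handed, so freeing
 * that pointer afterwards frees the wrong address -- or NULL once the
 * string is fully consumed.  Keeping the original kstrdup() return in
 * a second variable preserves a valid kfree() target.
 */
static void demo_parse(void)
{
        char *tmp_options, *options, *p;

        tmp_options = options = kstrdup("port=564,trans=tcp", GFP_KERNEL);
        if (!tmp_options)
                return;
        while ((p = strsep(&options, ",")) != NULL)
                ;                     /* options is NULL when this ends */
        kfree(tmp_options);           /* kfree(options) here would leak */
}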
 
@@ -667,18 +676,12 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
        clnt->trans = NULL;
        spin_lock_init(&clnt->lock);
        INIT_LIST_HEAD(&clnt->fidlist);
-       clnt->fidpool = p9_idpool_create();
-       if (IS_ERR(clnt->fidpool)) {
-               err = PTR_ERR(clnt->fidpool);
-               clnt->fidpool = NULL;
-               goto error;
-       }
 
        p9_tag_init(clnt);
 
        err = parse_opts(options, clnt);
        if (err < 0)
-               goto error;
+               goto free_client;
 
        if (!clnt->trans_mod)
                clnt->trans_mod = v9fs_get_default_trans();
@@ -687,7 +690,14 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
                err = -EPROTONOSUPPORT;
                P9_DPRINTK(P9_DEBUG_ERROR,
                                "No transport defined or default transport\n");
-               goto error;
+               goto free_client;
+       }
+
+       clnt->fidpool = p9_idpool_create();
+       if (IS_ERR(clnt->fidpool)) {
+               err = PTR_ERR(clnt->fidpool);
+               clnt->fidpool = NULL;
+               goto put_trans;
        }
 
        P9_DPRINTK(P9_DEBUG_MUX, "clnt %p trans %p msize %d dotu %d\n",
@@ -695,19 +705,25 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
 
        err = clnt->trans_mod->create(clnt, dev_name, options);
        if (err)
-               goto error;
+               goto destroy_fidpool;
 
        if ((clnt->msize+P9_IOHDRSZ) > clnt->trans_mod->maxsize)
                clnt->msize = clnt->trans_mod->maxsize-P9_IOHDRSZ;
 
        err = p9_client_version(clnt);
        if (err)
-               goto error;
+               goto close_trans;
 
        return clnt;
 
-error:
-       p9_client_destroy(clnt);
+close_trans:
+       clnt->trans_mod->close(clnt);
+destroy_fidpool:
+       p9_idpool_destroy(clnt->fidpool);
+put_trans:
+       v9fs_put_trans(clnt->trans_mod);
+free_client:
+       kfree(clnt);
        return ERR_PTR(err);
 }
 EXPORT_SYMBOL(p9_client_create);
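
The p9_client_create() rework above replaces a single error: label, which called p9_client_destroy() on a possibly half-built client, with an unwind ladder: each failure jumps to the label that releases only what has already been acquired, in reverse order of acquisition. A minimal sketch of the idiom, with invented acquire_*/release_* names standing in for the 9p calls:

int acquire_a(void), acquire_b(void), acquire_c(void);
void release_a(void), release_b(void);

int build(void)
{
        int err;

        err = acquire_a();
        if (err)
                return err;             /* nothing to undo yet */

        err = acquire_b();
        if (err)
                goto undo_a;

        err = acquire_c();
        if (err)
                goto undo_b;

        return 0;                       /* fully built */

undo_b:
        release_b();
undo_a:
        release_a();
        return err;
}
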
@@ -1214,10 +1230,11 @@ static int p9_client_statsize(struct p9_wstat *wst, int optional)
 {
        int ret;
 
+       /* NOTE: size shouldn't include its own length */
        /* size[2] type[2] dev[4] qid[13] */
        /* mode[4] atime[4] mtime[4] length[8]*/
        /* name[s] uid[s] gid[s] muid[s] */
-       ret = 2+2+4+13+4+4+4+8+2+2+2+2;
+       ret = 2+4+13+4+4+4+8+2+2+2+2;
 
        if (wst->name)
                ret += strlen(wst->name);
@@ -1258,7 +1275,7 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
                wst->name, wst->uid, wst->gid, wst->muid, wst->extension,
                wst->n_uid, wst->n_gid, wst->n_muid);
 
-       req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size, wst);
+       req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size+2, wst);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto error;
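
The two hunks above fix the same 9P accounting quirk: a stat structure's leading size[2] field counts only the bytes that follow it, so p9_client_statsize() must not add those two bytes, while the stat buffer carried inside TWSTAT must put them back. A worked tally of the fixed part, following the comment block above:

/* type[2] + dev[4] + qid[13]                       = 19
 * mode[4] + atime[4] + mtime[4] + length[8]        = 20
 * length prefixes of name/uid/gid/muid, 2 each     =  8
 * fixed payload                                    = 47 bytes
 *
 * wst->size = 47 + the string bytes themselves, and the buffer
 * handed to TWSTAT is wst->size + 2 to cover size[2] itself. */
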
index be1cb909d8c00e5e9cac2a910fa2d8f793249779..31d0b05582a970e9c35b5e01e7d90d36ac25fd6b 100644 (file)
@@ -714,7 +714,7 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
        char *p;
        substring_t args[MAX_OPT_ARGS];
        int option;
-       char *options;
+       char *options, *tmp_options;
        int ret;
 
        opts->port = P9_PORT;
@@ -724,12 +724,13 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
        if (!params)
                return 0;
 
-       options = kstrdup(params, GFP_KERNEL);
-       if (!options) {
+       tmp_options = kstrdup(params, GFP_KERNEL);
+       if (!tmp_options) {
                P9_DPRINTK(P9_DEBUG_ERROR,
                                "failed to allocate copy of option string\n");
                return -ENOMEM;
        }
+       options = tmp_options;
 
        while ((p = strsep(&options, ",")) != NULL) {
                int token;
@@ -760,7 +761,8 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
                        continue;
                }
        }
-       kfree(options);
+
+       kfree(tmp_options);
        return 0;
 }
 
index 65cb29db03f8cbd1440e62dfece699e4c8ff4c4f..2c95a89c0f46464e379bb15ca79622149e9f051e 100644 (file)
@@ -166,7 +166,7 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
        char *p;
        substring_t args[MAX_OPT_ARGS];
        int option;
-       char *options;
+       char *options, *tmp_options;
        int ret;
 
        opts->port = P9_PORT;
@@ -177,12 +177,13 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
        if (!params)
                return 0;
 
-       options = kstrdup(params, GFP_KERNEL);
-       if (!options) {
+       tmp_options = kstrdup(params, GFP_KERNEL);
+       if (!tmp_options) {
                P9_DPRINTK(P9_DEBUG_ERROR,
                           "failed to allocate copy of option string\n");
                return -ENOMEM;
        }
+       options = tmp_options;
 
        while ((p = strsep(&options, ",")) != NULL) {
                int token;
@@ -216,7 +217,7 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
        }
        /* RQ must be at least as large as the SQ */
        opts->rq_depth = max(opts->rq_depth, opts->sq_depth);
-       kfree(options);
+       kfree(tmp_options);
        return 0;
 }
 
index ea1e3daabefeddb1dc6f46ddf5538b33cc3c0317..cb50f4ae5eefa1f5cde720f43daaf6f19e9034eb 100644 (file)
@@ -102,7 +102,8 @@ static void p9_virtio_close(struct p9_client *client)
        struct virtio_chan *chan = client->trans;
 
        mutex_lock(&virtio_9p_lock);
-       chan->inuse = false;
+       if (chan)
+               chan->inuse = false;
        mutex_unlock(&virtio_9p_lock);
 }
 
@@ -311,6 +312,7 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
        }
 
        client->trans = (void *)chan;
+       client->status = Connected;
        chan->client = client;
 
        return 0;
index b7c4224f4e7dee01288dd31f4581f7a8821c7a21..b10e3cdb08f87358ca64d0db8cf83c27f5ad624a 100644 (file)
@@ -377,6 +377,9 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
 
        if (acl->state == BT_CONNECTED &&
                        (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
+               acl->power_save = 1;
+               hci_conn_enter_active_mode(acl);
+
                if (lmp_esco_capable(hdev))
                        hci_setup_sync(sco, acl->handle);
                else
index 28517bad796c3181251bf76b1ddc459090d45deb..592da5c909c1bb482b8358c93ef213f41d7a4e70 100644 (file)
@@ -1699,6 +1699,7 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu
                break;
 
        case 0x1c:      /* SCO interval rejected */
+       case 0x1a:      /* Unsupported Remote Feature */
        case 0x1f:      /* Unspecified error */
                if (conn->out && conn->attempt < 2) {
                        conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
index 6cf526d06e2185787554e0eea6bb408192220221..fc6ec1e726527ac7b64ed084d2a766175c9401ff 100644 (file)
@@ -703,29 +703,9 @@ static void hidp_close(struct hid_device *hid)
 static int hidp_parse(struct hid_device *hid)
 {
        struct hidp_session *session = hid->driver_data;
-       struct hidp_connadd_req *req = session->req;
-       unsigned char *buf;
-       int ret;
-
-       buf = kmalloc(req->rd_size, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-
-       if (copy_from_user(buf, req->rd_data, req->rd_size)) {
-               kfree(buf);
-               return -EFAULT;
-       }
-
-       ret = hid_parse_report(session->hid, buf, req->rd_size);
-
-       kfree(buf);
-
-       if (ret)
-               return ret;
-
-       session->req = NULL;
 
-       return 0;
+       return hid_parse_report(session->hid, session->rd_data,
+                       session->rd_size);
 }
 
 static int hidp_start(struct hid_device *hid)
@@ -770,12 +750,24 @@ static int hidp_setup_hid(struct hidp_session *session,
        bdaddr_t src, dst;
        int err;
 
+       session->rd_data = kzalloc(req->rd_size, GFP_KERNEL);
+       if (!session->rd_data)
+               return -ENOMEM;
+
+       if (copy_from_user(session->rd_data, req->rd_data, req->rd_size)) {
+               err = -EFAULT;
+               goto fault;
+       }
+       session->rd_size = req->rd_size;
+
        hid = hid_allocate_device();
-       if (IS_ERR(hid))
-               return PTR_ERR(hid);
+       if (IS_ERR(hid)) {
+               err = PTR_ERR(hid);
+               goto fault;
+       }
 
        session->hid = hid;
-       session->req = req;
+
        hid->driver_data = session;
 
        baswap(&src, &bt_sk(session->ctrl_sock->sk)->src);
@@ -806,6 +798,10 @@ failed:
        hid_destroy_device(hid);
        session->hid = NULL;
 
+fault:
+       kfree(session->rd_data);
+       session->rd_data = NULL;
+
        return err;
 }
 
@@ -900,6 +896,9 @@ unlink:
                session->hid = NULL;
        }
 
+       kfree(session->rd_data);
+       session->rd_data = NULL;
+
 purge:
        skb_queue_purge(&session->ctrl_transmit);
        skb_queue_purge(&session->intr_transmit);
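
The hidp change moves the report-descriptor copy out of hidp_parse(), which ran after the original connadd request could already be gone, and into session setup: the session now owns a kernel copy in rd_data/rd_size, freed on the fault path and again at session teardown. A userspace sketch of the ownership pattern, with invented names:

#include <stdlib.h>
#include <string.h>

struct session {
        unsigned char *rd_data;
        size_t rd_size;
};

int session_set_descriptor(struct session *s,
                           const unsigned char *src, size_t len)
{
        s->rd_data = malloc(len);       /* kzalloc() in the kernel */
        if (!s->rd_data)
                return -1;
        memcpy(s->rd_data, src, len);   /* copy_from_user() in the kernel */
        s->rd_size = len;
        return 0;
}

void session_drop_descriptor(struct session *s)
{
        free(s->rd_data);
        s->rd_data = NULL;      /* safe to call twice: free(NULL) is a no-op */
        s->rd_size = 0;
}
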
index faf3d74c35863aeb24aff69033e1f4b9f9768219..a4e215d50c10ba23190c402f9b3d66a27d454bcb 100644 (file)
@@ -154,7 +154,9 @@ struct hidp_session {
        struct sk_buff_head ctrl_transmit;
        struct sk_buff_head intr_transmit;
 
-       struct hidp_connadd_req *req;
+       /* Report descriptor */
+       __u8 *rd_data;
+       uint rd_size;
 };
 
 static inline void hidp_schedule(struct hidp_session *session)
index fc5ee3296e224f4144a08ddc2ba0f01bb6b31895..89f4a59eb82b9a520886a35e6b3aa65a6286ea70 100644 (file)
@@ -252,7 +252,6 @@ static void rfcomm_session_timeout(unsigned long arg)
        BT_DBG("session %p state %ld", s, s->state);
 
        set_bit(RFCOMM_TIMED_OUT, &s->flags);
-       rfcomm_session_put(s);
        rfcomm_schedule(RFCOMM_SCHED_TIMEO);
 }
 
@@ -1151,7 +1150,11 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
                        break;
 
                case BT_DISCONN:
-                       rfcomm_session_put(s);
+                       /* When the socket is closed and we are not the
+                        * RFCOMM initiator, rfcomm_process_rx() already
+                        * calls rfcomm_session_put() */
+                       if (s->sock->sk->sk_state != BT_CLOSED)
+                               rfcomm_session_put(s);
                        break;
                }
        }
@@ -1920,6 +1923,7 @@ static inline void rfcomm_process_sessions(void)
                if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) {
                        s->state = BT_DISCONN;
                        rfcomm_send_disc(s, 0);
+                       rfcomm_session_put(s);
                        continue;
                }
 
index 57bc4d5b8d084c053cded6ebddfe66bf6255d9f5..cb1b3488b739837fcff71c10d36694f2ea3ab393 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <net/net_namespace.h>
+#include <linux/sched.h>
 
 #include <net/dst.h>
 
@@ -79,6 +80,7 @@ loop:
        while ((dst = next) != NULL) {
                next = dst->next;
                prefetch(&next->next);
+               cond_resched();
                if (likely(atomic_read(&dst->__refcnt))) {
                        last->next = dst;
                        last = dst;
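
dst_gc_task() runs in process (workqueue) context and can walk a very long list of dead dst entries, so the added cond_resched() offers the scheduler a chance once per entry instead of monopolising the CPU. The shape of the loop, as a kernel-context sketch (not buildable standalone):

struct dst_entry *dst, *next;   /* next starts at the dead-dst list head */

while ((dst = next) != NULL) {
        next = dst->next;
        cond_resched();         /* may sleep; legal in process context */
        /* ... requeue entries still referenced, free the rest ... */
}
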
index de0c2c726420e2044c55fd6235e15e915de16a51..2e692afdc55da7225570469c75cff9e34905dbb0 100644 (file)
@@ -3524,6 +3524,7 @@ static int pktgen_thread_worker(void *arg)
                        wait_event_interruptible_timeout(t->queue,
                                                         t->control != 0,
                                                         HZ/10);
+                       try_to_freeze();
                        continue;
                }
 
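The pktgen worker sleeps in its own wait loop, and a kernel thread that does so must call try_to_freeze() after every wakeup, or system suspend stalls waiting for it to enter the refrigerator. A kernel-context sketch of the freezer-aware loop this hunk completes:

set_freezable();
while (!kthread_should_stop()) {
        wait_event_interruptible_timeout(t->queue, t->control != 0, HZ / 10);
        try_to_freeze();        /* park here during suspend/hibernate */
        /* ... handle control flags, transmit queued packets ... */
}
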
index 57dfb9c8c4f23f0a2ab132843b3b42fb2e2dc203..ff16e9df196972fef0c097c3aa464438a2ad556b 100644 (file)
@@ -83,7 +83,7 @@ static struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_f
        va_list args;
 
        va_start(args, fmt);
-       vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args);
+       vsnprintf(slab_name_fmt, CCID_SLAB_NAME_LENGTH, fmt, args);
        va_end(args);
 
        slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0,
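
This is the sizeof-a-parameter trap: slab_name_fmt arrives as a plain char *, so sizeof(slab_name_fmt) is the pointer size (4 or 8), not the 32-byte buffer, and vsnprintf() truncated every slab name. The header hunk below gives the buffer length a shared constant instead. A runnable userspace demonstration of the bug:

#include <stdarg.h>
#include <stdio.h>

#define CCID_SLAB_NAME_LENGTH 32

static void fill_name(char *slab_name_fmt, const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        /* BUG: slab_name_fmt decayed to a pointer, so this limit is
         * sizeof(char *), i.e. 4 or 8, not 32. */
        vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args);
        va_end(args);
}

int main(void)
{
        char name[CCID_SLAB_NAME_LENGTH];

        fill_name(name, "ccid%u_hc_rx_slab", 2u);
        printf("'%s'\n", name); /* truncated to 'ccid2_h' on LP64 */
        return 0;
}
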
index 269958bf7fe91e12edd0686652d097b87b42b245..6df6f8ac963664e2174b55c890ce7f45cbe05128 100644 (file)
@@ -19,7 +19,9 @@
 #include <linux/list.h>
 #include <linux/module.h>
 
-#define CCID_MAX 255
+/* maximum value for a CCID (RFC 4340, 19.5) */
+#define CCID_MAX               255
+#define CCID_SLAB_NAME_LENGTH  32
 
 struct tcp_info;
 
@@ -49,8 +51,8 @@ struct ccid_operations {
        const char              *ccid_name;
        struct kmem_cache       *ccid_hc_rx_slab,
                                *ccid_hc_tx_slab;
-       char                    ccid_hc_rx_slab_name[32];
-       char                    ccid_hc_tx_slab_name[32];
+       char                    ccid_hc_rx_slab_name[CCID_SLAB_NAME_LENGTH];
+       char                    ccid_hc_tx_slab_name[CCID_SLAB_NAME_LENGTH];
        __u32                   ccid_hc_rx_obj_size,
                                ccid_hc_tx_obj_size;
        /* Interface Routines */
index bace1d8cbcfdad1cae8135ba1946ef2b548b6e61..f5b3464f124292ef26dcbaf7562355e7ab5d5a63 100644 (file)
@@ -161,8 +161,8 @@ static __init int dccpprobe_init(void)
        if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops))
                goto err0;
 
-       ret = try_then_request_module((register_jprobe(&dccp_send_probe) == 0),
-                                       "dccp");
+       try_then_request_module((ret = register_jprobe(&dccp_send_probe)) == 0,
+                               "dccp");
        if (ret)
                goto err1;
 
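The dccpprobe fix concerns how try_then_request_module() evaluates its condition: once up front and, if false, a second time after loading the module. The old code stored the macro's truth value in ret, which inverted the test (a successful register_jprobe() made ret non-zero) and never reflected the retry; assigning inside the condition makes ret track whichever attempt ran last. Roughly, sketched from <linux/kmod.h>:

/* Approximate shape of the macro: */
#define try_then_request_module(x, mod...) \
        ((x) ?: (__request_module(true, mod), (x)))

/* So the assignment must live inside the condition, and ret ends up
 * holding the return value of the final register_jprobe() attempt: */
try_then_request_module((ret = register_jprobe(&dccp_send_probe)) == 0,
                        "dccp");
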
index 06632762ba5f9150818040c19c7cb210e0edede5..90203e1b9187eac858d566cbfcd8bde09e604ab5 100644 (file)
@@ -925,10 +925,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
        if (t && !IS_ERR(t)) {
                struct arpt_getinfo info;
                const struct xt_table_info *private = t->private;
-
 #ifdef CONFIG_COMPAT
+               struct xt_table_info tmp;
+
                if (compat) {
-                       struct xt_table_info tmp;
                        ret = compat_table_info(private, &tmp);
                        xt_compat_flush_offsets(NFPROTO_ARP);
                        private = &tmp;
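
This get_info() hunk, repeated verbatim for ip_tables and ip6_tables below, hoists struct xt_table_info tmp out of the if (compat) block: "private = &tmp" let the address escape tmp's scope, so later uses of private dereferenced a dead stack slot. A runnable demonstration of the bug class:

#include <stdio.h>

int main(void)
{
        const int *private;

        {
                int tmp = 42;
                private = &tmp; /* address escapes tmp's scope */
        }                       /* tmp's lifetime ends here */

        printf("%d\n", *private);       /* undefined behaviour */
        return 0;
}
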
index 572330a552ef4e7f73f51f1ef23667baf28b645e..3ce53cf13d5a71d5068e96e593f0e65a3d5033f8 100644 (file)
@@ -1132,10 +1132,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
        if (t && !IS_ERR(t)) {
                struct ipt_getinfo info;
                const struct xt_table_info *private = t->private;
-
 #ifdef CONFIG_COMPAT
+               struct xt_table_info tmp;
+
                if (compat) {
-                       struct xt_table_info tmp;
                        ret = compat_table_info(private, &tmp);
                        xt_compat_flush_offsets(AF_INET);
                        private = &tmp;
index d171b123a656dd9aba7d8e7d52d7f43c92a410e7..d1ea38a7c490befaaf23456ea8b774aa6ada6fd9 100644 (file)
@@ -210,7 +210,7 @@ static ctl_table ip_ct_sysctl_table[] = {
        },
        {
                .procname       = "ip_conntrack_buckets",
-               .data           = &nf_conntrack_htable_size,
+               .data           = &init_net.ct.htable_size,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0444,
                .proc_handler   = proc_dointvec,
index 8668a3defda6bd212170ff0b26b400bb60274f9d..2fb7b76da94fafed76c19f9a0fe8396ae5bbf321 100644 (file)
@@ -32,7 +32,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
        struct hlist_nulls_node *n;
 
        for (st->bucket = 0;
-            st->bucket < nf_conntrack_htable_size;
+            st->bucket < net->ct.htable_size;
             st->bucket++) {
                n = rcu_dereference(net->ct.hash[st->bucket].first);
                if (!is_a_nulls(n))
@@ -50,7 +50,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
        head = rcu_dereference(head->next);
        while (is_a_nulls(head)) {
                if (likely(get_nulls_value(head) == st->bucket)) {
-                       if (++st->bucket >= nf_conntrack_htable_size)
+                       if (++st->bucket >= net->ct.htable_size)
                                return NULL;
                }
                head = rcu_dereference(net->ct.hash[st->bucket].first);
index fe1a64479dd088de80cd51226db941609bfff658..26066a2327ad3dadba650ff0a8524a9069441f24 100644 (file)
@@ -35,9 +35,6 @@ static DEFINE_SPINLOCK(nf_nat_lock);
 
 static struct nf_conntrack_l3proto *l3proto __read_mostly;
 
-/* Calculated at init based on memory size */
-static unsigned int nf_nat_htable_size __read_mostly;
-
 #define MAX_IP_NAT_PROTO 256
 static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO]
                                                __read_mostly;
@@ -72,7 +69,7 @@ EXPORT_SYMBOL_GPL(nf_nat_proto_put);
 
 /* We keep an extra hash for each conntrack, for fast searching. */
 static inline unsigned int
-hash_by_src(const struct nf_conntrack_tuple *tuple)
+hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple)
 {
        unsigned int hash;
 
@@ -80,7 +77,7 @@ hash_by_src(const struct nf_conntrack_tuple *tuple)
        hash = jhash_3words((__force u32)tuple->src.u3.ip,
                            (__force u32)tuple->src.u.all,
                            tuple->dst.protonum, 0);
-       return ((u64)hash * nf_nat_htable_size) >> 32;
+       return ((u64)hash * net->ipv4.nat_htable_size) >> 32;
 }
 
 /* Is this tuple already taken? (not by us) */
@@ -147,7 +144,7 @@ find_appropriate_src(struct net *net,
                     struct nf_conntrack_tuple *result,
                     const struct nf_nat_range *range)
 {
-       unsigned int h = hash_by_src(tuple);
+       unsigned int h = hash_by_src(net, tuple);
        const struct nf_conn_nat *nat;
        const struct nf_conn *ct;
        const struct hlist_node *n;
@@ -330,7 +327,7 @@ nf_nat_setup_info(struct nf_conn *ct,
        if (have_to_hash) {
                unsigned int srchash;
 
-               srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+               srchash = hash_by_src(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                spin_lock_bh(&nf_nat_lock);
                /* nf_conntrack_alter_reply might re-allocate extension area */
                nat = nfct_nat(ct);
@@ -679,8 +676,10 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,
 
 static int __net_init nf_nat_net_init(struct net *net)
 {
-       net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size,
-                                                     &net->ipv4.nat_vmalloced, 0);
+       /* Leave them the same for the moment. */
+       net->ipv4.nat_htable_size = net->ct.htable_size;
+       net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size,
+                                                      &net->ipv4.nat_vmalloced, 0);
        if (!net->ipv4.nat_bysource)
                return -ENOMEM;
        return 0;
@@ -703,7 +702,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
        nf_ct_iterate_cleanup(net, &clean_nat, NULL);
        synchronize_rcu();
        nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced,
-                            nf_nat_htable_size);
+                            net->ipv4.nat_htable_size);
 }
 
 static struct pernet_operations nf_nat_net_ops = {
@@ -724,9 +723,6 @@ static int __init nf_nat_init(void)
                return ret;
        }
 
-       /* Leave them the same for the moment. */
-       nf_nat_htable_size = nf_conntrack_htable_size;
-
        ret = register_pernet_subsys(&nf_nat_net_ops);
        if (ret < 0)
                goto cleanup_extend;
index 480d7f8c9802083660c19ca20d225254b40b3b18..8a7e0f52e17792ff994dd3b7d02d0f9c0654c6d9 100644 (file)
@@ -1164,10 +1164,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
        if (t && !IS_ERR(t)) {
                struct ip6t_getinfo info;
                const struct xt_table_info *private = t->private;
-
 #ifdef CONFIG_COMPAT
+               struct xt_table_info tmp;
+
                if (compat) {
-                       struct xt_table_info tmp;
                        ret = compat_table_info(private, &tmp);
                        xt_compat_flush_offsets(AF_INET6);
                        private = &tmp;
index 156020d138b507685633feda14fa747d11c90538..6b3602de359ae556761ec10effa42151c0a92817 100644 (file)
@@ -698,15 +698,18 @@ dev_irnet_ioctl(
 
       /* Query PPP channel and unit number */
     case PPPIOCGCHAN:
+      lock_kernel();
       if(ap->ppp_open && !put_user(ppp_channel_index(&ap->chan),
                                                (int __user *)argp))
        err = 0;
+      unlock_kernel();
       break;
     case PPPIOCGUNIT:
       lock_kernel();
       if(ap->ppp_open && !put_user(ppp_unit_number(&ap->chan),
                                                (int __user *)argp))
-      err = 0;
+        err = 0;
+      unlock_kernel();
       break;
 
       /* All these ioctls can be passed both directly and from ppp_generic,
index 76fa6fef64739c3fb881dafc92947a5e5c1c054c..539f43bc97db79955925b4b6e6442df64548fffc 100644 (file)
@@ -3794,9 +3794,9 @@ static struct pernet_operations pfkey_net_ops = {
 
 static void __exit ipsec_pfkey_exit(void)
 {
-       unregister_pernet_subsys(&pfkey_net_ops);
        xfrm_unregister_km(&pfkeyv2_mgr);
        sock_unregister(PF_KEY);
+       unregister_pernet_subsys(&pfkey_net_ops);
        proto_unregister(&key_proto);
 }
 
@@ -3807,21 +3807,22 @@ static int __init ipsec_pfkey_init(void)
        if (err != 0)
                goto out;
 
-       err = sock_register(&pfkey_family_ops);
+       err = register_pernet_subsys(&pfkey_net_ops);
        if (err != 0)
                goto out_unregister_key_proto;
+       err = sock_register(&pfkey_family_ops);
+       if (err != 0)
+               goto out_unregister_pernet;
        err = xfrm_register_km(&pfkeyv2_mgr);
        if (err != 0)
                goto out_sock_unregister;
-       err = register_pernet_subsys(&pfkey_net_ops);
-       if (err != 0)
-               goto out_xfrm_unregister_km;
 out:
        return err;
-out_xfrm_unregister_km:
-       xfrm_unregister_km(&pfkeyv2_mgr);
+
 out_sock_unregister:
        sock_unregister(PF_KEY);
+out_unregister_pernet:
+       unregister_pernet_subsys(&pfkey_net_ops);
 out_unregister_key_proto:
        proto_unregister(&key_proto);
        goto out;
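
The af_key reordering makes setup and teardown strict mirrors: init now registers proto, then the pernet subsys, then the socket family, then the km handler, and both the error ladder and ipsec_pfkey_exit() unwind in exactly the reverse order, so per-net state is no longer removed while registered sockets can still reach it. The rule, as a sketch with invented names:

static void __exit m_exit(void)
{
        unregister_km();                /* registered last in init */
        unregister_socket_family();
        unregister_pernet();
        unregister_proto();             /* registered first in init */
}
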
index 0e98c3282d42fe546562454084469a680077847f..4d79e3c1616ce57ff2335487f2492afea9ebead2 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/netdevice.h>
 #include <linux/socket.h>
 #include <linux/mm.h>
+#include <linux/nsproxy.h>
 #include <linux/rculist_nulls.h>
 
 #include <net/netfilter/nf_conntrack.h>
@@ -63,8 +64,6 @@ EXPORT_SYMBOL_GPL(nf_conntrack_max);
 struct nf_conn nf_conntrack_untracked __read_mostly;
 EXPORT_SYMBOL_GPL(nf_conntrack_untracked);
 
-static struct kmem_cache *nf_conntrack_cachep __read_mostly;
-
 static int nf_conntrack_hash_rnd_initted;
 static unsigned int nf_conntrack_hash_rnd;
 
@@ -86,9 +85,10 @@ static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
        return ((u64)h * size) >> 32;
 }
 
-static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
+static inline u_int32_t hash_conntrack(const struct net *net,
+                                      const struct nf_conntrack_tuple *tuple)
 {
-       return __hash_conntrack(tuple, nf_conntrack_htable_size,
+       return __hash_conntrack(tuple, net->ct.htable_size,
                                nf_conntrack_hash_rnd);
 }
 
@@ -296,7 +296,7 @@ __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
 {
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
-       unsigned int hash = hash_conntrack(tuple);
+       unsigned int hash = hash_conntrack(net, tuple);
 
        /* Disable BHs the entire time since we normally need to disable them
         * at least once for the stats anyway.
@@ -366,10 +366,11 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
 
 void nf_conntrack_hash_insert(struct nf_conn *ct)
 {
+       struct net *net = nf_ct_net(ct);
        unsigned int hash, repl_hash;
 
-       hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-       repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+       hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+       repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 
        __nf_conntrack_hash_insert(ct, hash, repl_hash);
 }
@@ -397,8 +398,8 @@ __nf_conntrack_confirm(struct sk_buff *skb)
        if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
                return NF_ACCEPT;
 
-       hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-       repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+       hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+       repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 
        /* We're not in hash table, and we refuse to set up related
           connections for unconfirmed conns.  But packet copies and
@@ -468,7 +469,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
        struct net *net = nf_ct_net(ignored_conntrack);
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
-       unsigned int hash = hash_conntrack(tuple);
+       unsigned int hash = hash_conntrack(net, tuple);
 
        /* Disable BHs the entire time since we need to disable them at
         * least once for the stats anyway.
@@ -503,7 +504,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
        int dropped = 0;
 
        rcu_read_lock();
-       for (i = 0; i < nf_conntrack_htable_size; i++) {
+       for (i = 0; i < net->ct.htable_size; i++) {
                hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
                                         hnnode) {
                        tmp = nf_ct_tuplehash_to_ctrack(h);
@@ -523,7 +524,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
                if (cnt >= NF_CT_EVICTION_RANGE)
                        break;
 
-               hash = (hash + 1) % nf_conntrack_htable_size;
+               hash = (hash + 1) % net->ct.htable_size;
        }
        rcu_read_unlock();
 
@@ -557,7 +558,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
 
        if (nf_conntrack_max &&
            unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
-               unsigned int hash = hash_conntrack(orig);
+               unsigned int hash = hash_conntrack(net, orig);
                if (!early_drop(net, hash)) {
                        atomic_dec(&net->ct.count);
                        if (net_ratelimit())
@@ -572,7 +573,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
         * Do not use kmem_cache_zalloc(), as this cache uses
         * SLAB_DESTROY_BY_RCU.
         */
-       ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
+       ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
        if (ct == NULL) {
                pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
                atomic_dec(&net->ct.count);
@@ -611,7 +612,7 @@ void nf_conntrack_free(struct nf_conn *ct)
        nf_ct_ext_destroy(ct);
        atomic_dec(&net->ct.count);
        nf_ct_ext_free(ct);
-       kmem_cache_free(nf_conntrack_cachep, ct);
+       kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_free);
 
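The conntrack object cache becomes per-namespace here (net->ct.nf_conntrack_cachep, with its name freed as net->ct.slabname in the cleanup hunk below). The cache uses SLAB_DESTROY_BY_RCU, which is why the comment in the nf_conntrack_alloc() hunk above forbids kmem_cache_zalloc(): freed memory may be reused for a new conntrack while lockless readers still hold pointers into it, so fields must be initialised explicitly and the reference count published last. A simplified kernel-context sketch, not the exact conntrack init order:

ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
if (ct == NULL)
        return NULL;
/* explicitly clear only the fields a lockless reader re-validates */
memset(&ct->tuplehash[0], 0, sizeof(ct->tuplehash));
/* ... initialise the remaining fields ... */
/* publish last: readers grab references via atomic_inc_not_zero() */
atomic_set(&ct->ct_general.use, 1);
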
@@ -1014,7 +1015,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
        struct hlist_nulls_node *n;
 
        spin_lock_bh(&nf_conntrack_lock);
-       for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
+       for (; *bucket < net->ct.htable_size; (*bucket)++) {
                hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
                        ct = nf_ct_tuplehash_to_ctrack(h);
                        if (iter(ct, data))
@@ -1113,9 +1114,12 @@ static void nf_ct_release_dying_list(struct net *net)
 
 static void nf_conntrack_cleanup_init_net(void)
 {
+       /* wait until all references to nf_conntrack_untracked are dropped */
+       while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
+               schedule();
+
        nf_conntrack_helper_fini();
        nf_conntrack_proto_fini();
-       kmem_cache_destroy(nf_conntrack_cachep);
 }
 
 static void nf_conntrack_cleanup_net(struct net *net)
@@ -1127,15 +1131,14 @@ static void nf_conntrack_cleanup_net(struct net *net)
                schedule();
                goto i_see_dead_people;
        }
-       /* wait until all references to nf_conntrack_untracked are dropped */
-       while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
-               schedule();
 
        nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
-                            nf_conntrack_htable_size);
+                            net->ct.htable_size);
        nf_conntrack_ecache_fini(net);
        nf_conntrack_acct_fini(net);
        nf_conntrack_expect_fini(net);
+       kmem_cache_destroy(net->ct.nf_conntrack_cachep);
+       kfree(net->ct.slabname);
        free_percpu(net->ct.stat);
 }
 
@@ -1190,10 +1193,12 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
 {
        int i, bucket, vmalloced, old_vmalloced;
        unsigned int hashsize, old_size;
-       int rnd;
        struct hlist_nulls_head *hash, *old_hash;
        struct nf_conntrack_tuple_hash *h;
 
+       if (current->nsproxy->net_ns != &init_net)
+               return -EOPNOTSUPP;
+
        /* On boot, we can set this without any fancy locking. */
        if (!nf_conntrack_htable_size)
                return param_set_uint(val, kp);
@@ -1206,33 +1211,29 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
        if (!hash)
                return -ENOMEM;
 
-       /* We have to rehash for the new table anyway, so we can also
-        * use a new random seed */
-       get_random_bytes(&rnd, sizeof(rnd));
-
        /* Lookups in the old hash might happen in parallel, which means we
         * might get false negatives during connection lookup. New connections
         * created because of a false negative won't make it into the hash
         * though since that required taking the lock.
         */
        spin_lock_bh(&nf_conntrack_lock);
-       for (i = 0; i < nf_conntrack_htable_size; i++) {
+       for (i = 0; i < init_net.ct.htable_size; i++) {
                while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
                        h = hlist_nulls_entry(init_net.ct.hash[i].first,
                                        struct nf_conntrack_tuple_hash, hnnode);
                        hlist_nulls_del_rcu(&h->hnnode);
-                       bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
+                       bucket = __hash_conntrack(&h->tuple, hashsize,
+                                                 nf_conntrack_hash_rnd);
                        hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
                }
        }
-       old_size = nf_conntrack_htable_size;
+       old_size = init_net.ct.htable_size;
        old_vmalloced = init_net.ct.hash_vmalloc;
        old_hash = init_net.ct.hash;
 
-       nf_conntrack_htable_size = hashsize;
+       init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
        init_net.ct.hash_vmalloc = vmalloced;
        init_net.ct.hash