Merge branch 'audit.b22' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/audit-current
author Linus Torvalds <torvalds@g5.osdl.org>
Sat, 1 Jul 2006 16:59:08 +0000 (09:59 -0700)
committer Linus Torvalds <torvalds@g5.osdl.org>
Sat, 1 Jul 2006 16:59:08 +0000 (09:59 -0700)
* 'audit.b22' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/audit-current:
  [PATCH] audit syscall classes
  [PATCH] audit: support for object context filters
  [PATCH] audit: rename AUDIT_SE_* constants
  [PATCH] add rule filterkey

86 files changed:
Makefile
arch/i386/kernel/alternative.c
arch/i386/mm/init.c
arch/parisc/mm/init.c
arch/s390/kernel/entry.S
arch/s390/kernel/entry64.S
arch/s390/mm/init.c
arch/um/kernel/skas/uaccess.c
arch/um/os-Linux/umid.c
arch/um/scripts/Makefile.rules
arch/x86_64/mm/init.c
drivers/acpi/bus.c
drivers/block/nbd.c
drivers/edac/amd76x_edac.c
drivers/edac/e752x_edac.c
drivers/edac/e7xxx_edac.c
drivers/edac/edac_mc.h
drivers/edac/i82860_edac.c
drivers/edac/i82875p_edac.c
drivers/edac/r82600_edac.c
drivers/ide/pci/it821x.c
drivers/infiniband/hw/ipath/Kconfig
drivers/infiniband/hw/ipath/Makefile
drivers/infiniband/hw/ipath/ipath_common.h
drivers/infiniband/hw/ipath/ipath_cq.c
drivers/infiniband/hw/ipath/ipath_debug.h
drivers/infiniband/hw/ipath/ipath_diag.c
drivers/infiniband/hw/ipath/ipath_driver.c
drivers/infiniband/hw/ipath/ipath_eeprom.c
drivers/infiniband/hw/ipath/ipath_file_ops.c
drivers/infiniband/hw/ipath/ipath_fs.c
drivers/infiniband/hw/ipath/ipath_ht400.c
drivers/infiniband/hw/ipath/ipath_init_chip.c
drivers/infiniband/hw/ipath/ipath_intr.c
drivers/infiniband/hw/ipath/ipath_kernel.h
drivers/infiniband/hw/ipath/ipath_keys.c
drivers/infiniband/hw/ipath/ipath_layer.c
drivers/infiniband/hw/ipath/ipath_layer.h
drivers/infiniband/hw/ipath/ipath_mad.c
drivers/infiniband/hw/ipath/ipath_mr.c
drivers/infiniband/hw/ipath/ipath_pe800.c
drivers/infiniband/hw/ipath/ipath_qp.c
drivers/infiniband/hw/ipath/ipath_rc.c
drivers/infiniband/hw/ipath/ipath_registers.h
drivers/infiniband/hw/ipath/ipath_ruc.c
drivers/infiniband/hw/ipath/ipath_srq.c
drivers/infiniband/hw/ipath/ipath_stats.c
drivers/infiniband/hw/ipath/ipath_sysfs.c
drivers/infiniband/hw/ipath/ipath_uc.c
drivers/infiniband/hw/ipath/ipath_ud.c
drivers/infiniband/hw/ipath/ipath_user_pages.c
drivers/infiniband/hw/ipath/ipath_verbs.c
drivers/infiniband/hw/ipath/ipath_verbs.h
drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
drivers/infiniband/hw/ipath/ips_common.h [deleted file]
drivers/infiniband/hw/ipath/verbs_debug.h
drivers/pnp/resource.c
drivers/rtc/Kconfig
drivers/rtc/Makefile
drivers/rtc/rtc-s3c.c [new file with mode: 0644]
fs/reiserfs/inode.c
fs/ufs/balloc.c
fs/ufs/file.c
fs/ufs/inode.c
fs/ufs/truncate.c
fs/ufs/util.c
fs/ufs/util.h
include/asm-generic/sections.h
include/asm-i386/alternative.h
include/asm-um/kmap_types.h
include/asm-x86_64/alternative.h
include/linux/cpu.h
include/linux/err.h
include/linux/ufs_fs.h
kernel/futex.c
kernel/irq/manage.c
scripts/Kbuild.include
scripts/Makefile.build
scripts/Makefile.host
scripts/Makefile.lib
scripts/Makefile.modpost
scripts/kconfig/lxdialog/checklist.c
scripts/kernel-doc
scripts/mod/modpost.c
scripts/mod/modpost.h

index e9560c6f8156983cb15328c767b594028354da8e..4dcf25d43fa6b1aeb30be44bdf0042fd94983ab9 100644
--- a/Makefile
+++ b/Makefile
@@ -41,8 +41,9 @@ ifndef KBUILD_VERBOSE
   KBUILD_VERBOSE = 0
 endif
 
-# Call sparse as part of compilation of C files
-# Use 'make C=1' to enable sparse checking
+# Call checker as part of compilation of C files
+# Use 'make C=1' to enable checking (sparse, by default)
+# Override with 'make C=1 CHECK=checker_executable CHECKFLAGS=....'
 
 ifdef C
   ifeq ("$(origin C)", "command line")
@@ -1060,8 +1061,8 @@ help:
 
        @echo  '  make V=0|1 [targets] 0 => quiet build (default), 1 => verbose build'
        @echo  '  make O=dir [targets] Locate all output files in "dir", including .config'
-       @echo  '  make C=1   [targets] Check all c source with $$CHECK (sparse)'
-       @echo  '  make C=2   [targets] Force check of all c source with $$CHECK (sparse)'
+       @echo  '  make C=1   [targets] Check all c source with $$CHECK (sparse by default)'
+       @echo  '  make C=2   [targets] Force check of all c source with $$CHECK'
        @echo  ''
        @echo  'Execute "make" or "make all" to build all targets marked with [*] '
        @echo  'For further info see the ./README file'
@@ -1352,7 +1353,7 @@ quiet_cmd_rmfiles = $(if $(wildcard $(rm-files)),CLEAN   $(wildcard $(rm-files))
 
 a_flags = -Wp,-MD,$(depfile) $(AFLAGS) $(AFLAGS_KERNEL) \
          $(NOSTDINC_FLAGS) $(CPPFLAGS) \
-         $(modkern_aflags) $(EXTRA_AFLAGS) $(AFLAGS_$(*F).o)
+         $(modkern_aflags) $(EXTRA_AFLAGS) $(AFLAGS_$(basetarget).o)
 
 quiet_cmd_as_o_S = AS      $@
 cmd_as_o_S       = $(CC) $(a_flags) -c -o $@ $<
index 50eb0e03777e4b1a7f0fe564f507b6ccfe358f18..7b421b3a053e0ce8827410566a601b7050a7ff76 100644
@@ -168,6 +168,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
        }
 }
 
+#ifdef CONFIG_SMP
+
 static void alternatives_smp_save(struct alt_instr *start, struct alt_instr *end)
 {
        struct alt_instr *a;
@@ -328,6 +330,8 @@ void alternatives_smp_switch(int smp)
        spin_unlock_irqrestore(&smp_alt, flags);
 }
 
+#endif
+
 void __init alternative_instructions(void)
 {
        if (no_replacement) {
@@ -349,6 +353,7 @@ void __init alternative_instructions(void)
        smp_alt_once = 1;
 #endif
 
+#ifdef CONFIG_SMP
        if (smp_alt_once) {
                if (1 == num_possible_cpus()) {
                        printk(KERN_INFO "SMP alternatives: switching to UP code\n");
@@ -370,4 +375,5 @@ void __init alternative_instructions(void)
                                            _text, _etext);
                alternatives_smp_switch(0);
        }
+#endif
 }
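The #ifdef CONFIG_SMP guards above pair with the usual header-side pattern,
where the SMP-alternatives entry points collapse to empty inlines on UP
builds so callers stay #ifdef-free. A minimal sketch, assuming the
declaration lives in include/asm-i386/alternative.h (also touched by this
merge); treat the exact set of guarded functions as illustrative:

	#ifdef CONFIG_SMP
	extern void alternatives_smp_switch(int smp);
	#else
	static inline void alternatives_smp_switch(int smp) { }
	#endif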
index dc5d8979cd647b11f2a48dac27ef7ef70a4c7188..89e8486aac3499f3f39b35f5268df100b95285ad 100644
@@ -725,16 +725,15 @@ static int noinline do_test_wp_bit(void)
 
 #ifdef CONFIG_DEBUG_RODATA
 
-extern char __start_rodata, __end_rodata;
 void mark_rodata_ro(void)
 {
-       unsigned long addr = (unsigned long)&__start_rodata;
+       unsigned long addr = (unsigned long)__start_rodata;
 
-       for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
+       for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
                change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);
 
-       printk ("Write protecting the kernel read-only data: %luk\n",
-                       (unsigned long)(&__end_rodata - &__start_rodata) >> 10);
+       printk("Write protecting the kernel read-only data: %uk\n",
+                       (__end_rodata - __start_rodata) >> 10);
 
        /*
         * change_page_attr() requires a global_flush_tlb() call after it.
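The address-of dance goes away because <asm-generic/sections.h> (also in
this merge's file list) declares the section-boundary linker symbols as
arrays, which decay to addresses directly. A sketch of the relevant
declarations as they look in this era's header (shape, not verbatim):

	/* include/asm-generic/sections.h */
	extern char _text[], _etext[];
	extern char __start_rodata[], __end_rodata[];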
index b64602a99d8972f92ede87f71007ca00ef30f5f7..f2b96f1e0da75f731e9ae628b1db605cb755cd43 100644
@@ -27,6 +27,7 @@
 #include <asm/tlb.h>
 #include <asm/pdc_chassis.h>
 #include <asm/mmzone.h>
+#include <asm/sections.h>
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
@@ -417,11 +418,10 @@ void free_initmem(void)
 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void)
 {
-       extern char __start_rodata, __end_rodata;
        /* rodata memory was already mapped with KERNEL_RO access rights by
            pagetable_init() and map_pages(). No need to do additional stuff here */
        printk (KERN_INFO "Write protecting the kernel read-only data: %luk\n",
-               (unsigned long)(&__end_rodata - &__start_rodata) >> 10);
+               (unsigned long)(__end_rodata - __start_rodata) >> 10);
 }
 #endif
 
index 1a434a7004ee4f5432d37e8019c03d31e8265d26..d8948c342caf554aff4b657ce41363e415f8e8da 100644
@@ -228,8 +228,9 @@ sysc_do_svc:
 sysc_nr_ok:
        mvc     SP_ARGS(4,%r15),SP_R7(%r15)
 sysc_do_restart:
+       l       %r8,BASED(.Lsysc_table)
        tm      __TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
-        l       %r8,sys_call_table-system_call(%r7,%r13) # get system call addr.
+       l       %r8,0(%r7,%r8)    # get system call addr.
         bnz     BASED(sysc_tracesys)
         basr    %r14,%r8          # call sys_xxxx
         st      %r2,SP_R2(%r15)   # store return value (change R2 on stack)
@@ -330,9 +331,10 @@ sysc_tracesys:
        basr    %r14,%r1
        clc     SP_R2(4,%r15),BASED(.Lnr_syscalls)
        bnl     BASED(sysc_tracenogo)
+       l       %r8,BASED(.Lsysc_table)
        l       %r7,SP_R2(%r15)        # strace might have changed the 
        sll     %r7,2                  #  system call
-       l       %r8,sys_call_table-system_call(%r7,%r13)
+       l       %r8,0(%r7,%r8)
 sysc_tracego:
        lm      %r3,%r6,SP_R3(%r15)
        l       %r2,SP_ORIG_R2(%r15)
@@ -1009,6 +1011,7 @@ cleanup_io_leave_insn:
 .Ltrace:       .long  syscall_trace
 .Lvfork:       .long  sys_vfork
 .Lschedtail:   .long  schedule_tail
+.Lsysc_table:  .long  sys_call_table
 
 .Lcritical_start:
                .long  __critical_start + 0x80000000
@@ -1017,8 +1020,8 @@ cleanup_io_leave_insn:
 .Lcleanup_critical:
                .long  cleanup_critical
 
+              .section .rodata, "a"
 #define SYSCALL(esa,esame,emu) .long esa
 sys_call_table:
 #include "syscalls.S"
 #undef SYSCALL
-
index edad607716733fcfba5c6538ae1f1a03cdd44c30..1ca499fa54b4b3c71cbff168b68eeaac08e23df0 100644
@@ -991,6 +991,7 @@ cleanup_io_leave_insn:
 .Lcritical_end:
                .quad  __critical_end
 
+              .section .rodata, "a"
 #define SYSCALL(esa,esame,emu) .long esame
 sys_call_table:
 #include "syscalls.S"
index 81dce185f8361b2d695e1f56c5101d8bde49b17e..eb6ebfef134ae8b710def114b33533507e62a673 100644
@@ -23,6 +23,7 @@
 #include <linux/init.h>
 #include <linux/pagemap.h>
 #include <linux/bootmem.h>
+#include <linux/pfn.h>
 
 #include <asm/processor.h>
 #include <asm/system.h>
@@ -33,6 +34,7 @@
 #include <asm/lowcore.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
+#include <asm/sections.h>
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
@@ -89,17 +91,6 @@ void show_mem(void)
         printk("%d pages swap cached\n",cached);
 }
 
-/* References to section boundaries */
-
-extern unsigned long _text;
-extern unsigned long _etext;
-extern unsigned long _edata;
-extern unsigned long __bss_start;
-extern unsigned long _end;
-
-extern unsigned long __init_begin;
-extern unsigned long __init_end;
-
 extern unsigned long __initdata zholes_size[];
 /*
  * paging_init() sets up the page tables
@@ -116,6 +107,10 @@ void __init paging_init(void)
         unsigned long pfn = 0;
         unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
         static const int ssm_mask = 0x04000000L;
+       unsigned long ro_start_pfn, ro_end_pfn;
+
+       ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
+       ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
        /* unmap whole virtual address space */
        
@@ -143,7 +138,10 @@ void __init paging_init(void)
                 pg_dir++;
 
                 for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
-                        pte = pfn_pte(pfn, PAGE_KERNEL);
+                       if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
+                               pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
+                       else
+                               pte = pfn_pte(pfn, PAGE_KERNEL);
                         if (pfn >= max_low_pfn)
                                 pte_clear(&init_mm, 0, &pte);
                         set_pte(pg_table, pte);
@@ -175,6 +173,7 @@ void __init paging_init(void)
 }
 
 #else /* CONFIG_64BIT */
+
 void __init paging_init(void)
 {
         pgd_t * pg_dir;
@@ -186,13 +185,15 @@ void __init paging_init(void)
         unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
           _KERN_REGION_TABLE;
        static const int ssm_mask = 0x04000000L;
-
        unsigned long zones_size[MAX_NR_ZONES];
        unsigned long dma_pfn, high_pfn;
+       unsigned long ro_start_pfn, ro_end_pfn;
 
        memset(zones_size, 0, sizeof(zones_size));
        dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
        high_pfn = max_low_pfn;
+       ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
+       ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
        if (dma_pfn > high_pfn)
                zones_size[ZONE_DMA] = high_pfn;
@@ -231,7 +232,10 @@ void __init paging_init(void)
                         pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
        
                         for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) {
-                                pte = pfn_pte(pfn, PAGE_KERNEL);
+                               if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
+                                       pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
+                               else
+                                       pte = pfn_pte(pfn, PAGE_KERNEL);
                                 if (pfn >= max_low_pfn) {
                                         pte_clear(&init_mm, 0, &pte); 
                                         continue;
@@ -282,6 +286,9 @@ void __init mem_init(void)
                 reservedpages << (PAGE_SHIFT-10),
                 datasize >>10,
                 initsize >> 10);
+       printk("Write protected kernel read-only data: %#lx - %#lx\n",
+              (unsigned long)&__start_rodata,
+              PFN_ALIGN((unsigned long)&__end_rodata) - 1);
 }
 
 void free_initmem(void)
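PFN_DOWN() and PFN_UP() come from the newly included <linux/pfn.h>;
rounding the rodata start down and its end up makes the _PAGE_RO mapping
cover every page that holds any rodata byte. Their definitions in this era
are essentially:

	#define PFN_ALIGN(x)  (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
	#define PFN_UP(x)     (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
	#define PFN_DOWN(x)   ((x) >> PAGE_SHIFT)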
index 5992c3257167443a0ec57d2be3093a02d9af412e..8912cec0fe43380cb6761e088099c04dccd1bfaf 100644
@@ -8,6 +8,7 @@
 #include "linux/kernel.h"
 #include "linux/string.h"
 #include "linux/fs.h"
+#include "linux/hardirq.h"
 #include "linux/highmem.h"
 #include "asm/page.h"
 #include "asm/pgtable.h"
@@ -38,7 +39,7 @@ static unsigned long maybe_map(unsigned long virt, int is_write)
        return((unsigned long) phys);
 }
 
-static int do_op(unsigned long addr, int len, int is_write,
+static int do_op_one_page(unsigned long addr, int len, int is_write,
                 int (*op)(unsigned long addr, int len, void *arg), void *arg)
 {
        struct page *page;
@@ -49,9 +50,11 @@ static int do_op(unsigned long addr, int len, int is_write,
                return(-1);
 
        page = phys_to_page(addr);
-       addr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
+       addr = (unsigned long) kmap_atomic(page, KM_UML_USERCOPY) + (addr & ~PAGE_MASK);
+
        n = (*op)(addr, len, arg);
-       kunmap(page);
+
+       kunmap_atomic(page, KM_UML_USERCOPY);
 
        return(n);
 }
@@ -77,7 +80,7 @@ static void do_buffer_op(void *jmpbuf, void *arg_ptr)
        remain = len;
 
        current->thread.fault_catcher = jmpbuf;
-       n = do_op(addr, size, is_write, op, arg);
+       n = do_op_one_page(addr, size, is_write, op, arg);
        if(n != 0){
                *res = (n < 0 ? remain : 0);
                goto out;
@@ -91,7 +94,7 @@ static void do_buffer_op(void *jmpbuf, void *arg_ptr)
        }
 
        while(addr < ((addr + remain) & PAGE_MASK)){
-               n = do_op(addr, PAGE_SIZE, is_write, op, arg);
+               n = do_op_one_page(addr, PAGE_SIZE, is_write, op, arg);
                if(n != 0){
                        *res = (n < 0 ? remain : 0);
                        goto out;
@@ -105,7 +108,7 @@ static void do_buffer_op(void *jmpbuf, void *arg_ptr)
                goto out;
        }
 
-       n = do_op(addr, remain, is_write, op, arg);
+       n = do_op_one_page(addr, remain, is_write, op, arg);
        if(n != 0)
                *res = (n < 0 ? remain : 0);
        else *res = 0;
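The switch from kmap() to kmap_atomic() gives this path a non-sleeping,
per-CPU mapping, using the KM_UML_USERCOPY slot this merge adds to
asm-um/kmap_types.h. A minimal sketch of the call pattern (in this era's
API the mapped virtual address is what gets handed back to kunmap_atomic):

	void *vaddr = kmap_atomic(page, KM_UML_USERCOPY); /* disables preemption */
	n = (*op)((unsigned long)vaddr + (addr & ~PAGE_MASK), len, arg);
	kunmap_atomic(vaddr, KM_UML_USERCOPY);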
index 362db059fe305d4a7276fb7c40705cb18d1a704b..48092b95c8ab997b4914a072afcd17f0e1aa7eda 100644
@@ -67,32 +67,53 @@ err:
        return err;
 }
 
-static int actually_do_remove(char *dir)
+/*
+ * Unlinks the files contained in @dir and then removes @dir.
+ * Doesn't handle directory trees, so it's not like rm -rf, but almost such. We
+ * ignore ENOENT errors for anything (they happen, strangely enough - possibly due
+ * to races between multiple dying UML threads).
+ */
+static int remove_files_and_dir(char *dir)
 {
        DIR *directory;
        struct dirent *ent;
        int len;
        char file[256];
+       int ret;
 
        directory = opendir(dir);
-       if(directory == NULL)
-               return -errno;
+       if (directory == NULL) {
+               if (errno != ENOENT)
+                       return -errno;
+               else
+                       return 0;
+       }
 
-       while((ent = readdir(directory)) != NULL){
-               if(!strcmp(ent->d_name, ".") || !strcmp(ent->d_name, ".."))
+       while ((ent = readdir(directory)) != NULL) {
+               if (!strcmp(ent->d_name, ".") || !strcmp(ent->d_name, ".."))
                        continue;
                len = strlen(dir) + sizeof("/") + strlen(ent->d_name) + 1;
-               if(len > sizeof(file))
-                       return -E2BIG;
+               if (len > sizeof(file)) {
+                       ret = -E2BIG;
+                       goto out;
+               }
 
                sprintf(file, "%s/%s", dir, ent->d_name);
-               if(unlink(file) < 0)
-                       return -errno;
+               if (unlink(file) < 0 && errno != ENOENT) {
+                       ret = -errno;
+                       goto out;
+               }
        }
-       if(rmdir(dir) < 0)
-               return -errno;
 
-       return 0;
+       if (rmdir(dir) < 0 && errno != ENOENT) {
+               ret = -errno;
+               goto out;
+       }
+
+       ret = 0;
+out:
+       closedir(directory);
+       return ret;
 }
 
 /* This says that there isn't already a user of the specified directory even if
@@ -103,9 +124,10 @@ static int actually_do_remove(char *dir)
  *     something other than UML sticking stuff in the directory
  *     this boot racing with a shutdown of the other UML
  * In any of these cases, the directory isn't useful for anything else.
+ *
+ * Boolean return: 1 if in use, 0 otherwise.
  */
-
-static int not_dead_yet(char *dir)
+static inline int is_umdir_used(char *dir)
 {
        char file[strlen(uml_dir) + UMID_LEN + sizeof("/pid\0")];
        char pid[sizeof("nnnnn\0")], *end;
@@ -113,7 +135,7 @@ static int not_dead_yet(char *dir)
 
        n = snprintf(file, sizeof(file), "%s/pid", dir);
        if(n >= sizeof(file)){
-               printk("not_dead_yet - pid filename too long\n");
+               printk("is_umdir_used - pid filename too long\n");
                err = -E2BIG;
                goto out;
        }
@@ -123,7 +145,7 @@ static int not_dead_yet(char *dir)
        if(fd < 0) {
                fd = -errno;
                if(fd != -ENOENT){
-                       printk("not_dead_yet : couldn't open pid file '%s', "
+                       printk("is_umdir_used : couldn't open pid file '%s', "
                               "err = %d\n", file, -fd);
                }
                goto out;
@@ -132,18 +154,18 @@ static int not_dead_yet(char *dir)
        err = 0;
        n = read(fd, pid, sizeof(pid));
        if(n < 0){
-               printk("not_dead_yet : couldn't read pid file '%s', "
+               printk("is_umdir_used : couldn't read pid file '%s', "
                       "err = %d\n", file, errno);
                goto out_close;
        } else if(n == 0){
-               printk("not_dead_yet : couldn't read pid file '%s', "
+               printk("is_umdir_used : couldn't read pid file '%s', "
                       "0-byte read\n", file);
                goto out_close;
        }
 
        p = strtoul(pid, &end, 0);
        if(end == pid){
-               printk("not_dead_yet : couldn't parse pid file '%s', "
+               printk("is_umdir_used : couldn't parse pid file '%s', "
                       "errno = %d\n", file, errno);
                goto out_close;
        }
@@ -153,19 +175,32 @@ static int not_dead_yet(char *dir)
                return 1;
        }
 
-       err = actually_do_remove(dir);
-       if(err)
-               printk("not_dead_yet - actually_do_remove failed with "
-                      "err = %d\n", err);
-
-       return err;
-
 out_close:
        close(fd);
 out:
        return 0;
 }
 
+/*
+ * Try to remove the directory @dir unless it's in use.
+ * Precondition: @dir exists.
+ * Returns 0 for success, < 0 for failure in removal or if the directory is in
+ * use.
+ */
+static int umdir_take_if_dead(char *dir)
+{
+       int ret;
+       if (is_umdir_used(dir))
+               return -EEXIST;
+
+       ret = remove_files_and_dir(dir);
+       if (ret) {
+               printk("is_umdir_used - remove_files_and_dir failed with "
+                      "err = %d\n", ret);
+       }
+       return ret;
+}
+
 static void __init create_pid_file(void)
 {
        char file[strlen(uml_dir) + UMID_LEN + sizeof("/pid\0")];
@@ -244,11 +279,7 @@ int __init make_umid(void)
                if(err != -EEXIST)
                        goto err;
 
-               /* 1   -> this umid is already in use
-                * < 0 -> we couldn't remove the umid directory
-                * In either case, we can't use this umid, so return -EEXIST.
-                */
-               if(not_dead_yet(tmp) != 0)
+               if (umdir_take_if_dead(tmp) < 0)
                        goto err;
 
                err = mkdir(tmp, 0777);
@@ -344,9 +375,9 @@ static void remove_umid_dir(void)
        char dir[strlen(uml_dir) + UMID_LEN + 1], err;
 
        sprintf(dir, "%s%s", uml_dir, umid);
-       err = actually_do_remove(dir);
+       err = remove_files_and_dir(dir);
        if(err)
-               printf("remove_umid_dir - actually_do_remove failed with "
+               printf("remove_umid_dir - remove_files_and_dir failed with "
                       "err = %d\n", err);
 }
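With the rename and the new umdir_take_if_dead() wrapper, make_umid()'s
collision handling (see its hunk above) reduces to a take-or-retry flow; a
condensed sketch, error paths elided:

	/* inside make_umid(), after mkdir() failed with -EEXIST: */
	if (umdir_take_if_dead(tmp) < 0)
		goto err;            /* still in use, or couldn't be removed */
	err = mkdir(tmp, 0777);      /* stale directory is gone; try again */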
 
index 1347dc6d521828508f525a1b9f7660e133c8f879..813077fb1e5bf5bf69a16fb4355342dc622ecfe3 100644
@@ -8,7 +8,7 @@ USER_OBJS += $(filter %_user.o,$(obj-y) $(obj-m)  $(USER_SINGLE_OBJS))
 USER_OBJS := $(foreach file,$(USER_OBJS),$(obj)/$(file))
 
 $(USER_OBJS:.o=.%): \
-       c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS) $(CFLAGS_$(*F).o)
+       c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS) $(CFLAGS_$(basetarget).o)
 $(USER_OBJS) : CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ \
        -Dunix -D__unix__ -D__$(SUBARCH)__
 
@@ -17,7 +17,7 @@ $(USER_OBJS) : CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ \
 UNPROFILE_OBJS := $(foreach file,$(UNPROFILE_OBJS),$(obj)/$(file))
 
 $(UNPROFILE_OBJS:.o=.%): \
-       c_flags = -Wp,-MD,$(depfile) $(call unprofile,$(USER_CFLAGS)) $(CFLAGS_$(*F).o)
+       c_flags = -Wp,-MD,$(depfile) $(call unprofile,$(USER_CFLAGS)) $(CFLAGS_$(basetarget).o)
 $(UNPROFILE_OBJS) : CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ \
        -Dunix -D__unix__ -D__$(SUBARCH)__
 
index 72f140f81b70bfdc0d8b570b4be21a56557ee86c..d14fb2dfbfc4a742b399882482a877b9c2db9428 100644
@@ -678,16 +678,15 @@ void free_initmem(void)
 
 #ifdef CONFIG_DEBUG_RODATA
 
-extern char __start_rodata, __end_rodata;
 void mark_rodata_ro(void)
 {
-       unsigned long addr = (unsigned long)&__start_rodata;
+       unsigned long addr = (unsigned long)__start_rodata;
 
-       for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
+       for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
                change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);
 
        printk ("Write protecting the kernel read-only data: %luk\n",
-                       (&__end_rodata - &__start_rodata) >> 10);
+                       (__end_rodata - __start_rodata) >> 10);
 
        /*
         * change_page_attr_addr() requires a global_flush_tlb() call after it.
index dec044c04273de93a76ba97b2cbf8d227272a83a..ea5a0496a4fd9a63531dd1665975b44d810b0dbb 100644
@@ -192,7 +192,7 @@ int acpi_bus_set_power(acpi_handle handle, int state)
        /* Make sure this is a valid target state */
 
        if (!device->flags.power_manageable) {
-               printk(KERN_DEBUG "Device `[%s]is not power manageable",
+               printk(KERN_DEBUG "Device `[%s]is not power manageable",
                                device->kobj.name);
                return -ENODEV;
        }
index 39662f0c9cce65afd9da3b397607093f05a3f026..0a1b1ea36ddcdc3064ba4daa35567a01e0f5b72d 100644
@@ -50,9 +50,9 @@
 #define DBG_RX          0x0200
 #define DBG_TX          0x0400
 static unsigned int debugflags;
-static unsigned int nbds_max = 16;
 #endif /* NDEBUG */
 
+static unsigned int nbds_max = 16;
 static struct nbd_device nbd_dev[MAX_NBD];
 
 /*
index d75864e35fef33edcba22a26f7bfcb3b2d07a614..f79f6b587bfa740383bb9f7dfe0616a4b6f0b099 100644
@@ -19,8 +19,8 @@
 #include <linux/slab.h>
 #include "edac_mc.h"
 
-#define AMD76X_REVISION        " Ver: 2.0.0 "  __DATE__
-
+#define AMD76X_REVISION        " Ver: 2.0.1 "  __DATE__
+#define EDAC_MOD_STR   "amd76x_edac"
 
 #define amd76x_printk(level, fmt, arg...) \
        edac_printk(level, "amd76x", fmt, ##arg)
index 815c3eb783de8297a186ecee94f356750d65089d..c82bc0ed7f143113fcea830717b60f388a46b3dd 100644
@@ -24,7 +24,8 @@
 #include <linux/slab.h>
 #include "edac_mc.h"
 
-#define E752X_REVISION " Ver: 2.0.0 " __DATE__
+#define E752X_REVISION " Ver: 2.0.1 " __DATE__
+#define EDAC_MOD_STR   "e752x_edac"
 
 static int force_function_unhide;
 
index 5a5ecd5a040977f4faabe78ebefaa49b56c7a968..310d91b41c96154bf0ba8154b5b017f126a441d1 100644
@@ -29,7 +29,8 @@
 #include <linux/slab.h>
 #include "edac_mc.h"
 
-#define        E7XXX_REVISION " Ver: 2.0.0 " __DATE__
+#define        E7XXX_REVISION " Ver: 2.0.1 " __DATE__
+#define        EDAC_MOD_STR    "e7xxx_edac"
 
 #define e7xxx_printk(level, fmt, arg...) \
        edac_printk(level, "e7xxx", fmt, ##arg)
index 1be4947e28af6a3ce20cb6b5b79f0c0c2458ac6d..bf6ab8a8d5ed8700dce391de101a03b60ee72987 100644
@@ -78,10 +78,6 @@ extern int edac_debug_level;
 
 #endif  /* !CONFIG_EDAC_DEBUG */
 
-#define edac_xstr(s) edac_str(s)
-#define edac_str(s) #s
-#define EDAC_MOD_STR edac_xstr(KBUILD_BASENAME)
-
 #define BIT(x) (1 << (x))
 
 #define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \
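For reference, the macros deleted above were the classic two-step
stringification trick: the extra level of indirection forces the
preprocessor to expand KBUILD_BASENAME before the # operator turns it into
a string literal.

	#define edac_str(s) #s
	#define edac_xstr(s) edac_str(s)
	/* edac_xstr(KBUILD_BASENAME) -> "amd76x_edac" when building
	 * amd76x_edac.o; each driver now spells the string out instead,
	 * as the per-driver EDAC_MOD_STR hunks in this merge show. */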
index e30a4a2eaf38bddf6a6b1f4d67497caf5ca5133c..e4bb298e613f4aea9a84d3320bfaa2fb21122d73 100644
@@ -16,7 +16,8 @@
 #include <linux/slab.h>
 #include "edac_mc.h"
 
-#define  I82860_REVISION " Ver: 2.0.0 " __DATE__
+#define  I82860_REVISION " Ver: 2.0.1 " __DATE__
+#define EDAC_MOD_STR   "i82860_edac"
 
 #define i82860_printk(level, fmt, arg...) \
        edac_printk(level, "i82860", fmt, ##arg)
index 9423ee5e7edd2140f8dc3e9b947047186aa6bbad..161fe09a6d3871200dd353b272b6b70f3413c9c9 100644
@@ -20,7 +20,8 @@
 #include <linux/slab.h>
 #include "edac_mc.h"
 
-#define I82875P_REVISION       " Ver: 2.0.0 " __DATE__
+#define I82875P_REVISION       " Ver: 2.0.1 " __DATE__
+#define EDAC_MOD_STR           "i82875p_edac"
 
 #define i82875p_printk(level, fmt, arg...) \
        edac_printk(level, "i82875p", fmt, ##arg)
index a0e248d11ed9a8b674a124c8387a170dd06d82e1..a49cf0a39398a4476e37c7105c02b6a3123daee4 100644
@@ -22,7 +22,8 @@
 #include <linux/slab.h>
 #include "edac_mc.h"
 
-#define R82600_REVISION        " Ver: 2.0.0 " __DATE__
+#define R82600_REVISION        " Ver: 2.0.1 " __DATE__
+#define EDAC_MOD_STR   "r82600_edac"
 
 #define r82600_printk(level, fmt, arg...) \
        edac_printk(level, "r82600", fmt, ##arg)
index 7fb3635683dce13d6785d881aa39c15b5c808632..3cb04424d351dcb9f88e0163a120ea103983b19c 100644
@@ -650,6 +650,8 @@ static void __devinit init_hwif_it821x(ide_hwif_t *hwif)
        }
        ide_set_hwifdata(hwif, idev);
 
+       hwif->atapi_dma = 1;
+
        pci_read_config_byte(hwif->pci_dev, 0x50, &conf);
        if(conf & 1) {
                idev->smart = 1;
index 9ea67c409b6d4a83e32cbd76b06532c764e03a5b..1db9489f1e82bbc7ae0d10a934af5961a94397df 100644
@@ -1,16 +1,16 @@
 config IPATH_CORE
-       tristate "PathScale InfiniPath Driver"
+       tristate "QLogic InfiniPath Driver"
        depends on 64BIT && PCI_MSI && NET
        ---help---
-       This is a low-level driver for PathScale InfiniPath host channel
+       This is a low-level driver for QLogic InfiniPath host channel
        adapters (HCAs) based on the HT-400 and PE-800 chips.
 
 config INFINIBAND_IPATH
-       tristate "PathScale InfiniPath Verbs Driver"
+       tristate "QLogic InfiniPath Verbs Driver"
        depends on IPATH_CORE && INFINIBAND
        ---help---
        This is a driver that provides InfiniBand verbs support for
-       PathScale InfiniPath host channel adapters (HCAs).  This
+       QLogic InfiniPath host channel adapters (HCAs).  This
        allows these devices to be used with both kernel upper level
        protocols such as IP-over-InfiniBand as well as with userspace
        applications (in conjunction with InfiniBand userspace access).
index b4d084abfd22c3ace2a33198ec8cb63e09b98018..b0bf72864130e03945c0da8117d713f12b5b0302 100644
@@ -1,4 +1,4 @@
-EXTRA_CFLAGS += -DIPATH_IDSTR='"PathScale kernel.org driver"' \
+EXTRA_CFLAGS += -DIPATH_IDSTR='"QLogic kernel.org driver"' \
        -DIPATH_KERN_TYPE=0
 
 obj-$(CONFIG_IPATH_CORE) += ipath_core.o
index 48a55247b832df8b4aa08a2595b6f32814b357d0..062bd392e7e55d22e8d5bf95d057e8af2eb68c78 100644
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -38,7 +39,8 @@
  * to communicate between kernel and user code.
  */
 
-/* This is the IEEE-assigned OUI for PathScale, Inc. */
+
+/* This is the IEEE-assigned OUI for QLogic Inc. InfiniPath */
 #define IPATH_SRC_OUI_1 0x00
 #define IPATH_SRC_OUI_2 0x11
 #define IPATH_SRC_OUI_3 0x75
@@ -96,8 +98,8 @@ struct infinipath_stats {
        __u64 sps_hwerrs;
        /* number of times IB link changed state unexpectedly */
        __u64 sps_iblink;
-       /* no longer used; left for compatibility */
-       __u64 sps_unused3;
+       /* kernel receive interrupts that didn't read intstat */
+       __u64 sps_fastrcvint;
        /* number of kernel (port0) packets received */
        __u64 sps_port0pkts;
        /* number of "ethernet" packets sent by driver */
@@ -121,8 +123,7 @@ struct infinipath_stats {
        __u64 sps_ports;
        /* list of pkeys (other than default) accepted (0 means not set) */
        __u16 sps_pkeys[4];
-       /* lids for up to 4 infinipaths, indexed by infinipath # */
-       __u16 sps_lid[4];
+       __u16 sps_unused16[4]; /* available; maintaining compatible layout */
        /* number of user ports per chip (not IB ports) */
        __u32 sps_nports;
        /* not our interrupt, or already handled */
@@ -140,10 +141,8 @@ struct infinipath_stats {
         * packets if ipath not configured, sma/mad, etc.)
         */
        __u64 sps_krdrops;
-       /* mlids for up to 4 infinipaths, indexed by infinipath # */
-       __u16 sps_mlid[4];
        /* pad for future growth */
-       __u64 __sps_pad[45];
+       __u64 __sps_pad[46];
 };
 
 /*
@@ -310,6 +309,9 @@ struct ipath_base_info {
        __u32 spi_rcv_egrchunksize;
        /* total size of mmap to cover full rcvegrbuffers */
        __u32 spi_rcv_egrbuftotlen;
+       __u32 spi_filler_for_align;
+       /* address of readonly memory copy of the rcvhdrq tail register. */
+       __u64 spi_rcvhdr_tailaddr;
 } __attribute__ ((aligned(8)));
 
 
@@ -342,9 +344,9 @@ struct ipath_base_info {
 /*
  * Similarly, this is the kernel version going back to the user.  It's
  * slightly different, in that we want to tell if the driver was built as
- * part of a PathScale release, or from the driver from OpenIB, kernel.org,
- * or a standard distribution, for support reasons.  The high bit is 0 for
- * non-PathScale, and 1 for PathScale-built/supplied.
+ * part of a QLogic release, or from the driver from openfabrics.org,
+ * kernel.org, or a standard distribution, for support reasons.
+ * The high bit is 0 for non-QLogic and 1 for QLogic-built/supplied.
  *
  * It's returned by the driver to the user code during initialization in the
  * spi_sw_version field of ipath_base_info, so the user code can in turn
@@ -379,13 +381,7 @@ struct ipath_user_info {
         */
        __u32 spu_rcvhdrsize;
 
-       /*
-        * cache line aligned (64 byte) user address to
-        * which the rcvhdrtail register will be written by infinipath
-        * whenever it changes, so that no chip registers are read in
-        * the performance path.
-        */
-       __u64 spu_rcvhdraddr;
+       __u64 spu_unused; /* kept for compatible layout */
 
        /*
         * address of struct base_info to write to
@@ -481,7 +477,7 @@ struct ipath_sma_pkt
  * Data layout in I2C flash (for GUID, etc.)
  * All fields are little-endian binary unless otherwise stated
  */
-#define IPATH_FLASH_VERSION 1
+#define IPATH_FLASH_VERSION 2
 struct ipath_flash {
        /* flash layout version (IPATH_FLASH_VERSION) */
        __u8 if_fversion;
@@ -489,14 +485,14 @@ struct ipath_flash {
        __u8 if_csum;
        /*
         * valid length (in use, protected by if_csum), including
-        * if_fversion and if_sum themselves)
+        * if_fversion and if_csum themselves)
         */
        __u8 if_length;
        /* the GUID, in network order */
        __u8 if_guid[8];
        /* number of GUIDs to use, starting from if_guid */
        __u8 if_numguid;
-       /* the board serial number, in ASCII */
+       /* the (last 10 characters of) board serial number, in ASCII */
        char if_serial[12];
        /* board mfg date (YYYYMMDD ASCII) */
        char if_mfgdate[8];
@@ -508,8 +504,10 @@ struct ipath_flash {
        __u8 if_powerhour[2];
        /* ASCII free-form comment field */
        char if_comment[32];
-       /* 78 bytes used, min flash size is 128 bytes */
-       __u8 if_future[50];
+       /* Backwards compatible prefix for longer QLogic Serial Numbers */
+       char if_sprefix[4];
+       /* 82 bytes used, min flash size is 128 bytes */
+       __u8 if_future[46];
 };
 
 /*
@@ -603,14 +601,118 @@ struct infinipath_counters {
 #define INFINIPATH_KPF_INTR 0x1
 
 /* SendPIO per-buffer control */
-#define INFINIPATH_SP_LENGTHP1_MASK 0x3FF
-#define INFINIPATH_SP_LENGTHP1_SHIFT 0
-#define INFINIPATH_SP_INTR    0x80000000
-#define INFINIPATH_SP_TEST    0x40000000
-#define INFINIPATH_SP_TESTEBP 0x20000000
+#define INFINIPATH_SP_TEST    0x40
+#define INFINIPATH_SP_TESTEBP 0x20
 
 /* SendPIOAvail bits */
 #define INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT 1
 #define INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT 0
 
+/* infinipath header format */
+struct ipath_header {
+       /*
+        * Version - 4 bits, Port - 4 bits, TID - 10 bits and Offset -
+        * 14 bits before ECO change ~28 Dec 03.  After that, Vers 4,
+        * Port 3, TID 11, offset 14.
+        */
+       __le32 ver_port_tid_offset;
+       __le16 chksum;
+       __le16 pkt_flags;
+};
+
+/* infinipath user message header format.
+ * This structure contains the first 4 fields common to all protocols
+ * that employ infinipath.
+ */
+struct ipath_message_header {
+       __be16 lrh[4];
+       __be32 bth[3];
+       /* fields below this point are in host byte order */
+       struct ipath_header iph;
+       __u8 sub_opcode;
+};
+
+/* infinipath ethernet header format */
+struct ether_header {
+       __be16 lrh[4];
+       __be32 bth[3];
+       struct ipath_header iph;
+       __u8 sub_opcode;
+       __u8 cmd;
+       __be16 lid;
+       __u16 mac[3];
+       __u8 frag_num;
+       __u8 seq_num;
+       __le32 len;
+       /* MUST be of word size due to PIO write requirements */
+       __le32 csum;
+       __le16 csum_offset;
+       __le16 flags;
+       __u16 first_2_bytes;
+       __u8 unused[2];         /* currently unused */
+};
+
+
+/* IB - LRH header consts */
+#define IPATH_LRH_GRH 0x0003   /* 1. word of IB LRH - next header: GRH */
+#define IPATH_LRH_BTH 0x0002   /* 1. word of IB LRH - next header: BTH */
+
+/* misc. */
+#define SIZE_OF_CRC 1
+
+#define IPATH_DEFAULT_P_KEY 0xFFFF
+#define IPATH_PERMISSIVE_LID 0xFFFF
+#define IPATH_AETH_CREDIT_SHIFT 24
+#define IPATH_AETH_CREDIT_MASK 0x1F
+#define IPATH_AETH_CREDIT_INVAL 0x1F
+#define IPATH_PSN_MASK 0xFFFFFF
+#define IPATH_MSN_MASK 0xFFFFFF
+#define IPATH_QPN_MASK 0xFFFFFF
+#define IPATH_MULTICAST_LID_BASE 0xC000
+#define IPATH_MULTICAST_QPN 0xFFFFFF
+
+/* Receive Header Queue: receive type (from infinipath) */
+#define RCVHQ_RCV_TYPE_EXPECTED  0
+#define RCVHQ_RCV_TYPE_EAGER     1
+#define RCVHQ_RCV_TYPE_NON_KD    2
+#define RCVHQ_RCV_TYPE_ERROR     3
+
+
+/* sub OpCodes - ith4x  */
+#define IPATH_ITH4X_OPCODE_ENCAP 0x81
+#define IPATH_ITH4X_OPCODE_LID_ARP 0x82
+
+#define IPATH_HEADER_QUEUE_WORDS 9
+
+/* functions for extracting fields from rcvhdrq entries for the driver.
+ */
+static inline __u32 ipath_hdrget_err_flags(const __le32 * rbuf)
+{
+       return __le32_to_cpu(rbuf[1]);
+}
+
+static inline __u32 ipath_hdrget_rcv_type(const __le32 * rbuf)
+{
+       return (__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_RCVTYPE_SHIFT)
+           & INFINIPATH_RHF_RCVTYPE_MASK;
+}
+
+static inline __u32 ipath_hdrget_length_in_bytes(const __le32 * rbuf)
+{
+       return ((__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_LENGTH_SHIFT)
+               & INFINIPATH_RHF_LENGTH_MASK) << 2;
+}
+
+static inline __u32 ipath_hdrget_index(const __le32 * rbuf)
+{
+       return (__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_EGRINDEX_SHIFT)
+           & INFINIPATH_RHF_EGRINDEX_MASK;
+}
+
+static inline __u32 ipath_hdrget_ipath_ver(__le32 hdrword)
+{
+       return (__le32_to_cpu(hdrword) >> INFINIPATH_I_VERS_SHIFT)
+           & INFINIPATH_I_VERS_MASK;
+}
+
 #endif                         /* _IPATH_COMMON_H */
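The new ipath_hdrget_*() helpers above replace the ips_get_*() accessors
from the now-deleted ips_common.h; the receive path in ipath_driver.c
(hunks below) decodes a receive-header-queue entry roughly as follows
(sketch, declarations abbreviated):

	__le32 *rc;     /* one rcvhdrq entry */
	u32 eflags = ipath_hdrget_err_flags(rc);
	u32 etype  = ipath_hdrget_rcv_type(rc);
	u32 tlen   = ipath_hdrget_length_in_bytes(rc);
	if (etype != RCVHQ_RCV_TYPE_EXPECTED)
		etail = ipath_hdrget_index(rc);   /* eager-buffer index */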
index 7ece1135ddfe8680981352f6bc356bee28014332..3efee341c9bcddcf31c1401a3b38ac17c3e6f641 100644
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -157,10 +158,21 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
                              struct ib_ucontext *context,
                              struct ib_udata *udata)
 {
+       struct ipath_ibdev *dev = to_idev(ibdev);
        struct ipath_cq *cq;
        struct ib_wc *wc;
        struct ib_cq *ret;
 
+       if (entries > ib_ipath_max_cqes) {
+               ret = ERR_PTR(-EINVAL);
+               goto bail;
+       }
+
+       if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
+               ret = ERR_PTR(-ENOMEM);
+               goto bail;
+       }
+
        /*
         * Need to use vmalloc() if we want to support large #s of
         * entries.
@@ -196,6 +208,8 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
 
        ret = &cq->ibcq;
 
+       dev->n_cqs_allocated++;
+
 bail:
        return ret;
 }
@@ -210,9 +224,11 @@ bail:
  */
 int ipath_destroy_cq(struct ib_cq *ibcq)
 {
+       struct ipath_ibdev *dev = to_idev(ibcq->device);
        struct ipath_cq *cq = to_icq(ibcq);
 
        tasklet_kill(&cq->comptask);
+       dev->n_cqs_allocated--;
        vfree(cq->queue);
        kfree(cq);
 
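The create/destroy pair above adds a standard bounded-resource pattern: a
per-CQ size cap and a device-wide count, checked at creation and unwound at
destruction. The limits (ib_ipath_max_cqes, ib_ipath_max_cqs) are assumed
here to be module-wide tunables defined in ipath_verbs.c; compressed to its
essentials:

	if (entries > ib_ipath_max_cqes)              /* per-CQ size cap */
		return ERR_PTR(-EINVAL);
	if (dev->n_cqs_allocated == ib_ipath_max_cqs) /* device-wide count */
		return ERR_PTR(-ENOMEM);
	/* ... allocate and initialize the CQ ... */
	dev->n_cqs_allocated++;    /* decremented in ipath_destroy_cq() */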
index 46762387f5f8e29fd65642a6d34d86a5353a5c08..f415beda0d32bd3e2c7809468625e10e90a3f862 100644
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
index 28ddceb260e895dd88863e5995e4dbb6df1dbd9e..147dd89e21c901267c8799467f55fdfb2eb29e97 100644
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
 #include <linux/pci.h>
 #include <asm/uaccess.h>
 
-#include "ipath_common.h"
 #include "ipath_kernel.h"
-#include "ips_common.h"
 #include "ipath_layer.h"
+#include "ipath_common.h"
 
 int ipath_diag_inuse;
 static int diag_set_link;
@@ -66,18 +66,20 @@ static struct file_operations diag_file_ops = {
        .release = ipath_diag_release
 };
 
-static struct cdev *diag_cdev;
-static struct class_device *diag_class_dev;
-
-int ipath_diag_init(void)
+int ipath_diag_add(struct ipath_devdata *dd)
 {
-       return ipath_cdev_init(IPATH_DIAG_MINOR, "ipath_diag",
-                              &diag_file_ops, &diag_cdev, &diag_class_dev);
+       char name[16];
+
+       snprintf(name, sizeof(name), "ipath_diag%d", dd->ipath_unit);
+
+       return ipath_cdev_init(IPATH_DIAG_MINOR_BASE + dd->ipath_unit, name,
+                              &diag_file_ops, &dd->diag_cdev,
+                              &dd->diag_class_dev);
 }
 
-void ipath_diag_cleanup(void)
+void ipath_diag_remove(struct ipath_devdata *dd)
 {
-       ipath_cdev_cleanup(&diag_cdev, &diag_class_dev);
+       ipath_cdev_cleanup(&dd->diag_cdev, &dd->diag_class_dev);
 }
 
 /**
@@ -101,8 +103,7 @@ static int ipath_read_umem64(struct ipath_devdata *dd, void __user *uaddr,
        int ret;
 
        /* not very efficient, but it works for now */
-       if (reg_addr < dd->ipath_kregbase ||
-           reg_end > dd->ipath_kregend) {
+       if (reg_addr < dd->ipath_kregbase || reg_end > dd->ipath_kregend) {
                ret = -EINVAL;
                goto bail;
        }
@@ -113,7 +114,7 @@ static int ipath_read_umem64(struct ipath_devdata *dd, void __user *uaddr,
                        goto bail;
                }
                reg_addr++;
-               uaddr++;
+               uaddr += sizeof(u64);
        }
        ret = 0;
 bail:
@@ -139,8 +140,7 @@ static int ipath_write_umem64(struct ipath_devdata *dd, void __iomem *caddr,
        int ret;
 
        /* not very efficient, but it works for now */
-       if (reg_addr < dd->ipath_kregbase ||
-           reg_end > dd->ipath_kregend) {
+       if (reg_addr < dd->ipath_kregbase || reg_end > dd->ipath_kregend) {
                ret = -EINVAL;
                goto bail;
        }
@@ -153,7 +153,7 @@ static int ipath_write_umem64(struct ipath_devdata *dd, void __iomem *caddr,
                writeq(data, reg_addr);
 
                reg_addr++;
-               uaddr++;
+               uaddr += sizeof(u64);
        }
        ret = 0;
 bail:
@@ -191,7 +191,8 @@ static int ipath_read_umem32(struct ipath_devdata *dd, void __user *uaddr,
                }
 
                reg_addr++;
-               uaddr++;
+               uaddr += sizeof(u32);
+
        }
        ret = 0;
 bail:
@@ -230,7 +231,7 @@ static int ipath_write_umem32(struct ipath_devdata *dd, void __iomem *caddr,
                writel(data, reg_addr);
 
                reg_addr++;
-               uaddr++;
+               uaddr += sizeof(u32);
        }
        ret = 0;
 bail:
@@ -239,59 +240,45 @@ bail:
 
 static int ipath_diag_open(struct inode *in, struct file *fp)
 {
+       int unit = iminor(in) - IPATH_DIAG_MINOR_BASE;
        struct ipath_devdata *dd;
-       int unit = 0; /* XXX this is bogus */
-       unsigned long flags;
        int ret;
 
-       dd = ipath_lookup(unit);
-
        mutex_lock(&ipath_mutex);
-       spin_lock_irqsave(&ipath_devs_lock, flags);
 
        if (ipath_diag_inuse) {
                ret = -EBUSY;
                goto bail;
        }
 
-       list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
-               /*
-                * we need at least one infinipath device to be present
-                * (don't use INITTED, because we want to be able to open
-                * even if device is in freeze mode, which cleared INITTED).
-                * There is a small amount of risk to this, which is why we
-                * also verify kregbase is set.
-                */
-
-               if (!(dd->ipath_flags & IPATH_PRESENT) ||
-                   !dd->ipath_kregbase)
-                       continue;
-
-               ipath_diag_inuse = 1;
-               diag_set_link = 0;
-               ret = 0;
+       dd = ipath_lookup(unit);
+
+       if (dd == NULL || !(dd->ipath_flags & IPATH_PRESENT) ||
+           !dd->ipath_kregbase) {
+               ret = -ENODEV;
                goto bail;
        }
 
-       ret = -ENODEV;
-
-bail:
-       spin_unlock_irqrestore(&ipath_devs_lock, flags);
+       fp->private_data = dd;
+       ipath_diag_inuse = 1;
+       diag_set_link = 0;
+       ret = 0;
 
        /* Only expose a way to reset the device if we
           make it into diag mode. */
-       if (ret == 0)
-               ipath_expose_reset(&dd->pcidev->dev);
+       ipath_expose_reset(&dd->pcidev->dev);
 
+bail:
        mutex_unlock(&ipath_mutex);
 
        return ret;
 }
 
-static int ipath_diag_release(struct inode *i, struct file *f)
+static int ipath_diag_release(struct inode *in, struct file *fp)
 {
        mutex_lock(&ipath_mutex);
        ipath_diag_inuse = 0;
+       fp->private_data = NULL;
        mutex_unlock(&ipath_mutex);
        return 0;
 }
@@ -299,17 +286,10 @@ static int ipath_diag_release(struct inode *i, struct file *f)
 static ssize_t ipath_diag_read(struct file *fp, char __user *data,
                               size_t count, loff_t *off)
 {
-       int unit = 0; /* XXX provide for reads on other units some day */
-       struct ipath_devdata *dd;
+       struct ipath_devdata *dd = fp->private_data;
        void __iomem *kreg_base;
        ssize_t ret;
 
-       dd = ipath_lookup(unit);
-       if (!dd) {
-               ret = -ENODEV;
-               goto bail;
-       }
-
        kreg_base = dd->ipath_kregbase;
 
        if (count == 0)
@@ -328,23 +308,16 @@ static ssize_t ipath_diag_read(struct file *fp, char __user *data,
                ret = count;
        }
 
-bail:
        return ret;
 }
 
 static ssize_t ipath_diag_write(struct file *fp, const char __user *data,
                                size_t count, loff_t *off)
 {
-       int unit = 0; /* XXX this is bogus */
-       struct ipath_devdata *dd;
+       struct ipath_devdata *dd = fp->private_data;
        void __iomem *kreg_base;
        ssize_t ret;
 
-       dd = ipath_lookup(unit);
-       if (!dd) {
-               ret = -ENODEV;
-               goto bail;
-       }
        kreg_base = dd->ipath_kregbase;
 
        if (count == 0)
@@ -363,6 +336,5 @@ static ssize_t ipath_diag_write(struct file *fp, const char __user *data,
                ret = count;
        }
 
-bail:
        return ret;
 }
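Net effect of this file's changes: one ipath_diag device per unit instead
of a single hard-coded unit 0, with the unit recovered from the minor
number at open time. The essential pattern, condensed from the hunks above:

	int unit = iminor(in) - IPATH_DIAG_MINOR_BASE;
	struct ipath_devdata *dd = ipath_lookup(unit);
	if (dd == NULL || !(dd->ipath_flags & IPATH_PRESENT) ||
	    !dd->ipath_kregbase)
		return -ENODEV;     /* absent, or frozen without kregbase */
	fp->private_data = dd;      /* read()/write() fetch dd from here */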
index e4b897fa569a5e945841fee575e1db1706f4ecd4..6efc56bce92148b28fa75995347967a874347281 100644
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -38,8 +39,8 @@
 #include <linux/vmalloc.h>
 
 #include "ipath_kernel.h"
-#include "ips_common.h"
 #include "ipath_layer.h"
+#include "ipath_common.h"
 
 static void ipath_update_pio_bufs(struct ipath_devdata *);
 
@@ -52,7 +53,7 @@ const char *ipath_get_unit_name(int unit)
 
 EXPORT_SYMBOL_GPL(ipath_get_unit_name);
 
-#define DRIVER_LOAD_MSG "PathScale " IPATH_DRV_NAME " loaded: "
+#define DRIVER_LOAD_MSG "QLogic " IPATH_DRV_NAME " loaded: "
 #define PFX IPATH_DRV_NAME ": "
 
 /*
@@ -74,8 +75,8 @@ MODULE_PARM_DESC(debug, "mask for debug prints");
 EXPORT_SYMBOL_GPL(ipath_debug);
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("PathScale <support@pathscale.com>");
-MODULE_DESCRIPTION("Pathscale InfiniPath driver");
+MODULE_AUTHOR("QLogic <support@pathscale.com>");
+MODULE_DESCRIPTION("QLogic InfiniPath driver");
 
 const char *ipath_ibcstatus_str[] = {
        "Disabled",
@@ -130,14 +131,6 @@ static struct pci_driver ipath_driver = {
        .id_table = ipath_pci_tbl,
 };
 
-/*
- * This is where port 0's rcvhdrtail register is written back; we also
- * want nothing else sharing the cache line, so make it a cache line
- * in size.  Used for all units.
- */
-volatile __le64 *ipath_port0_rcvhdrtail;
-dma_addr_t ipath_port0_rcvhdrtail_dma;
-static int port0_rcvhdrtail_refs;
 
 static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
                             u32 *bar0, u32 *bar1)
@@ -170,14 +163,13 @@ static void ipath_free_devdata(struct pci_dev *pdev,
                list_del(&dd->ipath_list);
                spin_unlock_irqrestore(&ipath_devs_lock, flags);
        }
-       dma_free_coherent(&pdev->dev, sizeof(*dd), dd, dd->ipath_dma_addr);
+       vfree(dd);
 }
 
 static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
 {
        unsigned long flags;
        struct ipath_devdata *dd;
-       dma_addr_t dma_addr;
        int ret;
 
        if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
@@ -185,15 +177,12 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
                goto bail;
        }
 
-       dd = dma_alloc_coherent(&pdev->dev, sizeof(*dd), &dma_addr,
-                               GFP_KERNEL);
-
+       dd = vmalloc(sizeof(*dd));
        if (!dd) {
                dd = ERR_PTR(-ENOMEM);
                goto bail;
        }
-
-       dd->ipath_dma_addr = dma_addr;
+       memset(dd, 0, sizeof(*dd));
        dd->ipath_unit = -1;
 
        spin_lock_irqsave(&ipath_devs_lock, flags);
@@ -271,47 +260,6 @@ int ipath_count_units(int *npresentp, int *nupp, u32 *maxportsp)
        return nunits;
 }
 
-static int init_port0_rcvhdrtail(struct pci_dev *pdev)
-{
-       int ret;
-
-       mutex_lock(&ipath_mutex);
-
-       if (!ipath_port0_rcvhdrtail) {
-               ipath_port0_rcvhdrtail =
-                       dma_alloc_coherent(&pdev->dev,
-                                          IPATH_PORT0_RCVHDRTAIL_SIZE,
-                                          &ipath_port0_rcvhdrtail_dma,
-                                          GFP_KERNEL);
-
-               if (!ipath_port0_rcvhdrtail) {
-                       ret = -ENOMEM;
-                       goto bail;
-               }
-       }
-       port0_rcvhdrtail_refs++;
-       ret = 0;
-
-bail:
-       mutex_unlock(&ipath_mutex);
-
-       return ret;
-}
-
-static void cleanup_port0_rcvhdrtail(struct pci_dev *pdev)
-{
-       mutex_lock(&ipath_mutex);
-
-       if (!--port0_rcvhdrtail_refs) {
-               dma_free_coherent(&pdev->dev, IPATH_PORT0_RCVHDRTAIL_SIZE,
-                                 (void *) ipath_port0_rcvhdrtail,
-                                 ipath_port0_rcvhdrtail_dma);
-               ipath_port0_rcvhdrtail = NULL;
-       }
-
-       mutex_unlock(&ipath_mutex);
-}
-
 /*
  * These next two routines are placeholders in case we don't have per-arch
  * code for controlling write combining.  If explicit control of write
@@ -336,20 +284,12 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
        u32 bar0 = 0, bar1 = 0;
        u8 rev;
 
-       ret = init_port0_rcvhdrtail(pdev);
-       if (ret < 0) {
-               printk(KERN_ERR IPATH_DRV_NAME
-                      ": Could not allocate port0_rcvhdrtail: error %d\n",
-                      -ret);
-               goto bail;
-       }
-
        dd = ipath_alloc_devdata(pdev);
        if (IS_ERR(dd)) {
                ret = PTR_ERR(dd);
                printk(KERN_ERR IPATH_DRV_NAME
                       ": Could not allocate devdata: error %d\n", -ret);
-               goto bail_rcvhdrtail;
+               goto bail;
        }
 
        ipath_cdbg(VERBOSE, "initializing unit #%u\n", dd->ipath_unit);
@@ -424,12 +364,29 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
                 */
                ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
                if (ret) {
-                       dev_info(&pdev->dev, "pci_set_dma_mask unit %u "
-                                "fails: %d\n", dd->ipath_unit, ret);
+                       dev_info(&pdev->dev,
+                               "Unable to set DMA mask for unit %u: %d\n",
+                               dd->ipath_unit, ret);
                        goto bail_regions;
                }
-               else
+               else {
                        ipath_dbg("No 64bit DMA mask, used 32 bit mask\n");
+                       ret = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+                       if (ret)
+                               dev_info(&pdev->dev,
+                                       "Unable to set DMA consistent mask "
+                                       "for unit %u: %d\n",
+                                       dd->ipath_unit, ret);
+
+               }
+       }
+       else {
+               ret = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+               if (ret)
+                       dev_info(&pdev->dev,
+                               "Unable to set DMA consistent mask "
+                               "for unit %u: %d\n",
+                               dd->ipath_unit, ret);
        }
 
        pci_set_master(pdev);
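The hunk above completes the usual 2.6-era PCI DMA-mask negotiation: try
the 64-bit streaming mask (done just above this hunk), fall back to 32-bit,
and set the consistent (coherent) mask to match whichever succeeded. A
condensed sketch of the full shape, reusing this function's bail_regions
label and ignoring the consistent-mask return values the real code merely
logs:

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) == 0) {
		pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	} else {
		goto bail_regions;   /* no usable DMA mask at all */
	}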
@@ -452,7 +409,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
                ipath_init_pe800_funcs(dd);
                break;
        default:
-               ipath_dev_err(dd, "Found unknown PathScale deviceid 0x%x, "
+               ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
                              "failing\n", ent->device);
                return -ENODEV;
        }
@@ -495,16 +452,16 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
                ((void __iomem *)dd->ipath_kregbase + len);
        dd->ipath_physaddr = addr;      /* used for io_remap, etc. */
        /* for user mmap */
-       dd->ipath_kregvirt = (u64 __iomem *) phys_to_virt(addr);
-       ipath_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p "
-                  "kregvirt %p\n", addr, dd->ipath_kregbase,
-                  dd->ipath_kregvirt);
+       ipath_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p\n",
+                  addr, dd->ipath_kregbase);
 
        /*
         * clear ipath_flags here instead of in ipath_init_chip as it is set
         * by ipath_setup_htconfig.
         */
        dd->ipath_flags = 0;
+       dd->ipath_lli_counter = 0;
+       dd->ipath_lli_errors = 0;
 
        if (dd->ipath_f_bus(dd, pdev))
                ipath_dev_err(dd, "Failed to setup config space; "
@@ -545,6 +502,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
        ipath_device_create_group(&pdev->dev, dd);
        ipathfs_add_device(dd);
        ipath_user_add(dd);
+       ipath_diag_add(dd);
        ipath_layer_add(dd);
 
        goto bail;
@@ -561,9 +519,6 @@ bail_disable:
 bail_devdata:
        ipath_free_devdata(pdev, dd);
 
-bail_rcvhdrtail:
-       cleanup_port0_rcvhdrtail(pdev);
-
 bail:
        return ret;
 }
@@ -577,8 +532,9 @@ static void __devexit ipath_remove_one(struct pci_dev *pdev)
                return;
 
        dd = pci_get_drvdata(pdev);
-       ipath_layer_del(dd);
-       ipath_user_del(dd);
+       ipath_layer_remove(dd);
+       ipath_diag_remove(dd);
+       ipath_user_remove(dd);
        ipathfs_remove_device(dd);
        ipath_device_remove_group(&pdev->dev, dd);
        ipath_cdbg(VERBOSE, "Releasing pci memory regions, dd %p, "
@@ -594,7 +550,6 @@ static void __devexit ipath_remove_one(struct pci_dev *pdev)
        pci_disable_device(pdev);
 
        ipath_free_devdata(pdev, dd);
-       cleanup_port0_rcvhdrtail(pdev);
 }
 
 /* general driver use */
@@ -868,7 +823,8 @@ static void ipath_rcv_layer(struct ipath_devdata *dd, u32 etail,
        u8 pad, *bthbytes;
        struct sk_buff *skb, *nskb;
 
-       if (dd->ipath_port0_skbs && hdr->sub_opcode == OPCODE_ENCAP) {
+       if (dd->ipath_port0_skbs &&
+                       hdr->sub_opcode == IPATH_ITH4X_OPCODE_ENCAP) {
                /*
                 * Allocate a new sk_buff to replace the one we give
                 * to the network stack.
@@ -899,7 +855,7 @@ static void ipath_rcv_layer(struct ipath_devdata *dd, u32 etail,
                /* another ether packet received */
                ipath_stats.sps_ether_rpkts++;
        }
-       else if (hdr->sub_opcode == OPCODE_LID_ARP)
+       else if (hdr->sub_opcode == IPATH_ITH4X_OPCODE_LID_ARP)
                __ipath_layer_rcv_lid(dd, hdr);
 }
 
@@ -916,8 +872,8 @@ void ipath_kreceive(struct ipath_devdata *dd)
        const u32 rsize = dd->ipath_rcvhdrentsize;      /* words */
        const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize; /* words */
        u32 etail = -1, l, hdrqtail;
-       struct ips_message_header *hdr;
-       u32 eflags, i, etype, tlen, pkttot = 0;
+       struct ipath_message_header *hdr;
+       u32 eflags, i, etype, tlen, pkttot = 0, updegr=0, reloop=0;
        static u64 totcalls;    /* stats, may eventually remove */
        char emsg[128];
 
@@ -931,24 +887,18 @@ void ipath_kreceive(struct ipath_devdata *dd)
        if (test_and_set_bit(0, &dd->ipath_rcv_pending))
                goto bail;
 
-       if (dd->ipath_port0head ==
-           (u32)le64_to_cpu(*dd->ipath_hdrqtailptr))
+       l = dd->ipath_port0head;
+       hdrqtail = (u32) le64_to_cpu(*dd->ipath_hdrqtailptr);
+       if (l == hdrqtail)
                goto done;
 
-gotmore:
-       /*
-        * read only once at start.  If in flood situation, this helps
-        * performance slightly.  If more arrive while we are processing,
-        * we'll come back here and do them
-        */
-       hdrqtail = (u32)le64_to_cpu(*dd->ipath_hdrqtailptr);
-
-       for (i = 0, l = dd->ipath_port0head; l != hdrqtail; i++) {
+reloop:
+       for (i = 0; l != hdrqtail; i++) {
                u32 qp;
                u8 *bthbytes;
 
                rc = (u64 *) (dd->ipath_pd[0]->port_rcvhdrq + (l << 2));
-               hdr = (struct ips_message_header *)&rc[1];
+               hdr = (struct ipath_message_header *)&rc[1];
                /*
                 * could make a network order version of IPATH_KD_QP, and
                 * do the obvious shift before masking to speed this up.
@@ -956,10 +906,10 @@ gotmore:
                qp = ntohl(hdr->bth[1]) & 0xffffff;
                bthbytes = (u8 *) hdr->bth;
 
-               eflags = ips_get_hdr_err_flags((__le32 *) rc);
-               etype = ips_get_rcv_type((__le32 *) rc);
+               eflags = ipath_hdrget_err_flags((__le32 *) rc);
+               etype = ipath_hdrget_rcv_type((__le32 *) rc);
                /* total length */
-               tlen = ips_get_length_in_bytes((__le32 *) rc);
+               tlen = ipath_hdrget_length_in_bytes((__le32 *) rc);
                ebuf = NULL;
                if (etype != RCVHQ_RCV_TYPE_EXPECTED) {
                        /*
@@ -969,7 +919,7 @@ gotmore:
                         * set ebuf (so we try to copy data) unless the
                         * length requires it.
                         */
-                       etail = ips_get_index((__le32 *) rc);
+                       etail = ipath_hdrget_index((__le32 *) rc);
                        if (tlen > sizeof(*hdr) ||
                            etype == RCVHQ_RCV_TYPE_NON_KD)
                                ebuf = ipath_get_egrbuf(dd, etail, 0);
@@ -981,7 +931,7 @@ gotmore:
                 */
 
                if (etype != RCVHQ_RCV_TYPE_NON_KD && etype !=
-                   RCVHQ_RCV_TYPE_ERROR && ips_get_ipath_ver(
+                   RCVHQ_RCV_TYPE_ERROR && ipath_hdrget_ipath_ver(
                            hdr->iph.ver_port_tid_offset) !=
                    IPS_PROTO_VERSION) {
                        ipath_cdbg(PKT, "Bad InfiniPath protocol version "
@@ -994,7 +944,19 @@ gotmore:
                        ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
                                   "tlen=%x opcode=%x egridx=%x: %s\n",
                                   eflags, l, etype, tlen, bthbytes[0],
-                                  ips_get_index((__le32 *) rc), emsg);
+                                  ipath_hdrget_index((__le32 *) rc), emsg);
+                       /* Count local link integrity errors. */
+                       if (eflags & (INFINIPATH_RHF_H_ICRCERR |
+                                     INFINIPATH_RHF_H_VCRCERR)) {
+                               u8 n = (dd->ipath_ibcctrl >>
+                                       INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
+                                       INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
+
+                               if (++dd->ipath_lli_counter > n) {
+                                       dd->ipath_lli_counter = 0;
+                                       dd->ipath_lli_errors++;
+                               }
+                       }
                } else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
                                int ret = __ipath_verbs_rcv(dd, rc + 1,
                                                            ebuf, tlen);
@@ -1002,6 +964,9 @@ gotmore:
                                        ipath_cdbg(VERBOSE,
                                                   "received IB packet, "
                                                   "not SMA (QP=%x)\n", qp);
+                               if (dd->ipath_lli_counter)
+                                       dd->ipath_lli_counter--;
+
                } else if (etype == RCVHQ_RCV_TYPE_EAGER) {
                        if (qp == IPATH_KD_QP &&
                            bthbytes[0] == ipath_layer_rcv_opcode &&
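
The local link integrity accounting added above is a leaky counter: CRC-flagged packets increment it, good packets decay it, and only overflowing the PhyErrThreshold window is recorded as a real error. A standalone plain-C model of the same arithmetic (hypothetical names, not the driver's API):

#include <stdio.h>

struct lli_state {
        unsigned counter;       /* packets with ICRC/VCRC since last reset */
        unsigned errors;        /* threshold crossings actually reported */
        unsigned threshold;     /* models the IBCC PhyErrThreshold field */
};

static void lli_bad_packet(struct lli_state *s)
{
        if (++s->counter > s->threshold) {
                s->counter = 0;
                s->errors++;
        }
}

static void lli_good_packet(struct lli_state *s)
{
        if (s->counter)
                s->counter--;   /* good traffic decays the running count */
}

int main(void)
{
        struct lli_state s = { 0, 0, 2 };

        lli_bad_packet(&s);
        lli_bad_packet(&s);
        lli_bad_packet(&s);     /* third bad packet crosses the threshold */
        lli_good_packet(&s);
        printf("counter=%u errors=%u\n", s.counter, s.errors);
        return 0;
}
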
@@ -1054,25 +1019,49 @@ gotmore:
                l += rsize;
                if (l >= maxcnt)
                        l = 0;
+               if (etype != RCVHQ_RCV_TYPE_EXPECTED)
+                   updegr = 1;
                /*
-                * update for each packet, to help prevent overflows if we
-                * have lots of packets.
+                * update head regs on last packet, and every 16 packets.
+                * Reduce bus traffic, while still trying to prevent
+                * rcvhdrq overflows, for when the queue is nearly full
                 */
-               (void)ipath_write_ureg(dd, ur_rcvhdrhead,
-                                      dd->ipath_rhdrhead_intr_off | l, 0);
-               if (etype != RCVHQ_RCV_TYPE_EXPECTED)
-                       (void)ipath_write_ureg(dd, ur_rcvegrindexhead,
-                                              etail, 0);
+               if (l == hdrqtail || (i && !(i&0xf))) {
+                       u64 lval;
+                       if (l == hdrqtail) /* PE-800 interrupt only on last */
+                               lval = dd->ipath_rhdrhead_intr_off | l;
+                       else
+                               lval = l;
+                       (void)ipath_write_ureg(dd, ur_rcvhdrhead, lval, 0);
+                       if (updegr) {
+                               (void)ipath_write_ureg(dd, ur_rcvegrindexhead,
+                                                      etail, 0);
+                               updegr = 0;
+                       }
+               }
+       }
+
+       if (!dd->ipath_rhdrhead_intr_off && !reloop) {
+               /* HT-400 workaround; we can have a race clearing chip
+                * interrupt with another interrupt about to be delivered,
+                * and can clear it before it is delivered on the GPIO
+                * workaround.  By doing the extra check here for the
+                * in-memory tail register updating while we were doing
+                * earlier packets, we "almost" guarantee we have covered
+                * that case.
+                */
+               u32 hqtail = (u32)le64_to_cpu(*dd->ipath_hdrqtailptr);
+               if (hqtail != hdrqtail) {
+                       hdrqtail = hqtail;
+                       reloop = 1; /* loop 1 extra time at most */
+                       goto reloop;
+               }
        }
 
        pkttot += i;
 
        dd->ipath_port0head = l;
 
-       if (hdrqtail != (u32)le64_to_cpu(*dd->ipath_hdrqtailptr))
-               /* more arrived while we handled first batch */
-               goto gotmore;
-
        if (pkttot > ipath_stats.sps_maxpkts_call)
                ipath_stats.sps_maxpkts_call = pkttot;
        ipath_stats.sps_port0pkts += pkttot;
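
Two behaviours replace the old gotmore loop here: head-register writes are batched (on the last packet, or every 16th) to cut bus traffic, and the in-memory tail is re-read once at the end to close the HT-400 race. A plain-C model of that control flow (hypothetical ring, not the driver's API):

#include <stdio.h>

#define RING_ENTRIES 32

static unsigned head_writes;    /* models MMIO writes to ur_rcvhdrhead */

static void write_head_reg(unsigned head)
{
        (void)head;
        head_writes++;
}

static unsigned drain(unsigned head, volatile unsigned *tailp)
{
        unsigned tail = *tailp;
        unsigned i, reloop = 0;

again:
        for (i = 0; head != tail; i++) {
                head = (head + 1) % RING_ENTRIES;       /* consume one entry */
                if (head == tail || (i && !(i & 0xf)))
                        write_head_reg(head);   /* batched head update */
        }
        if (!reloop && *tailp != tail) {        /* tail moved meanwhile? */
                tail = *tailp;
                reloop = 1;     /* loop one extra time at most */
                goto again;
        }
        return head;
}

int main(void)
{
        volatile unsigned tail = 20;
        unsigned head = drain(0, &tail);

        printf("head=%u after %u head-register writes\n", head, head_writes);
        return 0;
}
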
@@ -1369,26 +1358,20 @@ bail:
  * @dd: the infinipath device
  * @pd: the port data
  *
- * this *must* be physically contiguous memory, and for now,
- * that limits it to what kmalloc can do.
+ * this must be contiguous memory (from an i/o perspective), and must be
+ * DMA'able (which means for some systems, it will go through an IOMMU,
+ * or be forced into a low address range).
  */
 int ipath_create_rcvhdrq(struct ipath_devdata *dd,
                         struct ipath_portdata *pd)
 {
-       int ret = 0, amt;
+       int ret = 0;
 
-       amt = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
-                   sizeof(u32), PAGE_SIZE);
        if (!pd->port_rcvhdrq) {
-               /*
-                * not using REPEAT isn't viable; at 128KB, we can easily
-                * fail this.  The problem with REPEAT is we can block here
-                * "forever".  There isn't an inbetween, unfortunately.  We
-                * could reduce the risk by never freeing the rcvhdrq except
-                * at unload, but even then, the first time a port is used,
-                * we could delay for some time...
-                */
+               dma_addr_t phys_hdrqtail;
                gfp_t gfp_flags = GFP_USER | __GFP_COMP;
+               int amt = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
+                               sizeof(u32), PAGE_SIZE);
 
                pd->port_rcvhdrq = dma_alloc_coherent(
                        &dd->pcidev->dev, amt, &pd->port_rcvhdrq_phys,
@@ -1401,6 +1384,16 @@ int ipath_create_rcvhdrq(struct ipath_devdata *dd,
                        ret = -ENOMEM;
                        goto bail;
                }
+               pd->port_rcvhdrtail_kvaddr = dma_alloc_coherent(
+                       &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail, GFP_KERNEL);
+               if (!pd->port_rcvhdrtail_kvaddr) {
+                       ipath_dev_err(dd, "attempt to allocate 1 page "
+                                     "for port %u rcvhdrqtailaddr failed\n",
+                                     pd->port_port);
+                       ret = -ENOMEM;
+                       goto bail;
+               }
+               pd->port_rcvhdrqtailaddr_phys = phys_hdrqtail;
 
                pd->port_rcvhdrq_size = amt;
 
@@ -1410,20 +1403,28 @@ int ipath_create_rcvhdrq(struct ipath_devdata *dd,
                           (unsigned long) pd->port_rcvhdrq_phys,
                           (unsigned long) pd->port_rcvhdrq_size,
                           pd->port_port);
-       } else {
-               /*
-                * clear for security, sanity, and/or debugging, each
-                * time we reuse
-                */
-               memset(pd->port_rcvhdrq, 0, amt);
+
+               ipath_cdbg(VERBOSE, "port %d hdrtailaddr, %llx physical\n",
+                          pd->port_port,
+                          (unsigned long long) phys_hdrqtail);
        }
+       else
+               ipath_cdbg(VERBOSE, "reuse port %d rcvhdrq @%p %llx phys; "
+                          "hdrtailaddr@%p %llx physical\n",
+                          pd->port_port, pd->port_rcvhdrq,
+                          pd->port_rcvhdrq_phys, pd->port_rcvhdrtail_kvaddr,
+                          (unsigned long long)pd->port_rcvhdrqtailaddr_phys);
+
+       /* clear for security and sanity on each use */
+       memset(pd->port_rcvhdrq, 0, pd->port_rcvhdrq_size);
+       memset((void *)pd->port_rcvhdrtail_kvaddr, 0, PAGE_SIZE);
 
        /*
         * tell chip each time we init it, even if we are re-using previous
-        * memory (we zero it at process close)
+        * memory (we zero the register at process close)
         */
-       ipath_cdbg(VERBOSE, "writing port %d rcvhdraddr as %lx\n",
-                  pd->port_port, (unsigned long) pd->port_rcvhdrq_phys);
+       ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
+                             pd->port_port, pd->port_rcvhdrqtailaddr_phys);
        ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
                              pd->port_port, pd->port_rcvhdrq_phys);
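
A kernel-style sketch of the allocate/clear/free pairing this function now owns, with hypothetical names: the header queue and its one-page in-memory tail copy are both coherent DMA memory, so both must be released with the same sizes and bus addresses they were created with.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>

struct demo_hdrq {
        void *base;             /* CPU address of the header queue */
        dma_addr_t base_phys;   /* bus address programmed into the chip */
        void *tail;             /* one-page in-memory tail copy */
        dma_addr_t tail_phys;
        size_t size;
};

static int demo_hdrq_create(struct device *dev, struct demo_hdrq *q,
                            size_t size)
{
        q->base = dma_alloc_coherent(dev, size, &q->base_phys,
                                     GFP_USER | __GFP_COMP);
        if (!q->base)
                return -ENOMEM;
        q->tail = dma_alloc_coherent(dev, PAGE_SIZE, &q->tail_phys,
                                     GFP_KERNEL);
        if (!q->tail) {
                dma_free_coherent(dev, size, q->base, q->base_phys);
                return -ENOMEM;
        }
        q->size = size;
        memset(q->base, 0, size);       /* clear for security and sanity */
        memset(q->tail, 0, PAGE_SIZE);
        return 0;
}

static void demo_hdrq_destroy(struct device *dev, struct demo_hdrq *q)
{
        dma_free_coherent(dev, PAGE_SIZE, q->tail, q->tail_phys);
        dma_free_coherent(dev, q->size, q->base, q->base_phys);
}
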
 
@@ -1511,15 +1512,27 @@ void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
                [INFINIPATH_IBCC_LINKCMD_ARMED] = "ARMED",
                [INFINIPATH_IBCC_LINKCMD_ACTIVE] = "ACTIVE"
        };
+       int linkcmd = (which >> INFINIPATH_IBCC_LINKCMD_SHIFT) &
+                       INFINIPATH_IBCC_LINKCMD_MASK;
+
        ipath_cdbg(SMA, "Trying to move unit %u to %s, current ltstate "
                   "is %s\n", dd->ipath_unit,
-                  what[(which >> INFINIPATH_IBCC_LINKCMD_SHIFT) &
-                       INFINIPATH_IBCC_LINKCMD_MASK],
+                  what[linkcmd],
                   ipath_ibcstatus_str[
                           (ipath_read_kreg64
                            (dd, dd->ipath_kregs->kr_ibcstatus) >>
                            INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
                           INFINIPATH_IBCS_LINKTRAININGSTATE_MASK]);
+       /* flush all queued sends when going to DOWN or INIT, to be sure that
+        * they don't block SMA and other MAD packets */
+       if (!linkcmd || linkcmd == INFINIPATH_IBCC_LINKCMD_INIT) {
+               ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+                                INFINIPATH_S_ABORT);
+               ipath_disarm_piobufs(dd, dd->ipath_lastport_piobuf,
+                                   (unsigned)(dd->ipath_piobcnt2k +
+                                   dd->ipath_piobcnt4k) -
+                                   dd->ipath_lastport_piobuf);
+       }
 
        ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
                         dd->ipath_ibcctrl | which);
@@ -1638,7 +1651,7 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
        /* disable IBC */
        dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
        ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-                        dd->ipath_control);
+                        dd->ipath_control | INFINIPATH_C_FREEZEMODE);
 
        /*
         * clear SerdesEnable and turn the leds off; do this here because
@@ -1667,60 +1680,54 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
 /**
  * ipath_free_pddata - free a port's allocated data
  * @dd: the infinipath device
- * @port: the port
- * @freehdrq: free the port data structure if true
+ * @pd: the portdata structure
  *
- * when closing, free up any allocated data for a port, if the
- * reference count goes to zero
- * Note: this also optionally frees the portdata itself!
- * Any changes here have to be matched up with the reinit case
- * of ipath_init_chip(), which calls this routine on reinit after reset.
+ * free up any allocated data for a port
+ * This should not touch anything that would affect a simultaneous
+ * re-allocation of port data, because it is called after ipath_mutex
+ * is released (and can be called from reinit as well).
+ * It should never change any chip state, or global driver state.
+ * (The only exception to global state is freeing the port0 port0_skbs.)
  */
-void ipath_free_pddata(struct ipath_devdata *dd, u32 port, int freehdrq)
+void ipath_free_pddata(struct ipath_devdata *dd, struct ipath_portdata *pd)
 {
-       struct ipath_portdata *pd = dd->ipath_pd[port];
-
        if (!pd)
                return;
-       if (freehdrq)
-               /*
-                * only clear and free portdata if we are going to also
-                * release the hdrq, otherwise we leak the hdrq on each
-                * open/close cycle
-                */
-               dd->ipath_pd[port] = NULL;
-       if (freehdrq && pd->port_rcvhdrq) {
+
+       if (pd->port_rcvhdrq) {
                ipath_cdbg(VERBOSE, "free closed port %d rcvhdrq @ %p "
                           "(size=%lu)\n", pd->port_port, pd->port_rcvhdrq,
                           (unsigned long) pd->port_rcvhdrq_size);
                dma_free_coherent(&dd->pcidev->dev, pd->port_rcvhdrq_size,
                                  pd->port_rcvhdrq, pd->port_rcvhdrq_phys);
                pd->port_rcvhdrq = NULL;
+               if (pd->port_rcvhdrtail_kvaddr) {
+                       dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
+                                        (void *)pd->port_rcvhdrtail_kvaddr,
+                                        pd->port_rcvhdrqtailaddr_phys);
+                       pd->port_rcvhdrtail_kvaddr = NULL;
+               }
        }
-       if (port && pd->port_rcvegrbuf) {
-               /* always free this */
-               if (pd->port_rcvegrbuf) {
-                       unsigned e;
-
-                       for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
-                               void *base = pd->port_rcvegrbuf[e];
-                               size_t size = pd->port_rcvegrbuf_size;
-
-                               ipath_cdbg(VERBOSE, "egrbuf free(%p, %lu), "
-                                          "chunk %u/%u\n", base,
-                                          (unsigned long) size,
-                                          e, pd->port_rcvegrbuf_chunks);
-                               dma_free_coherent(
-                                       &dd->pcidev->dev, size, base,
-                                       pd->port_rcvegrbuf_phys[e]);
-                       }
-                       vfree(pd->port_rcvegrbuf);
-                       pd->port_rcvegrbuf = NULL;
-                       vfree(pd->port_rcvegrbuf_phys);
-                       pd->port_rcvegrbuf_phys = NULL;
+       if (pd->port_port && pd->port_rcvegrbuf) {
+               unsigned e;
+
+               for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
+                       void *base = pd->port_rcvegrbuf[e];
+                       size_t size = pd->port_rcvegrbuf_size;
+
+                       ipath_cdbg(VERBOSE, "egrbuf free(%p, %lu), "
+                                  "chunk %u/%u\n", base,
+                                  (unsigned long) size,
+                                  e, pd->port_rcvegrbuf_chunks);
+                       dma_free_coherent(&dd->pcidev->dev, size,
+                               base, pd->port_rcvegrbuf_phys[e]);
                }
+               vfree(pd->port_rcvegrbuf);
+               pd->port_rcvegrbuf = NULL;
+               vfree(pd->port_rcvegrbuf_phys);
+               pd->port_rcvegrbuf_phys = NULL;
                pd->port_rcvegrbuf_chunks = 0;
-       } else if (port == 0 && dd->ipath_port0_skbs) {
+       } else if (pd->port_port == 0 && dd->ipath_port0_skbs) {
                unsigned e;
                struct sk_buff **skbs = dd->ipath_port0_skbs;
 
@@ -1732,10 +1739,8 @@ void ipath_free_pddata(struct ipath_devdata *dd, u32 port, int freehdrq)
                                dev_kfree_skb(skbs[e]);
                vfree(skbs);
        }
-       if (freehdrq) {
-               kfree(pd->port_tid_pg_list);
-               kfree(pd);
-       }
+       kfree(pd->port_tid_pg_list);
+       kfree(pd);
 }
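
Per the comment above, callers now detach the per-port pointer while ipath_mutex is held and call this function only after the mutex is dropped. A standalone plain-C sketch of that detach-then-free convention (pthreads standing in for the kernel mutex):

#include <pthread.h>
#include <stdlib.h>

struct port { int n; };

static pthread_mutex_t table_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct port *ports[8];

static void close_port(unsigned i)
{
        struct port *pd;

        pthread_mutex_lock(&table_mutex);
        pd = ports[i];
        ports[i] = NULL;        /* detach before releasing the mutex */
        pthread_mutex_unlock(&table_mutex);

        free(pd);               /* free after the mutex is dropped, so a
                                 * concurrent open can reuse the slot */
}
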
 
 static int __init infinipath_init(void)
@@ -1806,7 +1811,6 @@ static void cleanup_device(struct ipath_devdata *dd)
                         * re-init
                         */
                        dd->ipath_kregbase = NULL;
-                       dd->ipath_kregvirt = NULL;
                        dd->ipath_uregbase = 0;
                        dd->ipath_sregbase = 0;
                        dd->ipath_cregbase = 0;
@@ -1821,6 +1825,12 @@ static void cleanup_device(struct ipath_devdata *dd)
                                  dd->ipath_pioavailregs_phys);
                dd->ipath_pioavailregs_dma = NULL;
        }
+       if (dd->ipath_dummy_hdrq) {
+               dma_free_coherent(&dd->pcidev->dev,
+                       dd->ipath_pd[0]->port_rcvhdrq_size,
+                       dd->ipath_dummy_hdrq, dd->ipath_dummy_hdrq_phys);
+               dd->ipath_dummy_hdrq = NULL;
+       }
 
        if (dd->ipath_pageshadow) {
                struct page **tmpp = dd->ipath_pageshadow;
@@ -1861,10 +1871,14 @@ static void cleanup_device(struct ipath_devdata *dd)
 
        /*
         * free any resources still in use (usually just kernel ports)
-        * at unload
+        * at unload; we do for portcnt, not cfgports, because cfgports
+        * could have changed while we were loaded.
         */
-       for (port = 0; port < dd->ipath_cfgports; port++)
-               ipath_free_pddata(dd, port, 1);
+       for (port = 0; port < dd->ipath_portcnt; port++) {
+               struct ipath_portdata *pd = dd->ipath_pd[port];
+               dd->ipath_pd[port] = NULL;
+               ipath_free_pddata(dd, pd);
+       }
        kfree(dd->ipath_pd);
        /*
         * debuggability, in case some cleanup path tries to use it
index a2f1ceafcca9f67360d1187b8c58e9e5a813f3a0..3313356ab93aa13d1c6b3fe4bb60d543f21402fc 100644 (file)
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -600,8 +601,31 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd)
                guid = *(__be64 *) ifp->if_guid;
        dd->ipath_guid = guid;
        dd->ipath_nguid = ifp->if_numguid;
-       memcpy(dd->ipath_serial, ifp->if_serial,
-              sizeof(ifp->if_serial));
+       /*
+        * Things are slightly complicated by the desire to transparently
+        * support both the Pathscale 10-digit serial number and the QLogic
+        * 13-character version.
+        */
+       if ((ifp->if_fversion > 1) && ifp->if_sprefix[0]
+               && ((u8 *)ifp->if_sprefix)[0] != 0xFF) {
+               /* This board has a Serial-prefix, which is stored
+                * elsewhere for backward-compatibility.
+                */
+               char *snp = dd->ipath_serial;
+               int len;
+               memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix);
+               snp[sizeof ifp->if_sprefix] = '\0';
+               len = strlen(snp);
+               snp += len;
+               len = (sizeof dd->ipath_serial) - len;
+               if (len > sizeof ifp->if_serial) {
+                       len = sizeof ifp->if_serial;
+               }
+               memcpy(snp, ifp->if_serial, len);
+       } else
+               memcpy(dd->ipath_serial, ifp->if_serial,
+                      sizeof ifp->if_serial);
+
        ipath_cdbg(VERBOSE, "Initted GUID to %llx from eeprom\n",
                   (unsigned long long) be64_to_cpu(dd->ipath_guid));
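
A standalone plain-C model of the serial-number assembly above, with illustrative field sizes rather than the real EEPROM layout: the newer prefix is glued in front of the older serial field, truncating rather than overflowing the destination (which, as in the original, need not end up NUL-terminated when the full width is used).

#include <stdio.h>
#include <string.h>

#define SPREFIX_LEN     4       /* illustrative, not the real layout */
#define SERIAL_LEN      12
#define DEST_LEN        16

static void build_serial(char dst[DEST_LEN],
                         const char prefix[SPREFIX_LEN],
                         const char serial[SERIAL_LEN])
{
        size_t len;

        memcpy(dst, prefix, SPREFIX_LEN);       /* prefix goes first */
        dst[SPREFIX_LEN] = '\0';        /* prefix field may be NUL-padded */
        len = strlen(dst);
        if (DEST_LEN - len > SERIAL_LEN)        /* truncate, don't overflow */
                memcpy(dst + len, serial, SERIAL_LEN);
        else
                memcpy(dst + len, serial, DEST_LEN - len);
}

int main(void)
{
        const char pfx[SPREFIX_LEN] = "QL";
        const char ser[SERIAL_LEN] = "0123456789AB";
        char dst[DEST_LEN] = { 0 };

        build_serial(dst, pfx, ser);
        printf("%.*s\n", DEST_LEN, dst);
        return 0;
}
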
 
index ada267e41f6c749901b53f898a9c39df891c7061..bbaa70e57db1ebf4442e6f3cb4e6e9f4a430efe6 100644 (file)
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -38,8 +39,8 @@
 #include <asm/pgtable.h>
 
 #include "ipath_kernel.h"
-#include "ips_common.h"
 #include "ipath_layer.h"
+#include "ipath_common.h"
 
 static int ipath_open(struct inode *, struct file *);
 static int ipath_close(struct inode *, struct file *);
@@ -122,6 +123,7 @@ static int ipath_get_base_info(struct ipath_portdata *pd,
         * on to yet another method of dealing with this
         */
        kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys;
+       kinfo->spi_rcvhdr_tailaddr = (u64)pd->port_rcvhdrqtailaddr_phys;
        kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys;
        kinfo->spi_pioavailaddr = (u64) dd->ipath_pioavailregs_phys;
        kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
@@ -456,7 +458,7 @@ static int ipath_set_part_key(struct ipath_portdata *pd, u16 key)
        u16 lkey = key & 0x7FFF;
        int ret;
 
-       if (lkey == (IPS_DEFAULT_P_KEY & 0x7FFF)) {
+       if (lkey == (IPATH_DEFAULT_P_KEY & 0x7FFF)) {
                /* nothing to do; this key always valid */
                ret = 0;
                goto bail;
@@ -704,6 +706,15 @@ static int ipath_create_user_egr(struct ipath_portdata *pd)
        unsigned e, egrcnt, alloced, egrperchunk, chunk, egrsize, egroff;
        size_t size;
        int ret;
+       gfp_t gfp_flags;
+
+       /*
+        * GFP_USER, but without GFP_FS, so buffer cache can be
+        * coalesced (we hope); otherwise, even at order 4,
+        * heavy filesystem activity makes these fail, and we can
+        * use compound pages.
+        */
+       gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
 
        egrcnt = dd->ipath_rcvegrcnt;
        /* TID number offset for this port */
@@ -720,10 +731,8 @@ static int ipath_create_user_egr(struct ipath_portdata *pd)
         * memory pressure (creating large files and then copying them over
         * NFS while doing lots of MPI jobs), we hit some allocation
         * failures, even though we can sleep...  (2.6.10) Still get
-        * failures at 64K.  32K is the lowest we can go without waiting
-        * more memory again.  It seems likely that the coalescing in
-        * free_pages, etc. still has issues (as it has had previously
-        * during 2.6.x development).
+        * failures at 64K.  32K is the lowest we can go without wasting
+        * additional memory.
         */
        size = 0x8000;
        alloced = ALIGN(egrsize * egrcnt, size);
@@ -744,12 +753,6 @@ static int ipath_create_user_egr(struct ipath_portdata *pd)
                goto bail_rcvegrbuf;
        }
        for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
-               /*
-                * GFP_USER, but without GFP_FS, so buffer cache can be
-                * coalesced (we hope); otherwise, even at order 4,
-                * heavy filesystem activity makes these fail
-                */
-               gfp_t gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
 
                pd->port_rcvegrbuf[e] = dma_alloc_coherent(
                        &dd->pcidev->dev, size, &pd->port_rcvegrbuf_phys[e],
@@ -783,11 +786,12 @@ static int ipath_create_user_egr(struct ipath_portdata *pd)
 
 bail_rcvegrbuf_phys:
        for (e = 0; e < pd->port_rcvegrbuf_chunks &&
-                    pd->port_rcvegrbuf[e]; e++)
+               pd->port_rcvegrbuf[e]; e++) {
                dma_free_coherent(&dd->pcidev->dev, size,
                                  pd->port_rcvegrbuf[e],
                                  pd->port_rcvegrbuf_phys[e]);
 
+       }
        vfree(pd->port_rcvegrbuf_phys);
        pd->port_rcvegrbuf_phys = NULL;
 bail_rcvegrbuf:
@@ -802,10 +806,7 @@ static int ipath_do_user_init(struct ipath_portdata *pd,
 {
        int ret = 0;
        struct ipath_devdata *dd = pd->port_dd;
-       u64 physaddr, uaddr, off, atmp;
-       struct page *pagep;
        u32 head32;
-       u64 head;
 
        /* for now, if major version is different, bail */
        if ((uinfo->spu_userversion >> 16) != IPATH_USER_SWMAJOR) {
@@ -830,54 +831,6 @@ static int ipath_do_user_init(struct ipath_portdata *pd,
 
        /* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */
 
-       /* set up for the rcvhdr Q tail register writeback to user memory */
-       if (!uinfo->spu_rcvhdraddr ||
-           !access_ok(VERIFY_WRITE, (u64 __user *) (unsigned long)
-                      uinfo->spu_rcvhdraddr, sizeof(u64))) {
-               ipath_dbg("Port %d rcvhdrtail addr %llx not valid\n",
-                         pd->port_port,
-                         (unsigned long long) uinfo->spu_rcvhdraddr);
-               ret = -EINVAL;
-               goto done;
-       }
-
-       off = offset_in_page(uinfo->spu_rcvhdraddr);
-       uaddr = PAGE_MASK & (unsigned long) uinfo->spu_rcvhdraddr;
-       ret = ipath_get_user_pages_nocopy(uaddr, &pagep);
-       if (ret) {
-               dev_info(&dd->pcidev->dev, "Failed to lookup and lock "
-                        "address %llx for rcvhdrtail: errno %d\n",
-                        (unsigned long long) uinfo->spu_rcvhdraddr, -ret);
-               goto done;
-       }
-       ipath_stats.sps_pagelocks++;
-       pd->port_rcvhdrtail_uaddr = uaddr;
-       pd->port_rcvhdrtail_pagep = pagep;
-       pd->port_rcvhdrtail_kvaddr =
-               page_address(pagep);
-       pd->port_rcvhdrtail_kvaddr += off;
-       physaddr = page_to_phys(pagep) + off;
-       ipath_cdbg(VERBOSE, "port %d user addr %llx hdrtailaddr, %llx "
-                  "physical (off=%llx)\n",
-                  pd->port_port,
-                  (unsigned long long) uinfo->spu_rcvhdraddr,
-                  (unsigned long long) physaddr, (unsigned long long) off);
-       ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
-                             pd->port_port, physaddr);
-       atmp = ipath_read_kreg64_port(dd,
-                                     dd->ipath_kregs->kr_rcvhdrtailaddr,
-                                     pd->port_port);
-       if (physaddr != atmp) {
-               ipath_dev_err(dd,
-                             "Catastrophic software error, "
-                             "RcvHdrTailAddr%u written as %llx, "
-                             "read back as %llx\n", pd->port_port,
-                             (unsigned long long) physaddr,
-                             (unsigned long long) atmp);
-               ret = -EINVAL;
-               goto done;
-       }
-
        /* for right now, kernel piobufs are at end, so port 1 is at 0 */
        pd->port_piobufs = dd->ipath_piobufbase +
                dd->ipath_pbufsport * (pd->port_port -
@@ -896,26 +849,18 @@ static int ipath_do_user_init(struct ipath_portdata *pd,
                ret = ipath_create_user_egr(pd);
        if (ret)
                goto done;
-       /* enable receives now */
-       /* atomically set enable bit for this port */
-       set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
-               &dd->ipath_rcvctrl);
 
        /*
-        * set the head registers for this port to the current values
+        * set the eager head register for this port to the current values
         * of the tail pointers, since we don't know if they were
         * updated on last use of the port.
         */
-       head32 = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
-       head = (u64) head32;
-       ipath_write_ureg(dd, ur_rcvhdrhead, head, pd->port_port);
        head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port);
        ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port);
        dd->ipath_lastegrheads[pd->port_port] = -1;
        dd->ipath_lastrcvhdrqtails[pd->port_port] = -1;
-       ipath_cdbg(VERBOSE, "Wrote port%d head %llx, egrhead %x from "
-                  "tail regs\n", pd->port_port,
-                  (unsigned long long) head, head32);
+       ipath_cdbg(VERBOSE, "Wrote port%d egrhead %x from tail regs\n",
+               pd->port_port, head32);
        pd->port_tidcursor = 0; /* start at beginning after open */
        /*
         * now enable the port; the tail registers will be written to memory
@@ -924,24 +869,76 @@ static int ipath_do_user_init(struct ipath_portdata *pd,
         * transition from 0 to 1, so clear it first, then set it as part of
         * enabling the port.  This will (very briefly) affect any other
         * open ports, but it shouldn't be long enough to be an issue.
+        * We explicitly set the in-memory copy to 0 beforehand, so we don't
+        * have to wait to be sure the DMA update has happened.
         */
+       *pd->port_rcvhdrtail_kvaddr = 0ULL;
+       set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
+               &dd->ipath_rcvctrl);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
                         dd->ipath_rcvctrl & ~INFINIPATH_R_TAILUPD);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
                         dd->ipath_rcvctrl);
-
 done:
        return ret;
 }
 
+
+/* common code for the mappings on dma_alloc_coherent mem */
+static int ipath_mmap_mem(struct vm_area_struct *vma,
+                            struct ipath_portdata *pd, unsigned len,
+                            int write_ok, dma_addr_t addr, char *what)
+{
+       struct ipath_devdata *dd = pd->port_dd;
+       unsigned pfn = (unsigned long)addr >> PAGE_SHIFT;
+       int ret;
+
+       if ((vma->vm_end - vma->vm_start) > len) {
+               dev_info(&dd->pcidev->dev,
+                        "FAIL on %s: len %lx > %x\n", what,
+                        vma->vm_end - vma->vm_start, len);
+               ret = -EFAULT;
+               goto bail;
+       }
+
+       if (!write_ok) {
+               if (vma->vm_flags & VM_WRITE) {
+                       dev_info(&dd->pcidev->dev,
+                                "%s must be mapped readonly\n", what);
+                       ret = -EPERM;
+                       goto bail;
+               }
+
+               /* don't allow them to later change with mprotect */
+               vma->vm_flags &= ~VM_MAYWRITE;
+       }
+
+       ret = remap_pfn_range(vma, vma->vm_start, pfn,
+                             len, vma->vm_page_prot);
+       if (ret)
+               dev_info(&dd->pcidev->dev,
+                        "%s port%u mmap of %lx, %x bytes r%c failed: %d\n",
+                        what, pd->port_port, (unsigned long)addr, len,
+                        write_ok?'w':'o', ret);
+       else
+               ipath_cdbg(VERBOSE, "%s port%u mmaped %lx, %x bytes r%c\n",
+                       what, pd->port_port, (unsigned long)addr, len,
+                        write_ok?'w':'o');
+bail:
+       return ret;
+}
+
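
A kernel-style sketch of the pattern ipath_mmap_mem() factors out, reduced to the read-only case (hypothetical function, not the driver's API): validate the requested length, refuse VM_WRITE and clear VM_MAYWRITE so a later mprotect() cannot re-enable writing, then remap the buffer's pfn range.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/mm.h>

static int demo_mmap_readonly(struct vm_area_struct *vma,
                              dma_addr_t buf_phys, size_t len)
{
        if (vma->vm_end - vma->vm_start > len)
                return -EFAULT;         /* can't map past the buffer */
        if (vma->vm_flags & VM_WRITE)
                return -EPERM;          /* this object is read-only */

        /* don't let a later mprotect() upgrade the mapping to writable */
        vma->vm_flags &= ~VM_MAYWRITE;

        return remap_pfn_range(vma, vma->vm_start, buf_phys >> PAGE_SHIFT,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}
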
 static int mmap_ureg(struct vm_area_struct *vma, struct ipath_devdata *dd,
                     u64 ureg)
 {
        unsigned long phys;
        int ret;
 
-       /* it's the real hardware, so io_remap works */
-
+       /*
+        * This is real hardware, so use io_remap.  This is the mechanism
+        * for the user process to update the head registers for their port
+        * in the chip.
+        */
        if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
                dev_info(&dd->pcidev->dev, "FAIL mmap userreg: reqlen "
                         "%lx > PAGE\n", vma->vm_end - vma->vm_start);
@@ -967,10 +964,11 @@ static int mmap_piobufs(struct vm_area_struct *vma,
        int ret;
 
        /*
-        * When we map the PIO buffers, we want to map them as writeonly, no
-        * read possible.
+        * When we map the PIO buffers in the chip, we want to map them as
+        * writeonly, no read possible.   This prevents access to previous
+        * process data, and catches users who might try to read the i/o
+        * space due to a bug.
         */
-
        if ((vma->vm_end - vma->vm_start) >
            (dd->ipath_pbufsport * dd->ipath_palign)) {
                dev_info(&dd->pcidev->dev, "FAIL mmap piobufs: "
@@ -981,11 +979,10 @@ static int mmap_piobufs(struct vm_area_struct *vma,
        }
 
        phys = dd->ipath_physaddr + pd->port_piobufs;
+
        /*
-        * Do *NOT* mark this as non-cached (PWT bit), or we don't get the
+        * Don't mark this as non-cached, or we don't get the
         * write combining behavior we want on the PIO buffers!
-        * vma->vm_page_prot =
-        *        pgprot_noncached(vma->vm_page_prot);
         */
 
        if (vma->vm_flags & VM_READ) {
@@ -997,8 +994,7 @@ static int mmap_piobufs(struct vm_area_struct *vma,
        }
 
        /* don't allow them to later change to readable with mprotect */
-
-       vma->vm_flags &= ~VM_MAYWRITE;
+       vma->vm_flags &= ~VM_MAYREAD;
        vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
 
        ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
@@ -1017,11 +1013,6 @@ static int mmap_rcvegrbufs(struct vm_area_struct *vma,
        dma_addr_t *phys;
        int ret;
 
-       if (!pd->port_rcvegrbuf) {
-               ret = -EFAULT;
-               goto bail;
-       }
-
        size = pd->port_rcvegrbuf_size;
        total_size = pd->port_rcvegrbuf_chunks * size;
        if ((vma->vm_end - vma->vm_start) > total_size) {
@@ -1039,13 +1030,12 @@ static int mmap_rcvegrbufs(struct vm_area_struct *vma,
                ret = -EPERM;
                goto bail;
        }
+       /* don't allow them to later change to writeable with mprotect */
+       vma->vm_flags &= ~VM_MAYWRITE;
 
        start = vma->vm_start;
        phys = pd->port_rcvegrbuf_phys;
 
-       /* don't allow them to later change to writeable with mprotect */
-       vma->vm_flags &= ~VM_MAYWRITE;
-
        for (i = 0; i < pd->port_rcvegrbuf_chunks; i++, start += size) {
                ret = remap_pfn_range(vma, start, phys[i] >> PAGE_SHIFT,
                                      size, vma->vm_page_prot);
@@ -1058,78 +1048,6 @@ bail:
        return ret;
 }
 
-static int mmap_rcvhdrq(struct vm_area_struct *vma,
-                       struct ipath_portdata *pd)
-{
-       struct ipath_devdata *dd = pd->port_dd;
-       size_t total_size;
-       int ret;
-
-       /*
-        * kmalloc'ed memory, physically contiguous; this is from
-        * spi_rcvhdr_base; we allow user to map read-write so they can
-        * write hdrq entries to allow protocol code to directly poll
-        * whether a hdrq entry has been written.
-        */
-       total_size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
-                          sizeof(u32), PAGE_SIZE);
-       if ((vma->vm_end - vma->vm_start) > total_size) {
-               dev_info(&dd->pcidev->dev,
-                        "FAIL on rcvhdrq: reqlen %lx > actual %lx\n",
-                        vma->vm_end - vma->vm_start,
-                        (unsigned long) total_size);
-               ret = -EFAULT;
-               goto bail;
-       }
-
-       ret = remap_pfn_range(vma, vma->vm_start,
-                             pd->port_rcvhdrq_phys >> PAGE_SHIFT,
-                             vma->vm_end - vma->vm_start,
-                             vma->vm_page_prot);
-bail:
-       return ret;
-}
-
-static int mmap_pioavailregs(struct vm_area_struct *vma,
-                            struct ipath_portdata *pd)
-{
-       struct ipath_devdata *dd = pd->port_dd;
-       int ret;
-
-       /*
-        * when we map the PIO bufferavail registers, we want to map them as
-        * readonly, no write possible.
-        *
-        * kmalloc'ed memory, physically contiguous, one page only, readonly
-        */
-
-       if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
-               dev_info(&dd->pcidev->dev, "FAIL on pioavailregs_dma: "
-                        "reqlen %lx > actual %lx\n",
-                        vma->vm_end - vma->vm_start,
-                        (unsigned long) PAGE_SIZE);
-               ret = -EFAULT;
-               goto bail;
-       }
-
-       if (vma->vm_flags & VM_WRITE) {
-               dev_info(&dd->pcidev->dev,
-                        "Can't map pioavailregs as writable (flags=%lx)\n",
-                        vma->vm_flags);
-               ret = -EPERM;
-               goto bail;
-       }
-
-       /* don't allow them to later change with mprotect */
-       vma->vm_flags &= ~VM_MAYWRITE;
-
-       ret = remap_pfn_range(vma, vma->vm_start,
-                             dd->ipath_pioavailregs_phys >> PAGE_SHIFT,
-                             PAGE_SIZE, vma->vm_page_prot);
-bail:
-       return ret;
-}
-
 /**
  * ipath_mmap - mmap various structures into user space
  * @fp: the file pointer
@@ -1149,6 +1067,7 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
 
        pd = port_fp(fp);
        dd = pd->port_dd;
+
        /*
         * This is the ipath_do_user_init() code, mapping the shared buffers
         * into the user process. The address referred to by vm_pgoff is the
@@ -1158,28 +1077,59 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
        pgaddr = vma->vm_pgoff << PAGE_SHIFT;
 
        /*
-        * note that ureg does *NOT* have the kregvirt as part of it, to be
-        * sure that for 32 bit programs, we don't end up trying to map a >
-        * 44 address.  Has to match ipath_get_base_info() code that sets
-        * __spi_uregbase
+        * Must fit in 40 bits for our hardware; some checked elsewhere,
+        * but we'll be paranoid.  Check for 0 is mostly in case one of the
+        * allocations failed, but user called mmap anyway.   We want to catch
+        * that before it can match.
         */
+       if (!pgaddr || pgaddr >= (1ULL<<40)) {
+               ipath_dev_err(dd, "Bad phys addr %llx, start %lx, end %lx\n",
+                       (unsigned long long)pgaddr, vma->vm_start, vma->vm_end);
+               return -EINVAL;
+       }
 
+       /* just the offset of the port user registers, not physical addr */
        ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port;
 
        ipath_cdbg(MM, "ushare: pgaddr %llx vm_start=%lx, vmlen %lx\n",
                   (unsigned long long) pgaddr, vma->vm_start,
                   vma->vm_end - vma->vm_start);
 
-       if (pgaddr == ureg)
+       if (vma->vm_start & (PAGE_SIZE-1)) {
+               ipath_dev_err(dd,
+                       "vm_start not aligned: %lx, end=%lx phys %lx\n",
+                       vma->vm_start, vma->vm_end, (unsigned long)pgaddr);
+               ret = -EINVAL;
+       }
+       else if (pgaddr == ureg)
                ret = mmap_ureg(vma, dd, ureg);
        else if (pgaddr == pd->port_piobufs)
                ret = mmap_piobufs(vma, dd, pd);
        else if (pgaddr == (u64) pd->port_rcvegr_phys)
                ret = mmap_rcvegrbufs(vma, pd);
-       else if (pgaddr == (u64) pd->port_rcvhdrq_phys)
-               ret = mmap_rcvhdrq(vma, pd);
+       else if (pgaddr == (u64) pd->port_rcvhdrq_phys) {
+               /*
+                * The rcvhdrq itself; readonly except on HT-400 (so have
+                * to allow writable mapping), multiple pages, contiguous
+                * from an i/o perspective.
+                */
+               unsigned total_size =
+                       ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize
+                          * sizeof(u32), PAGE_SIZE);
+               ret = ipath_mmap_mem(vma, pd, total_size, 1,
+                                    pd->port_rcvhdrq_phys,
+                                    "rcvhdrq");
+       }
+       else if (pgaddr == (u64)pd->port_rcvhdrqtailaddr_phys)
+               /* in-memory copy of rcvhdrq tail register */
+               ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
+                                    pd->port_rcvhdrqtailaddr_phys,
+                                    "rcvhdrq tail");
        else if (pgaddr == dd->ipath_pioavailregs_phys)
-               ret = mmap_pioavailregs(vma, pd);
+               /* in-memory copy of pioavail registers */
+               ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
+                                    dd->ipath_pioavailregs_phys,
+                                    "pioavail registers");
        else
                ret = -EINVAL;
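
From userspace, each of these objects is selected purely by the mmap offset: the process passes back the physical address it was handed in the base-info struct, and the driver matches it against the per-port addresses above. A hypothetical userspace sketch (illustrative helper, not a published API):

#define _FILE_OFFSET_BITS 64    /* physical addresses may exceed 32 bits */
#include <stddef.h>
#include <sys/types.h>
#include <sys/mman.h>

static void *map_port_object(int fd, unsigned long long physaddr,
                             size_t len, int prot)
{
        /* the offset becomes vm_pgoff << PAGE_SHIFT in the driver */
        void *p = mmap(NULL, len, prot, MAP_SHARED, fd, (off_t)physaddr);

        return p == MAP_FAILED ? NULL : p;
}

/*
 * e.g. the in-memory tail copy, mapped read-only:
 *      map_port_object(fd, base_info.spi_rcvhdr_tailaddr, page_size,
 *                      PROT_READ);
 */
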
 
@@ -1442,16 +1392,16 @@ done:
 
 static int ipath_open(struct inode *in, struct file *fp)
 {
-       int ret, minor;
+       int ret, user_minor;
 
        mutex_lock(&ipath_mutex);
 
-       minor = iminor(in);
+       user_minor = iminor(in) - IPATH_USER_MINOR_BASE;
        ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n",
-                  (long)in->i_rdev, minor);
+                  (long)in->i_rdev, user_minor);
 
-       if (minor)
-               ret = find_free_port(minor - 1, fp);
+       if (user_minor)
+               ret = find_free_port(user_minor - 1, fp);
        else
                ret = find_best_unit(fp);
 
@@ -1536,53 +1486,54 @@ static int ipath_close(struct inode *in, struct file *fp)
        }
 
        if (dd->ipath_kregbase) {
-               if (pd->port_rcvhdrtail_uaddr) {
-                       pd->port_rcvhdrtail_uaddr = 0;
-                       pd->port_rcvhdrtail_kvaddr = NULL;
-                       ipath_release_user_pages_on_close(
-                               &pd->port_rcvhdrtail_pagep, 1);
-                       pd->port_rcvhdrtail_pagep = NULL;
-                       ipath_stats.sps_pageunlocks++;
-               }
-               ipath_write_kreg_port(
-                       dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
-                       port, 0ULL);
-               ipath_write_kreg_port(
-                       dd, dd->ipath_kregs->kr_rcvhdraddr,
-                       pd->port_port, 0);
+               int i;
+               /* atomically clear receive enable port. */
+               clear_bit(INFINIPATH_R_PORTENABLE_SHIFT + port,
+                         &dd->ipath_rcvctrl);
+               ipath_write_kreg( dd, dd->ipath_kregs->kr_rcvctrl,
+                       dd->ipath_rcvctrl);
+               /* and read back from chip to be sure that nothing
+                * else is in flight when we do the rest */
+               (void)ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
 
                /* clean up the pkeys for this port user */
                ipath_clean_part_key(pd, dd);
 
-               if (port < dd->ipath_cfgports) {
-                       int i = dd->ipath_pbufsport * (port - 1);
-                       ipath_disarm_piobufs(dd, i, dd->ipath_pbufsport);
 
-                       /* atomically clear receive enable port. */
-                       clear_bit(INFINIPATH_R_PORTENABLE_SHIFT + port,
-                                 &dd->ipath_rcvctrl);
-                       ipath_write_kreg(
-                               dd,
-                               dd->ipath_kregs->kr_rcvctrl,
-                               dd->ipath_rcvctrl);
-
-                       if (dd->ipath_pageshadow)
-                               unlock_expected_tids(pd);
-                       ipath_stats.sps_ports--;
-                       ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
-                                  pd->port_comm, pd->port_pid,
-                                  dd->ipath_unit, port);
-               }
+               /*
+                * be paranoid, and never write 0's to these, just use an
+                * unused part of the port 0 tail page.  Of course,
+                * rcvhdraddr points to a large chunk of memory, so this
+                * could still trash things, but at least it won't trash
+                * page 0, and by disabling the port, it should stop "soon",
+                * even if a packet or two is already in flight after we
+                * disabled the port.
+                */
+               ipath_write_kreg_port(dd,
+                       dd->ipath_kregs->kr_rcvhdrtailaddr, port,
+                       dd->ipath_dummy_hdrq_phys);
+               ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
+                       pd->port_port, dd->ipath_dummy_hdrq_phys);
+
+               i = dd->ipath_pbufsport * (port - 1);
+               ipath_disarm_piobufs(dd, i, dd->ipath_pbufsport);
+
+               if (dd->ipath_pageshadow)
+                       unlock_expected_tids(pd);
+               ipath_stats.sps_ports--;
+               ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
+                          pd->port_comm, pd->port_pid,
+                          dd->ipath_unit, port);
+
+               dd->ipath_f_clear_tids(dd, pd->port_port);
        }
 
        pd->port_cnt = 0;
        pd->port_pid = 0;
 
-       dd->ipath_f_clear_tids(dd, pd->port_port);
-
-       ipath_free_pddata(dd, pd->port_port, 0);
-
+       dd->ipath_pd[pd->port_port] = NULL; /* before releasing mutex */
        mutex_unlock(&ipath_mutex);
+       ipath_free_pddata(dd, pd); /* after releasing the mutex */
 
        return ret;
 }
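
A sketch of the close-time idea above, with hypothetical register helpers standing in for ipath_write_kreg_port() and friends: never hand the chip a null DMA address; disable receive, post the disable with a read-back, then park both header-queue pointers on the pre-allocated dummy page so any packet still in flight lands somewhere harmless.

#include <stdint.h>

typedef uint64_t dma_addr_t;
struct chip;            /* opaque device handle */

/* hypothetical chip accessors, all assumptions for this sketch */
void chip_clear_rcv_enable(struct chip *c, unsigned port);
uint64_t chip_read_scratch(struct chip *c);
void chip_write_port_reg(struct chip *c, int reg, unsigned port,
                         dma_addr_t val);

enum { REG_RCVHDRTAILADDR, REG_RCVHDRADDR };

static void park_port_dma(struct chip *c, unsigned port,
                          dma_addr_t dummy_phys)
{
        /* stop new receives, then read back a register so the disable
         * is posted before the queue pointers are retargeted */
        chip_clear_rcv_enable(c, port);
        (void)chip_read_scratch(c);

        /* never write 0: point both queues at the dummy page instead */
        chip_write_port_reg(c, REG_RCVHDRTAILADDR, port, dummy_phys);
        chip_write_port_reg(c, REG_RCVHDRADDR, port, dummy_phys);
}
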
@@ -1859,19 +1810,12 @@ int ipath_user_add(struct ipath_devdata *dd)
                                      "error %d\n", -ret);
                        goto bail;
                }
-               ret = ipath_diag_init();
-               if (ret < 0) {
-                       ipath_dev_err(dd, "Unable to set up diag support: "
-                                     "error %d\n", -ret);
-                       goto bail_sma;
-               }
-
                ret = init_cdev(0, "ipath", &ipath_file_ops, &wildcard_cdev,
                                &wildcard_class_dev);
                if (ret < 0) {
                        ipath_dev_err(dd, "Could not create wildcard "
                                      "minor: error %d\n", -ret);
-                       goto bail_diag;
+                       goto bail_sma;
                }
 
                atomic_set(&user_setup, 1);
@@ -1880,31 +1824,28 @@ int ipath_user_add(struct ipath_devdata *dd)
        snprintf(name, sizeof(name), "ipath%d", dd->ipath_unit);
 
        ret = init_cdev(dd->ipath_unit + 1, name, &ipath_file_ops,
-                       &dd->cdev, &dd->class_dev);
+                       &dd->user_cdev, &dd->user_class_dev);
        if (ret < 0)
                ipath_dev_err(dd, "Could not create user minor %d, %s\n",
                              dd->ipath_unit + 1, name);
 
        goto bail;
 
-bail_diag:
-       ipath_diag_cleanup();
 bail_sma:
        user_cleanup();
 bail:
        return ret;
 }
 
-void ipath_user_del(struct ipath_devdata *dd)
+void ipath_user_remove(struct ipath_devdata *dd)
 {
-       cleanup_cdev(&dd->cdev, &dd->class_dev);
+       cleanup_cdev(&dd->user_cdev, &dd->user_class_dev);
 
        if (atomic_dec_return(&user_count) == 0) {
                if (atomic_read(&user_setup) == 0)
                        goto bail;
 
                cleanup_cdev(&wildcard_cdev, &wildcard_class_dev);
-               ipath_diag_cleanup();
                user_cleanup();
 
                atomic_set(&user_setup, 0);
@@ -1912,3 +1853,4 @@ void ipath_user_del(struct ipath_devdata *dd)
 bail:
        return;
 }
+
index 97f142c5be13d75f6fad23f1be75d44b2083f8a1..0936d8e8d7043bdc51408c5e4bdf9ba25d9bc54f 100644 (file)
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
index fac0a2b74de2e3d08d054322157b773bc5b79967..3db015da6e77813e194ca10f77ce8ddd865fe2f3 100644 (file)
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -1572,7 +1573,6 @@ void ipath_init_ht400_funcs(struct ipath_devdata *dd)
        dd->ipath_f_reset = ipath_setup_ht_reset;
        dd->ipath_f_get_boardname = ipath_ht_boardname;
        dd->ipath_f_init_hwerrors = ipath_ht_init_hwerrors;
-       dd->ipath_f_init_hwerrors = ipath_ht_init_hwerrors;
        dd->ipath_f_early_init = ipath_ht_early_init;
        dd->ipath_f_handle_hwerrors = ipath_ht_handle_hwerrors;
        dd->ipath_f_quiet_serdes = ipath_ht_quiet_serdes;
index dc83250d26a6a1dc3bf31f92385be816b6b7aca4..414cdd1d80a6fa8893121786825bc5646cd8d385 100644 (file)
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -35,7 +36,7 @@
 #include <linux/vmalloc.h>
 
 #include "ipath_kernel.h"
-#include "ips_common.h"
+#include "ipath_common.h"
 
 /*
  * min buffers we want to have per port, after driver
@@ -114,6 +115,7 @@ static int create_port0_egr(struct ipath_devdata *dd)
                                      "eager TID %u\n", e);
                        while (e != 0)
                                dev_kfree_skb(skbs[--e]);
+                       vfree(skbs);
                        ret = -ENOMEM;
                        goto bail;
                }
@@ -275,7 +277,7 @@ static int init_chip_first(struct ipath_devdata *dd,
        pd->port_port = 0;
        pd->port_cnt = 1;
        /* The port 0 pkey table is used by the layer interface. */
-       pd->port_pkeys[0] = IPS_DEFAULT_P_KEY;
+       pd->port_pkeys[0] = IPATH_DEFAULT_P_KEY;
        dd->ipath_rcvtidcnt =
                ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);
        dd->ipath_rcvtidbase =
@@ -409,17 +411,8 @@ static int init_pioavailregs(struct ipath_devdata *dd)
        /* and its length */
        dd->ipath_freezelen = L1_CACHE_BYTES - sizeof(dd->ipath_statusp[0]);
 
-       if (dd->ipath_unit * 64 > (IPATH_PORT0_RCVHDRTAIL_SIZE - 64)) {
-               ipath_dev_err(dd, "unit %u too large for port 0 "
-                             "rcvhdrtail buffer size\n", dd->ipath_unit);
-               ret = -ENODEV;
-       }
-       else
-               ret = 0;
+       ret = 0;
 
-       /* so we can get current tail in ipath_kreceive(), per chip */
-       dd->ipath_hdrqtailptr = &ipath_port0_rcvhdrtail[
-               dd->ipath_unit * (64 / sizeof(*ipath_port0_rcvhdrtail))];
 done:
        return ret;
 }
@@ -652,8 +645,9 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
 {
        int ret = 0, i;
        u32 val32, kpiobufs;
-       u64 val, atmp;
+       u64 val;
        struct ipath_portdata *pd = NULL; /* keep gcc4 happy */
+       gfp_t gfp_flags = GFP_USER | __GFP_COMP;
 
        ret = init_housekeeping(dd, &pd, reinit);
        if (ret)
@@ -775,24 +769,6 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
                goto done;
        }
 
-       val = ipath_port0_rcvhdrtail_dma + dd->ipath_unit * 64;
-
-       /* verify that the alignment requirement was met */
-       ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
-                             0, val);
-       atmp = ipath_read_kreg64_port(
-               dd, dd->ipath_kregs->kr_rcvhdrtailaddr, 0);
-       if (val != atmp) {
-               ipath_dev_err(dd, "Catastrophic software error, "
-                             "RcvHdrTailAddr0 written as %llx, "
-                             "read back as %llx from %x\n",
-                             (unsigned long long) val,
-                             (unsigned long long) atmp,
-                             dd->ipath_kregs->kr_rcvhdrtailaddr);
-               ret = -EINVAL;
-               goto done;
-       }
-
        ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvbthqp, IPATH_KD_QP);
 
        /*
@@ -836,25 +812,45 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
        /* clear any interrupts up to this point (ints still not enabled) */
        ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
 
-       ipath_stats.sps_lid[dd->ipath_unit] = dd->ipath_lid;
-
        /*
         * Set up the port 0 (kernel) rcvhdr q and egr TIDs.  If doing
         * re-init, the simplest way to handle this is to free
         * existing, and re-allocate.
         */
-       if (reinit)
-               ipath_free_pddata(dd, 0, 0);
+       if (reinit) {
+               struct ipath_portdata *pd = dd->ipath_pd[0];
+               dd->ipath_pd[0] = NULL;
+               ipath_free_pddata(dd, pd);
+       }
        dd->ipath_f_tidtemplate(dd);
        ret = ipath_create_rcvhdrq(dd, pd);
-       if (!ret)
+       if (!ret) {
+               dd->ipath_hdrqtailptr =
+                       (volatile __le64 *)pd->port_rcvhdrtail_kvaddr;
                ret = create_port0_egr(dd);
+       }
        if (ret)
                ipath_dev_err(dd, "failed to allocate port 0 (kernel) "
                              "rcvhdrq and/or egr bufs\n");
        else
                enable_chip(dd, pd, reinit);
 
+
+       if (!ret && !reinit) {
+               /* used when we close a port, for DMA already in flight at close */
+               dd->ipath_dummy_hdrq = dma_alloc_coherent(
+                       &dd->pcidev->dev, pd->port_rcvhdrq_size,
+                       &dd->ipath_dummy_hdrq_phys,
+                       gfp_flags);
+               if (!dd->ipath_dummy_hdrq) {
+                       dev_info(&dd->pcidev->dev,
+                               "Couldn't allocate 0x%lx bytes for dummy hdrq\n",
+                               pd->port_rcvhdrq_size);
+                       /* fallback to just 0'ing */
+                       dd->ipath_dummy_hdrq_phys = 0UL;
+               }
+       }
+
        /*
         * cause retrigger of pending interrupts ignored during init,
         * even if we had errors
index 5e31d0de849b1e8bd9a530a0760018c33084993b..280e732660a19776d69870c5a82893377008be2f 100644 (file)
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
 #include <linux/pci.h>
 
 #include "ipath_kernel.h"
-#include "ips_common.h"
 #include "ipath_layer.h"
+#include "ipath_common.h"
 
+/* These are all rcv-related errors which we want to count for stats */
 #define E_SUM_PKTERRS \
        (INFINIPATH_E_RHDRLEN | INFINIPATH_E_RBADTID | \
         INFINIPATH_E_RBADVERSION | INFINIPATH_E_RHDR | \
@@ -44,6 +46,7 @@
         INFINIPATH_E_RFORMATERR | INFINIPATH_E_RUNSUPVL | \
         INFINIPATH_E_RUNEXPCHAR | INFINIPATH_E_REBP)
 
+/* These are all send-related errors which we want to count for stats */
 #define E_SUM_ERRS \
        (INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM | \
         INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
         INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SPKTLEN | \
         INFINIPATH_E_INVALIDADDR)
 
+/*
+ * These are errors that can occur when the link changes state while
+ * a packet is being sent or received.  This doesn't cover things
+ * like EBP or VCRC that can result from the sender having the link
+ * change state underneath it, so we receive a "known bad" packet.
+#define E_SUM_LINK_PKTERRS \
+       (INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
+        INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SPKTLEN | \
+        INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RMINPKTLEN | \
+        INFINIPATH_E_RUNEXPCHAR)
+
 static u64 handle_e_sum_errs(struct ipath_devdata *dd, ipath_err_t errs)
 {
        unsigned long sbuf[4];
@@ -100,9 +115,7 @@ static u64 handle_e_sum_errs(struct ipath_devdata *dd, ipath_err_t errs)
                if (ipath_debug & __IPATH_PKTDBG)
                        printk("\n");
        }
-       if ((errs & (INFINIPATH_E_SDROPPEDDATAPKT |
-                    INFINIPATH_E_SDROPPEDSMPPKT |
-                    INFINIPATH_E_SMINPKTLEN)) &&
+       if ((errs & E_SUM_LINK_PKTERRS) &&
            !(dd->ipath_flags & IPATH_LINKACTIVE)) {
                /*
                 * This can happen when SMA is trying to bring the link
@@ -111,11 +124,9 @@ static u64 handle_e_sum_errs(struct ipath_devdata *dd, ipath_err_t errs)
                 * valid.  We don't want to confuse people, so we just
                 * don't print them, except at debug
                 */
-               ipath_dbg("Ignoring pktsend errors %llx, because not "
-                         "yet active\n", (unsigned long long) errs);
-               ignore_this_time = INFINIPATH_E_SDROPPEDDATAPKT |
-                       INFINIPATH_E_SDROPPEDSMPPKT |
-                       INFINIPATH_E_SMINPKTLEN;
+               ipath_dbg("Ignoring packet errors %llx, because link not "
+                         "ACTIVE\n", (unsigned long long) errs);
+               ignore_this_time = errs & E_SUM_LINK_PKTERRS;
        }
 
        return ignore_this_time;
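
Folding the open-coded error list into E_SUM_LINK_PKTERRS lets the handler suppress exactly the link-transition errors with a single mask operation, as the hunk above does. A hedged sketch of the idiom, with invented bit values:

    #include <stdint.h>

    #define ERR_DROPPED_DATA   (1ull << 0)
    #define ERR_DROPPED_SMP    (1ull << 1)
    #define ERR_MIN_PKT_LEN    (1ull << 2)
    #define ERR_UNRELATED      (1ull << 3)

    /* Errors expected while the link is changing state. */
    #define ERR_LINK_TRANSITION \
            (ERR_DROPPED_DATA | ERR_DROPPED_SMP | ERR_MIN_PKT_LEN)

    /* Return the subset of 'errs' to suppress when the link is not yet
     * ACTIVE; everything else is still reported. */
    static uint64_t suppressed_errors(uint64_t errs, int link_active)
    {
            return link_active ? 0 : (errs & ERR_LINK_TRANSITION);
    }
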
@@ -156,7 +167,29 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
         */
        val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
        lstate = val & IPATH_IBSTATE_MASK;
-       if (lstate == IPATH_IBSTATE_INIT || lstate == IPATH_IBSTATE_ARM ||
+
+       /*
+        * This is confusing enough when it happens that we always want to
+        * put it on the console and in the logs.  If it was a requested
+        * state change, we'll have already cleared the flags, so we won't
+        * print this warning.
+        */
+       if ((lstate != IPATH_IBSTATE_ARM && lstate != IPATH_IBSTATE_ACTIVE)
+               && (dd->ipath_flags & (IPATH_LINKARMED | IPATH_LINKACTIVE))) {
+               dev_info(&dd->pcidev->dev, "Link state changed from %s to %s\n",
+                        (dd->ipath_flags & IPATH_LINKARMED) ? "ARM" : "ACTIVE",
+                        ib_linkstate(lstate));
+               /*
+                * Flush all queued sends when link went to DOWN or INIT,
+                * to be sure that they don't block SMA and other MAD packets
+                */
+               ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+                                INFINIPATH_S_ABORT);
+               ipath_disarm_piobufs(dd, dd->ipath_lastport_piobuf,
+                                    (unsigned)(dd->ipath_piobcnt2k +
+                                               dd->ipath_piobcnt4k) -
+                                    dd->ipath_lastport_piobuf);
+       } else if (lstate == IPATH_IBSTATE_INIT || lstate == IPATH_IBSTATE_ARM ||
            lstate == IPATH_IBSTATE_ACTIVE) {
                /*
                 * only print at SMA if there is a change, debug if not
@@ -229,6 +262,7 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
                                     | IPATH_LINKACTIVE |
                                     IPATH_LINKARMED);
                *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
+               dd->ipath_lli_counter = 0;
                if (!noprint) {
                        if (((dd->ipath_lastibcstat >>
                              INFINIPATH_IBCS_LINKSTATE_SHIFT) &
@@ -350,7 +384,7 @@ static unsigned handle_frequent_errors(struct ipath_devdata *dd,
        return supp_msgs;
 }
 
-static void handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
+static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 {
        char msg[512];
        u64 ignore_this_time = 0;
@@ -379,6 +413,19 @@ static void handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 
        if (errs & E_SUM_ERRS)
                ignore_this_time = handle_e_sum_errs(dd, errs);
+       else if ((errs & E_SUM_LINK_PKTERRS) &&
+           !(dd->ipath_flags & IPATH_LINKACTIVE)) {
+               /*
+                * This can happen when SMA is trying to bring the link
+                * up, but the IB link changes state at the "wrong" time.
+                * The IB logic then complains that the packet isn't
+                * valid.  We don't want to confuse people, so we just
+                * don't print them, except at debug
+                */
+               ipath_dbg("Ignoring packet errors %llx, because link not "
+                         "ACTIVE\n", (unsigned long long) errs);
+               ignore_this_time = errs & E_SUM_LINK_PKTERRS;
+       }
 
        if (supp_msgs == 250000) {
                /*
@@ -434,7 +481,7 @@ static void handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
                          INFINIPATH_E_IBSTATUSCHANGED);
        }
        if (!errs)
-               return;
+               return 0;
 
        if (!noprint)
                /*
@@ -493,10 +540,10 @@ static void handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
                                continue;
                        if (hd == (tl + 1) ||
                            (!hd && tl == dd->ipath_hdrqlast)) {
-                               dd->ipath_lastrcvhdrqtails[i] = tl;
-                               pd->port_hdrqfull++;
                                if (i == 0)
                                        chkerrpkts = 1;
+                               dd->ipath_lastrcvhdrqtails[i] = tl;
+                               pd->port_hdrqfull++;
                        }
                }
        }
@@ -558,9 +605,7 @@ static void handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
                wake_up_interruptible(&ipath_sma_state_wait);
        }
 
-       if (chkerrpkts)
-               /* process possible error packets in hdrq */
-               ipath_kreceive(dd);
+       return chkerrpkts;
 }
 
 /* this is separate to allow for better optimization of ipath_intr() */
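
handle_errors() now returns chkerrpkts instead of calling ipath_kreceive() itself, so the interrupt handler owns the ordering of receive processing. A sketch of that defer-to-caller shape (function names hypothetical):

    #include <stdint.h>

    static void drain_receive_queue(void) { /* stand-in for ipath_kreceive() */ }

    /* Classify errors; return nonzero if the caller should drain the
     * receive queue, instead of draining it from inside the handler. */
    static int classify_errors(uint64_t errs)
    {
            int need_rcv = 0;
            if (errs & 0x1)         /* e.g. port-0 header queue overflowed */
                    need_rcv = 1;
            /* ... report or suppress the remaining bits ... */
            return need_rcv;
    }

    static void interrupt_body(uint64_t errs)
    {
            if (classify_errors(errs))
                    drain_receive_queue();  /* single, well-ordered call site */
    }
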
@@ -678,7 +723,12 @@ set:
                         dd->ipath_sendctrl);
 }
 
-static void handle_rcv(struct ipath_devdata *dd, u32 istat)
+/*
+ * Handle receive interrupts for user ports; this means a user
+ * process was waiting for a packet to arrive, and didn't want
+ * to poll.
+ */
+static void handle_urcv(struct ipath_devdata *dd, u32 istat)
 {
        u64 portr;
        int i;
@@ -688,22 +738,17 @@ static void handle_rcv(struct ipath_devdata *dd, u32 istat)
                 infinipath_i_rcvavail_mask)
                | ((istat >> INFINIPATH_I_RCVURG_SHIFT) &
                   infinipath_i_rcvurg_mask);
-       for (i = 0; i < dd->ipath_cfgports; i++) {
+       for (i = 1; i < dd->ipath_cfgports; i++) {
                struct ipath_portdata *pd = dd->ipath_pd[i];
-               if (portr & (1 << i) && pd &&
-                   pd->port_cnt) {
-                       if (i == 0)
-                               ipath_kreceive(dd);
-                       else if (test_bit(IPATH_PORT_WAITING_RCV,
-                                         &pd->port_flag)) {
-                               int rcbit;
-                               clear_bit(IPATH_PORT_WAITING_RCV,
-                                         &pd->port_flag);
-                               rcbit = i + INFINIPATH_R_INTRAVAIL_SHIFT;
-                               clear_bit(1UL << rcbit, &dd->ipath_rcvctrl);
-                               wake_up_interruptible(&pd->port_wait);
-                               rcvdint = 1;
-                       }
+               if (portr & (1 << i) && pd && pd->port_cnt &&
+                   test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) {
+                       int rcbit;
+                       clear_bit(IPATH_PORT_WAITING_RCV,
+                                 &pd->port_flag);
+                       rcbit = i + INFINIPATH_R_INTRAVAIL_SHIFT;
+                       clear_bit(1UL << rcbit, &dd->ipath_rcvctrl);
+                       wake_up_interruptible(&pd->port_wait);
+                       rcvdint = 1;
                }
        }
        if (rcvdint) {
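
With port 0 drained on the fast path, handle_urcv() only walks user ports and wakes processes that registered as waiting. The loop pattern in isolation, with invented types standing in for the driver's:

    #include <stdint.h>

    #define MAX_PORTS 9

    struct port {
            int open;
            int waiting;   /* stands in for IPATH_PORT_WAITING_RCV */
    };

    /* Wake every user port whose bit is set in 'portr'; port 0 is the
     * kernel port and is handled on the fast path instead. */
    static int wake_user_ports(struct port ports[MAX_PORTS], uint32_t portr)
    {
            int woke = 0;
            for (int i = 1; i < MAX_PORTS; i++) {
                    if ((portr & (1u << i)) && ports[i].open && ports[i].waiting) {
                            ports[i].waiting = 0; /* clear_bit() + wake_up */
                            woke = 1;
                    }
            }
            return woke;
    }
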
@@ -719,16 +764,19 @@ static void handle_rcv(struct ipath_devdata *dd, u32 istat)
 irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
 {
        struct ipath_devdata *dd = data;
-       u32 istat;
+       u32 istat, chk0rcv = 0;
        ipath_err_t estat = 0;
-       static unsigned unexpected = 0;
        irqreturn_t ret;
+       u32 oldhead, curtail;
+       static unsigned unexpected = 0;
+       static const u32 port0rbits = (1U << INFINIPATH_I_RCVAVAIL_SHIFT) |
+               (1U << INFINIPATH_I_RCVURG_SHIFT);
+
+       ipath_stats.sps_ints++;
 
-       if(!(dd->ipath_flags & IPATH_PRESENT)) {
-               /* this is mostly so we don't try to touch the chip while
-                * it is being reset */
+       if (!(dd->ipath_flags & IPATH_PRESENT)) {
                /*
-                * This return value is perhaps odd, but we do not want the
+                * This return value is not great, but we do not want the
                 * interrupt core code to remove our interrupt handler
                 * because we don't appear to be handling an interrupt
                 * during a chip reset.
@@ -736,7 +784,51 @@ irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
                return IRQ_HANDLED;
        }
 
+       /*
+        * This needs to be flags & initted, not statusp, so we keep
+        * taking interrupts even after the link goes down, etc.
+        * Also, we *must* clear the interrupt at some point, or we won't
+        * take it again, which can be really bad for errors, etc.
+        */
+
+       if (!(dd->ipath_flags & IPATH_INITTED)) {
+               ipath_bad_intr(dd, &unexpected);
+               ret = IRQ_NONE;
+               goto bail;
+       }
+
+       /*
+        * We try to avoid reading the interrupt status register, since
+        * that's a PIO read that stalls the processor for up to about
+        * 0.25 usec. The idea is that if we processed a port0 packet,
+        * we blindly clear the port 0 receive interrupt bits, and nothing
+        * else, then return.  If other interrupts are pending, the chip
+        * will re-interrupt us as soon as we write the intclear register.
+        * Eventually we won't find any more kernel packets (if not the
+        * 2nd time through, then the 3rd or 4th) and we'll then handle
+        * the other interrupts.  We clear the interrupts first so that
+        * we don't lose interrupts for later packets that arrive while
+        * we are processing.
+        */
+       oldhead = dd->ipath_port0head;
+       curtail = (u32)le64_to_cpu(*dd->ipath_hdrqtailptr);
+       if (oldhead != curtail) {
+               if (dd->ipath_flags & IPATH_GPIO_INTR) {
+                       ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
+                                        (u64) (1 << 2));
+                       istat = port0rbits | INFINIPATH_I_GPIO;
+               } else
+                       istat = port0rbits;
+               ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);
+               ipath_kreceive(dd);
+               if (oldhead != dd->ipath_port0head) {
+                       ipath_stats.sps_fastrcvint++;
+                       goto done;
+               }
+       }
+
        istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus);
+
        if (unlikely(!istat)) {
                ipath_stats.sps_nullintr++;
                ret = IRQ_NONE; /* not our interrupt, or already handled */
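
The comment above is the heart of this patch: the header-queue tail is DMA'd into host memory, so an ordinary load can detect receive work without the slow PIO read of the interrupt status register. A condensed sketch of that control flow (bit positions and helpers are placeholders, not the chip's):

    #include <stdint.h>

    static uint32_t head;                 /* cached software head */
    static volatile uint64_t hdrq_tail;   /* written by the device via DMA */

    static void write_intclear(uint32_t bits) { (void)bits; /* MMIO write stub */ }
    static uint32_t read_intstatus(void) { return 0; /* slow PIO read stub */ }
    static void drain_port0(void) { head = (uint32_t)hdrq_tail; /* process pkts */ }

    /* Fast path: trust the DMA'd tail in host memory; only do the
     * expensive status-register read if the quick drain made no progress. */
    static void intr_fast_path(void)
    {
            const uint32_t port0rbits = (1u << 0) | (1u << 1); /* illustrative */
            uint32_t oldhead = head;

            if (oldhead != (uint32_t)hdrq_tail) {
                    write_intclear(port0rbits); /* ack first, so a packet
                                                   arriving mid-drain re-interrupts */
                    drain_port0();
                    if (oldhead != head)
                            return;             /* did useful work; skip PIO read */
            }
            (void)read_intstatus();             /* rare, slow path */
    }
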
@@ -749,31 +841,17 @@ irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
                goto bail;
        }
 
-       ipath_stats.sps_ints++;
-
-       /*
-        * this needs to be flags&initted, not statusp, so we keep
-        * taking interrupts even after link goes down, etc.
-        * Also, we *must* clear the interrupt at some point, or we won't
-        * take it again, which can be real bad for errors, etc...
-        */
-
-       if (!(dd->ipath_flags & IPATH_INITTED)) {
-               ipath_bad_intr(dd, &unexpected);
-               ret = IRQ_NONE;
-               goto bail;
-       }
        if (unexpected)
                unexpected = 0;
 
-       ipath_cdbg(VERBOSE, "intr stat=0x%x\n", istat);
-
-       if (istat & ~infinipath_i_bitsextant)
+       if (unlikely(istat & ~infinipath_i_bitsextant))
                ipath_dev_err(dd,
                              "interrupt with unknown interrupts %x set\n",
                              istat & (u32) ~ infinipath_i_bitsextant);
+       else
+               ipath_cdbg(VERBOSE, "intr stat=0x%x\n", istat);
 
-       if (istat & INFINIPATH_I_ERROR) {
+       if (unlikely(istat & INFINIPATH_I_ERROR)) {
                ipath_stats.sps_errints++;
                estat = ipath_read_kreg64(dd,
                                          dd->ipath_kregs->kr_errorstatus);
@@ -788,10 +866,18 @@ irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
                        ipath_dev_err(dd, "Read of error status failed "
                                      "(all bits set); ignoring\n");
                else
-                       handle_errors(dd, estat);
+                       if (handle_errors(dd, estat))
+                               /* force calling ipath_kreceive() */
+                               chk0rcv = 1;
        }
 
        if (istat & INFINIPATH_I_GPIO) {
+               /*
+                * Packets are available in the port 0 rcv queue.
+                * Eventually this needs to be generalized to check
+                * IPATH_GPIO_INTR, and the specific GPIO bit, if
+                * GPIO interrupts are used for anything else.
+                */
                if (unlikely(!(dd->ipath_flags & IPATH_GPIO_INTR))) {
                        u32 gpiostatus;
                        gpiostatus = ipath_read_kreg32(
@@ -804,27 +890,39 @@ irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
                else {
                        /* Clear GPIO status bit 2 */
                        ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
-                                        (u64) (1 << 2));
-
-                       /*
-                        * Packets are available in the port 0 rcv queue.
-                        * Eventually this needs to be generalized to check
-                        * IPATH_GPIO_INTR, and the specific GPIO bit, if
-                        * GPIO interrupts are used for anything else.
-                        */
-                       ipath_kreceive(dd);
+                                       (u64) (1 << 2));
+                       chk0rcv = 1;
                }
        }
+       chk0rcv |= istat & port0rbits;
 
        /*
-        * clear the ones we will deal with on this round
-        * We clear it early, mostly for receive interrupts, so we
-        * know the chip will have seen this by the time we process
-        * the queue, and will re-interrupt if necessary.  The processor
-        * itself won't take the interrupt again until we return.
+        * Clear the interrupt bits we found set, unless they are receive
+        * related, in which case we already cleared them above, and don't
+        * want to clear them again, because we might lose an interrupt.
+        * Clear it early, so we "know" the chip will have seen this by
+        * the time we process the queue, and will re-interrupt if necessary.
+        * The processor itself won't take the interrupt again until we return.
         */
        ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);
 
+       /*
+        * Handle port0 receive before checking for pio buffers available,
+        * since receives can overflow; piobuf waiters can afford a few
+        * extra cycles, since they were waiting anyway, and users waiting
+        * for a receive are handled at the bottom.
+        */
+       if (chk0rcv) {
+               ipath_kreceive(dd);
+               istat &= ~port0rbits;
+       }
+
+       if (istat & ((infinipath_i_rcvavail_mask <<
+                     INFINIPATH_I_RCVAVAIL_SHIFT)
+                    | (infinipath_i_rcvurg_mask <<
+                       INFINIPATH_I_RCVURG_SHIFT)))
+               handle_urcv(dd, istat);
+
        if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
                clear_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
                ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
@@ -836,17 +934,7 @@ irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
                handle_layer_pioavail(dd);
        }
 
-       /*
-        * we check for both transition from empty to non-empty, and urgent
-        * packets (those with the interrupt bit set in the header)
-        */
-
-       if (istat & ((infinipath_i_rcvavail_mask <<
-                     INFINIPATH_I_RCVAVAIL_SHIFT)
-                    | (infinipath_i_rcvurg_mask <<
-                       INFINIPATH_I_RCVURG_SHIFT)))
-               handle_rcv(dd, istat);
-
+done:
        ret = IRQ_HANDLED;
 
 bail:
index 5d92d57b6f5479310a8206812d6d2516e526f71b..e9f374fb641ef6f1c8e0870e9e47551bc623188f 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef _IPATH_KERNEL_H
 #define _IPATH_KERNEL_H
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -61,9 +62,7 @@ struct ipath_portdata {
        /* rcvhdrq base, needs mmap before useful */
        void *port_rcvhdrq;
        /* kernel virtual address where hdrqtail is updated */
-       u64 *port_rcvhdrtail_kvaddr;
-       /* page * used for uaddr */
-       struct page *port_rcvhdrtail_pagep;
+       volatile __le64 *port_rcvhdrtail_kvaddr;
        /*
         * temp buffer for expected send setup, allocated at open, instead
         * of each setup call
@@ -78,11 +77,7 @@ struct ipath_portdata {
        dma_addr_t port_rcvegr_phys;
        /* mmap of hdrq, must fit in 44 bits */
        dma_addr_t port_rcvhdrq_phys;
-       /*
-        * the actual user address that we ipath_mlock'ed, so we can
-        * ipath_munlock it at close
-        */
-       unsigned long port_rcvhdrtail_uaddr;
+       dma_addr_t port_rcvhdrqtailaddr_phys;
        /*
         * number of opens on this instance (0 or 1; ignoring forks, dup,
         * etc. for now)
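
port_rcvhdrtail_kvaddr changes here from an mlocked user page to coherent DMA memory that only the chip writes, typed volatile __le64 * so every read goes to memory. A userspace sketch of the read side (the byte-swap helper stands in for le64_to_cpu(); it is not the kernel's):

    #include <stdint.h>

    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    static inline uint64_t le64_to_cpu_sketch(uint64_t v)
    { return __builtin_bswap64(v); }
    #else
    static inline uint64_t le64_to_cpu_sketch(uint64_t v) { return v; }
    #endif

    /* The device DMAs the tail index here; 'volatile' forces a fresh
     * load on every read instead of letting the compiler cache it. */
    static uint32_t read_hdrq_tail(const volatile uint64_t *tailptr)
    {
            return (uint32_t)le64_to_cpu_sketch(*tailptr);
    }
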
@@ -157,17 +152,11 @@ struct ipath_devdata {
        unsigned long ipath_physaddr;
        /* base of memory alloced for ipath_kregbase, for free */
        u64 *ipath_kregalloc;
-       /*
-        * version of kregbase that doesn't have high bits set (for 32 bit
-        * programs, so mmap64 44 bit works)
-        */
-       u64 __iomem *ipath_kregvirt;
        /*
         * virtual address where port0 rcvhdrqtail updated for this unit.
         * only written to by the chip, not the driver.
         */
        volatile __le64 *ipath_hdrqtailptr;
-       dma_addr_t ipath_dma_addr;
        /* ipath_cfgports pointers */
        struct ipath_portdata **ipath_pd;
        /* sk_buffs used by port 0 eager receive queue */
@@ -354,13 +343,17 @@ struct ipath_devdata {
        char *ipath_freezemsg;
        /* pci access data structure */
        struct pci_dev *pcidev;
-       struct cdev *cdev;
-       struct class_device *class_dev;
+       struct cdev *user_cdev;
+       struct cdev *diag_cdev;
+       struct class_device *user_class_dev;
+       struct class_device *diag_class_dev;
        /* timer used to prevent stats overflow, error throttling, etc. */
        struct timer_list ipath_stats_timer;
        /* check for stale messages in rcv queue */
        /* only allow one intr at a time. */
        unsigned long ipath_rcv_pending;
+       void *ipath_dummy_hdrq; /* used after port close */
+       dma_addr_t ipath_dummy_hdrq_phys;
 
        /*
         * Shadow copies of registers; size indicates read access size.
@@ -500,8 +493,11 @@ struct ipath_devdata {
        u16 ipath_lid;
        /* list of pkeys programmed; 0 if not set */
        u16 ipath_pkeys[4];
-       /* ASCII serial number, from flash */
-       u8 ipath_serial[12];
+       /*
+        * ASCII serial number, from flash, large enough for the original
+        * all-digit strings and the longer QLogic serial number format
+        */
+       u8 ipath_serial[16];
        /* human readable board version */
        u8 ipath_boardversion[80];
        /* chip major rev, from ipath_revision */
@@ -516,12 +512,12 @@ struct ipath_devdata {
        u8 ipath_pci_cacheline;
        /* LID mask control */
        u8 ipath_lmc;
-};
-
-extern volatile __le64 *ipath_port0_rcvhdrtail;
-extern dma_addr_t ipath_port0_rcvhdrtail_dma;
 
-#define IPATH_PORT0_RCVHDRTAIL_SIZE PAGE_SIZE
+       /* local link integrity counter */
+       u32 ipath_lli_counter;
+       /* local link integrity errors */
+       u32 ipath_lli_errors;
+};
 
 extern struct list_head ipath_dev_list;
 extern spinlock_t ipath_devs_lock;
@@ -537,7 +533,7 @@ extern int __ipath_verbs_piobufavail(struct ipath_devdata *);
 extern int __ipath_verbs_rcv(struct ipath_devdata *, void *, void *, u32);
 
 void ipath_layer_add(struct ipath_devdata *);
-void ipath_layer_del(struct ipath_devdata *);
+void ipath_layer_remove(struct ipath_devdata *);
 
 int ipath_init_chip(struct ipath_devdata *, int);
 int ipath_enable_wc(struct ipath_devdata *dd);
@@ -551,14 +547,14 @@ int ipath_cdev_init(int minor, char *name, struct file_operations *fops,
 void ipath_cdev_cleanup(struct cdev **cdevp,
                        struct class_device **class_devp);
 
-int ipath_diag_init(void);
-void ipath_diag_cleanup(void);
+int ipath_diag_add(struct ipath_devdata *);
+void ipath_diag_remove(struct ipath_devdata *);
 void ipath_diag_bringup_link(struct ipath_devdata *);
 
 extern wait_queue_head_t ipath_sma_state_wait;
 
 int ipath_user_add(struct ipath_devdata *dd);
-void ipath_user_del(struct ipath_devdata *dd);
+void ipath_user_remove(struct ipath_devdata *dd);
 
 struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd, gfp_t);
 
@@ -582,7 +578,7 @@ void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first,
                          unsigned cnt);
 
 int ipath_create_rcvhdrq(struct ipath_devdata *, struct ipath_portdata *);
-void ipath_free_pddata(struct ipath_devdata *, u32, int);
+void ipath_free_pddata(struct ipath_devdata *, struct ipath_portdata *);
 
 int ipath_parse_ushort(const char *str, unsigned short *valp);
 
@@ -720,13 +716,8 @@ u64 ipath_read_kreg64_port(const struct ipath_devdata *, ipath_kreg,
  * @port: port number
  *
  * Return the contents of a register that is virtualized to be per port.
- * Prints a debug message and returns -1 on errors (not distinguishable from
- * valid contents at runtime; we may add a separate error variable at some
- * point).
- *
- * This is normally not used by the kernel, but may be for debugging, and
- * has a different implementation than user mode, which is why it's not in
- * _common.h.
+ * Returns -1 on errors (not distinguishable from valid contents at
+ * runtime; we may add a separate error variable at some point).
  */
 static inline u32 ipath_read_ureg32(const struct ipath_devdata *dd,
                                    ipath_ureg regno, int port)
@@ -842,9 +833,10 @@ extern struct mutex ipath_mutex;
 
 #define IPATH_DRV_NAME         "ipath_core"
 #define IPATH_MAJOR            233
+#define IPATH_USER_MINOR_BASE  0
 #define IPATH_SMA_MINOR                128
-#define IPATH_DIAG_MINOR       129
-#define IPATH_NMINORS          130
+#define IPATH_DIAG_MINOR_BASE  129
+#define IPATH_NMINORS          255
 
 #define ipath_dev_err(dd,fmt,...) \
        do { \
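
The new minor-number constants split the 255 minors into ranges: user devices starting at IPATH_USER_MINOR_BASE, the SMA fixed at 128, and per-unit diag devices from IPATH_DIAG_MINOR_BASE up. A sketch of a unit-to-minor mapping consistent with those ranges (the mapping functions are hypothetical, not the driver's):

    #define USER_MINOR_BASE  0
    #define SMA_MINOR        128
    #define DIAG_MINOR_BASE  129
    #define NMINORS          255

    /* Hypothetical per-unit mapping consistent with the ranges above. */
    static int user_minor(int unit) { return USER_MINOR_BASE + unit; } /* 0..127   */
    static int diag_minor(int unit) { return DIAG_MINOR_BASE + unit; } /* 129..254 */
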
index 5ae8761f9dd2e8b3ed5dc86430ac9617c251aab9..46773c673a1af9f587eef5e9fd743503b98f476b 100644 (file)
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -120,6 +121,7 @@ int ipath_lkey_ok(struct ipath_lkey_table *rkt, struct ipath_sge *isge,
                  struct ib_sge *sge, int acc)
 {
        struct ipath_mregion *mr;
+       unsigned n, m;
        size_t off;
        int ret;
 
@@ -151,20 +153,22 @@ int ipath_lkey_ok(struct ipath_lkey_table *rkt, struct ipath_sge *isge,
        }
 
        off += mr->offset;
-       isge->mr = mr;
-       isge->m = 0;
-       isge->n = 0;
-       while (off >= mr->map[isge->m]->segs[isge->n].length) {
-               off -= mr->map[isge->m]->segs[isge->n].length;
-               isge->n++;
-               if (isge->n >= IPATH_SEGSZ) {
-                       isge->m++;
-                       isge->n = 0;
+       m = 0;
+       n = 0;
+       while (off >= mr->map[m]->segs[n].length) {
+               off -= mr->map[m]->segs[n].length;
+               n++;
+               if (n >= IPATH_SEGSZ) {
+                       m++;
+                       n = 0;
                }
        }
-       isge->vaddr = mr->map[isge->m]->segs[isge->n].vaddr + off;
-       isge->length = mr->map[isge->m]->segs[isge->n].length - off;
+       isge->mr = mr;
+       isge->vaddr = mr->map[m]->segs[n].vaddr + off;
+       isge->length = mr->map[m]->segs[n].length - off;
        isge->sge_length = sge->length;
+       isge->m = m;
+       isge->n = n;
 
        ret = 1;
 
@@ -189,6 +193,7 @@ int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
        struct ipath_lkey_table *rkt = &dev->lk_table;
        struct ipath_sge *sge = &ss->sge;
        struct ipath_mregion *mr;
+       unsigned n, m;
        size_t off;
        int ret;
 
@@ -206,20 +211,22 @@ int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
        }
 
        off += mr->offset;
-       sge->mr = mr;
-       sge->m = 0;
-       sge->n = 0;
-       while (off >= mr->map[sge->m]->segs[sge->n].length) {
-               off -= mr->map[sge->m]->segs[sge->n].length;
-               sge->n++;
-               if (sge->n >= IPATH_SEGSZ) {
-                       sge->m++;
-                       sge->n = 0;
+       m = 0;
+       n = 0;
+       while (off >= mr->map[m]->segs[n].length) {
+               off -= mr->map[m]->segs[n].length;
+               n++;
+               if (n >= IPATH_SEGSZ) {
+                       m++;
+                       n = 0;
                }
        }
-       sge->vaddr = mr->map[sge->m]->segs[sge->n].vaddr + off;
-       sge->length = mr->map[sge->m]->segs[sge->n].length - off;
+       sge->mr = mr;
+       sge->vaddr = mr->map[m]->segs[n].vaddr + off;
+       sge->length = mr->map[m]->segs[n].length - off;
        sge->sge_length = len;
+       sge->m = m;
+       sge->n = n;
        ss->sg_list = NULL;
        ss->num_sge = 1;
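
Both ipath_lkey_ok() and ipath_rkey_ok() now run the two-level segment walk on local m/n indices and only commit the results to the SGE once the walk finishes, rather than mutating the SGE as they go. The walk by itself, with invented structure sizes:

    #include <stddef.h>

    #define SEGSZ 4   /* segments per map entry, illustrative */

    struct seg { char *vaddr; size_t length; };
    struct map { struct seg segs[SEGSZ]; };

    /* Find the segment containing byte offset 'off'; return the final
     * (m, n) indices and the remainder within that segment. */
    static void seg_walk(struct map **maps, size_t off,
                         unsigned *mp, unsigned *np, size_t *remp)
    {
            unsigned m = 0, n = 0;

            while (off >= maps[m]->segs[n].length) {
                    off -= maps[m]->segs[n].length;
                    if (++n >= SEGSZ) {
                            m++;
                            n = 0;
                    }
            }
            *mp = m; *np = n; *remp = off;
    }
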
 
index 9ec4ac77b87f88df622d5b1d3bc3acf421d3c9b1..b28c6f81c73121b2195ef3667b30396d479ea9a1 100644 (file)
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -40,8 +41,8 @@
 #include <asm/byteorder.h>
 
 #include "ipath_kernel.h"
-#include "ips_common.h"
 #include "ipath_layer.h"
+#include "ipath_common.h"
 
 /* Acquire before ipath_devs_lock. */
 static DEFINE_MUTEX(ipath_layer_mutex);
@@ -299,9 +300,8 @@ bail:
 
 EXPORT_SYMBOL_GPL(ipath_layer_set_mtu);
 
-int ipath_set_sps_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
+int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
 {
-       ipath_stats.sps_lid[dd->ipath_unit] = arg;
        dd->ipath_lid = arg;
        dd->ipath_lmc = lmc;
 
@@ -315,7 +315,7 @@ int ipath_set_sps_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
        return 0;
 }
 
-EXPORT_SYMBOL_GPL(ipath_set_sps_lid);
+EXPORT_SYMBOL_GPL(ipath_set_lid);
 
 int ipath_layer_set_guid(struct ipath_devdata *dd, __be64 guid)
 {
@@ -340,18 +340,26 @@ u32 ipath_layer_get_nguid(struct ipath_devdata *dd)
 
 EXPORT_SYMBOL_GPL(ipath_layer_get_nguid);
 
-int ipath_layer_query_device(struct ipath_devdata *dd, u32 * vendor,
-                            u32 * boardrev, u32 * majrev, u32 * minrev)
+u32 ipath_layer_get_majrev(struct ipath_devdata *dd)
 {
-       *vendor = dd->ipath_vendorid;
-       *boardrev = dd->ipath_boardrev;
-       *majrev = dd->ipath_majrev;
-       *minrev = dd->ipath_minrev;
+       return dd->ipath_majrev;
+}
 
-       return 0;
+EXPORT_SYMBOL_GPL(ipath_layer_get_majrev);
+
+u32 ipath_layer_get_minrev(struct ipath_devdata *dd)
+{
+       return dd->ipath_minrev;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_get_minrev);
+
+u32 ipath_layer_get_pcirev(struct ipath_devdata *dd)
+{
+       return dd->ipath_pcirev;
 }
 
-EXPORT_SYMBOL_GPL(ipath_layer_query_device);
+EXPORT_SYMBOL_GPL(ipath_layer_get_pcirev);
 
 u32 ipath_layer_get_flags(struct ipath_devdata *dd)
 {
@@ -374,6 +382,13 @@ u16 ipath_layer_get_deviceid(struct ipath_devdata *dd)
 
 EXPORT_SYMBOL_GPL(ipath_layer_get_deviceid);
 
+u32 ipath_layer_get_vendorid(struct ipath_devdata *dd)
+{
+       return dd->ipath_vendorid;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_get_vendorid);
+
 u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd)
 {
        return dd->ipath_lastibcstat;
@@ -403,7 +418,7 @@ void ipath_layer_add(struct ipath_devdata *dd)
        mutex_unlock(&ipath_layer_mutex);
 }
 
-void ipath_layer_del(struct ipath_devdata *dd)
+void ipath_layer_remove(struct ipath_devdata *dd)
 {
        mutex_lock(&ipath_layer_mutex);
 
@@ -607,7 +622,7 @@ int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax)
                goto bail;
        }
 
-       ret = ipath_setrcvhdrsize(dd, NUM_OF_EXTRA_WORDS_IN_HEADER_QUEUE);
+       ret = ipath_setrcvhdrsize(dd, IPATH_HEADER_QUEUE_WORDS);
 
        if (ret < 0)
                goto bail;
@@ -616,9 +631,9 @@ int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax)
 
        if (*dd->ipath_statusp & IPATH_STATUS_IB_READY)
                intval |= IPATH_LAYER_INT_IF_UP;
-       if (ipath_stats.sps_lid[dd->ipath_unit])
+       if (dd->ipath_lid)
                intval |= IPATH_LAYER_INT_LID;
-       if (ipath_stats.sps_mlid[dd->ipath_unit])
+       if (dd->ipath_mlid)
                intval |= IPATH_LAYER_INT_BCAST;
        /*
         * do this on open, in case low level is already up and
@@ -884,7 +899,7 @@ static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
 /**
  * ipath_verbs_send - send a packet from the verbs layer
  * @dd: the infinipath device
- * @hdrwords: the number of works in the header
+ * @hdrwords: the number of words in the header
  * @hdr: the packet header
  * @len: the length of the packet in bytes
  * @ss: the SGE to send
@@ -1016,19 +1031,22 @@ int ipath_layer_get_counters(struct ipath_devdata *dd,
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
        cntrs->link_error_recovery_counter =
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
+       /*
+        * The link downed counter counts when the other side downs the
+        * connection.  We add in the number of times we downed the link
+        * due to local link integrity errors to compensate.
+        */
        cntrs->link_downed_counter =
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
        cntrs->port_rcv_errors =
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
-               ipath_snap_cntr(dd, dd->ipath_cregs->cr_errrcvflowctrlcnt) +
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
-               ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlinkcnt) +
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
        cntrs->port_rcv_remphys_errors =
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
@@ -1042,6 +1060,8 @@ int ipath_layer_get_counters(struct ipath_devdata *dd,
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
        cntrs->port_rcv_packets =
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
+       cntrs->local_link_integrity_errors = dd->ipath_lli_errors;
+       cntrs->excessive_buffer_overrun_errors = 0; /* XXX */
 
        ret = 0;
 
@@ -1086,10 +1106,10 @@ int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
                }
 
        vlsllnh = *((__be16 *) hdr);
-       if (vlsllnh != htons(IPS_LRH_BTH)) {
+       if (vlsllnh != htons(IPATH_LRH_BTH)) {
                ipath_dbg("Warning: lrh[0] wrong (%x, not %x); "
                          "not sending\n", be16_to_cpu(vlsllnh),
-                         IPS_LRH_BTH);
+                         IPATH_LRH_BTH);
                ret = -EINVAL;
        }
        if (ret)
index 6fefd15bd2da6f2dab304c7a5be54f62ae3d9daf..71485096fcacabc1deb7cb5e8295e0020847fc2b 100644 (file)
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -54,6 +55,8 @@ struct ipath_layer_counters {
        u64 port_rcv_data;
        u64 port_xmit_packets;
        u64 port_rcv_packets;
+       u32 local_link_integrity_errors;
+       u32 excessive_buffer_overrun_errors;
 };
 
 /*
@@ -126,7 +129,7 @@ u16 ipath_layer_get_bcast(struct ipath_devdata *dd);
 u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd);
 int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 state);
 int ipath_layer_set_mtu(struct ipath_devdata *, u16);
-int ipath_set_sps_lid(struct ipath_devdata *, u32, u8);
+int ipath_set_lid(struct ipath_devdata *, u32, u8);
 int ipath_layer_send_hdr(struct ipath_devdata *dd,
                         struct ether_header *hdr);
 int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
@@ -143,11 +146,13 @@ int ipath_layer_want_buffer(struct ipath_devdata *dd);
 int ipath_layer_set_guid(struct ipath_devdata *, __be64 guid);
 __be64 ipath_layer_get_guid(struct ipath_devdata *);
 u32 ipath_layer_get_nguid(struct ipath_devdata *);
-int ipath_layer_query_device(struct ipath_devdata *, u32 * vendor,
-                            u32 * boardrev, u32 * majrev, u32 * minrev);
+u32 ipath_layer_get_majrev(struct ipath_devdata *);
+u32 ipath_layer_get_minrev(struct ipath_devdata *);
+u32 ipath_layer_get_pcirev(struct ipath_devdata *);
 u32 ipath_layer_get_flags(struct ipath_devdata *dd);
 struct device *ipath_layer_get_device(struct ipath_devdata *dd);
 u16 ipath_layer_get_deviceid(struct ipath_devdata *dd);
+u32 ipath_layer_get_vendorid(struct ipath_devdata *);
 u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd);
 u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd);
 int ipath_layer_enable_timer(struct ipath_devdata *dd);
index 1a9d0a2c33c37f6aa4d158f1505dc501548486ad..d3402341b7d0dddc154c1e765e6fe988ad72d091 100644 (file)
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -34,7 +35,7 @@
 
 #include "ipath_kernel.h"
 #include "ipath_verbs.h"
-#include "ips_common.h"
+#include "ipath_common.h"
 
 #define IB_SMP_UNSUP_VERSION   __constant_htons(0x0004)
 #define IB_SMP_UNSUP_METHOD    __constant_htons(0x0008)
@@ -84,7 +85,7 @@ static int recv_subn_get_nodeinfo(struct ib_smp *smp,
 {
        struct nodeinfo *nip = (struct nodeinfo *)&smp->data;
        struct ipath_devdata *dd = to_idev(ibdev)->dd;
-       u32 vendor, boardid, majrev, minrev;
+       u32 vendor, majrev, minrev;
 
        if (smp->attr_mod)
                smp->status |= IB_SMP_INVALID_FIELD;
@@ -104,9 +105,11 @@ static int recv_subn_get_nodeinfo(struct ib_smp *smp,
        nip->port_guid = nip->sys_guid;
        nip->partition_cap = cpu_to_be16(ipath_layer_get_npkeys(dd));
        nip->device_id = cpu_to_be16(ipath_layer_get_deviceid(dd));
-       ipath_layer_query_device(dd, &vendor, &boardid, &majrev, &minrev);
+       majrev = ipath_layer_get_majrev(dd);
+       minrev = ipath_layer_get_minrev(dd);
        nip->revision = cpu_to_be32((majrev << 16) | minrev);
        nip->local_port_num = port;
+       vendor = ipath_layer_get_vendorid(dd);
        nip->vendor_id[0] = 0;
        nip->vendor_id[1] = vendor >> 8;
        nip->vendor_id[2] = vendor;
@@ -215,7 +218,7 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
        /* P_KeyViolations are counted by hardware. */
        pip->pkey_violations =
                cpu_to_be16((ipath_layer_get_cr_errpkey(dev->dd) -
-                            dev->n_pkey_violations) & 0xFFFF);
+                            dev->z_pkey_violations) & 0xFFFF);
        pip->qkey_violations = cpu_to_be16(dev->qkey_violations);
        /* Only the hardware GUID is supported for now */
        pip->guid_cap = 1;
@@ -303,9 +306,9 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
        lid = be16_to_cpu(pip->lid);
        if (lid != ipath_layer_get_lid(dev->dd)) {
                /* Must be a valid unicast LID address. */
-               if (lid == 0 || lid >= IPS_MULTICAST_LID_BASE)
+               if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE)
                        goto err;
-               ipath_set_sps_lid(dev->dd, lid, pip->mkeyprot_resv_lmc & 7);
+               ipath_set_lid(dev->dd, lid, pip->mkeyprot_resv_lmc & 7);
                event.event = IB_EVENT_LID_CHANGE;
                ib_dispatch_event(&event);
        }
@@ -313,7 +316,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
        smlid = be16_to_cpu(pip->sm_lid);
        if (smlid != dev->sm_lid) {
                /* Must be a valid unicast LID address. */
-               if (smlid == 0 || smlid >= IPS_MULTICAST_LID_BASE)
+               if (smlid == 0 || smlid >= IPATH_MULTICAST_LID_BASE)
                        goto err;
                dev->sm_lid = smlid;
                event.event = IB_EVENT_SM_CHANGE;
@@ -389,7 +392,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
         * later.
         */
        if (pip->pkey_violations == 0)
-               dev->n_pkey_violations =
+               dev->z_pkey_violations =
                        ipath_layer_get_cr_errpkey(dev->dd);
 
        if (pip->qkey_violations == 0)
@@ -610,6 +613,9 @@ struct ib_pma_portcounters {
 #define IB_PMA_SEL_PORT_RCV_ERRORS             __constant_htons(0x0008)
 #define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS     __constant_htons(0x0010)
 #define IB_PMA_SEL_PORT_XMIT_DISCARDS          __constant_htons(0x0040)
+#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS __constant_htons(0x0200)
+#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS   __constant_htons(0x0400)
+#define IB_PMA_SEL_PORT_VL15_DROPPED           __constant_htons(0x0800)
 #define IB_PMA_SEL_PORT_XMIT_DATA              __constant_htons(0x1000)
 #define IB_PMA_SEL_PORT_RCV_DATA               __constant_htons(0x2000)
 #define IB_PMA_SEL_PORT_XMIT_PACKETS           __constant_htons(0x4000)
@@ -844,18 +850,22 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
        ipath_layer_get_counters(dev->dd, &cntrs);
 
        /* Adjust counters for any resets done. */
-       cntrs.symbol_error_counter -= dev->n_symbol_error_counter;
+       cntrs.symbol_error_counter -= dev->z_symbol_error_counter;
        cntrs.link_error_recovery_counter -=
-               dev->n_link_error_recovery_counter;
-       cntrs.link_downed_counter -= dev->n_link_downed_counter;
+               dev->z_link_error_recovery_counter;
+       cntrs.link_downed_counter -= dev->z_link_downed_counter;
        cntrs.port_rcv_errors += dev->rcv_errors;
-       cntrs.port_rcv_errors -= dev->n_port_rcv_errors;
-       cntrs.port_rcv_remphys_errors -= dev->n_port_rcv_remphys_errors;
-       cntrs.port_xmit_discards -= dev->n_port_xmit_discards;
-       cntrs.port_xmit_data -= dev->n_port_xmit_data;
-       cntrs.port_rcv_data -= dev->n_port_rcv_data;
-       cntrs.port_xmit_packets -= dev->n_port_xmit_packets;
-       cntrs.port_rcv_packets -= dev->n_port_rcv_packets;
+       cntrs.port_rcv_errors -= dev->z_port_rcv_errors;
+       cntrs.port_rcv_remphys_errors -= dev->z_port_rcv_remphys_errors;
+       cntrs.port_xmit_discards -= dev->z_port_xmit_discards;
+       cntrs.port_xmit_data -= dev->z_port_xmit_data;
+       cntrs.port_rcv_data -= dev->z_port_rcv_data;
+       cntrs.port_xmit_packets -= dev->z_port_xmit_packets;
+       cntrs.port_rcv_packets -= dev->z_port_rcv_packets;
+       cntrs.local_link_integrity_errors -=
+               dev->z_local_link_integrity_errors;
+       cntrs.excessive_buffer_overrun_errors -=
+               dev->z_excessive_buffer_overrun_errors;
 
        memset(pmp->data, 0, sizeof(pmp->data));
 
@@ -893,6 +903,16 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
        else
                p->port_xmit_discards =
                        cpu_to_be16((u16)cntrs.port_xmit_discards);
+       if (cntrs.local_link_integrity_errors > 0xFUL)
+               cntrs.local_link_integrity_errors = 0xFUL;
+       if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
+               cntrs.excessive_buffer_overrun_errors = 0xFUL;
+       p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
+               cntrs.excessive_buffer_overrun_errors;
+       if (dev->n_vl15_dropped > 0xFFFFUL)
+               p->vl15_dropped = __constant_cpu_to_be16(0xFFFF);
+       else
+               p->vl15_dropped = cpu_to_be16((u16)dev->n_vl15_dropped);
        if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
                p->port_xmit_data = __constant_cpu_to_be32(0xFFFFFFFF);
        else
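
LocalLinkIntegrityErrors and ExcessiveBufferOverrunErrors share one byte of the PortCounters reply as two 4-bit fields, hence the clamping to 0xF above before packing. The same clamp-and-pack in isolation:

    #include <stdint.h>

    /* Saturate two counters at 0xF and pack them into one byte:
     * high nibble = link-integrity errors, low nibble = buffer overruns. */
    static uint8_t pack_lli_ebor(uint32_t lli, uint32_t ebor)
    {
            if (lli > 0xF)
                    lli = 0xF;
            if (ebor > 0xF)
                    ebor = 0xF;
            return (uint8_t)((lli << 4) | ebor);
    }
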
@@ -928,10 +948,10 @@ static int recv_pma_get_portcounters_ext(struct ib_perf *pmp,
                                      &rpkts, &xwait);
 
        /* Adjust counters for any resets done. */
-       swords -= dev->n_port_xmit_data;
-       rwords -= dev->n_port_rcv_data;
-       spkts -= dev->n_port_xmit_packets;
-       rpkts -= dev->n_port_rcv_packets;
+       swords -= dev->z_port_xmit_data;
+       rwords -= dev->z_port_rcv_data;
+       spkts -= dev->z_port_xmit_packets;
+       rpkts -= dev->z_port_rcv_packets;
 
        memset(pmp->data, 0, sizeof(pmp->data));
 
@@ -967,37 +987,48 @@ static int recv_pma_set_portcounters(struct ib_perf *pmp,
        ipath_layer_get_counters(dev->dd, &cntrs);
 
        if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
-               dev->n_symbol_error_counter = cntrs.symbol_error_counter;
+               dev->z_symbol_error_counter = cntrs.symbol_error_counter;
 
        if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
-               dev->n_link_error_recovery_counter =
+               dev->z_link_error_recovery_counter =
                        cntrs.link_error_recovery_counter;
 
        if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
-               dev->n_link_downed_counter = cntrs.link_downed_counter;
+               dev->z_link_downed_counter = cntrs.link_downed_counter;
 
        if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
-               dev->n_port_rcv_errors =
+               dev->z_port_rcv_errors =
                        cntrs.port_rcv_errors + dev->rcv_errors;
 
        if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
-               dev->n_port_rcv_remphys_errors =
+               dev->z_port_rcv_remphys_errors =
                        cntrs.port_rcv_remphys_errors;
 
        if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
-               dev->n_port_xmit_discards = cntrs.port_xmit_discards;
+               dev->z_port_xmit_discards = cntrs.port_xmit_discards;
+
+       if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
+               dev->z_local_link_integrity_errors =
+                       cntrs.local_link_integrity_errors;
+
+       if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
+               dev->z_excessive_buffer_overrun_errors =
+                       cntrs.excessive_buffer_overrun_errors;
+
+       if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED)
+               dev->n_vl15_dropped = 0;
 
        if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
-               dev->n_port_xmit_data = cntrs.port_xmit_data;
+               dev->z_port_xmit_data = cntrs.port_xmit_data;
 
        if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
-               dev->n_port_rcv_data = cntrs.port_rcv_data;
+               dev->z_port_rcv_data = cntrs.port_rcv_data;
 
        if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
-               dev->n_port_xmit_packets = cntrs.port_xmit_packets;
+               dev->z_port_xmit_packets = cntrs.port_xmit_packets;
 
        if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
-               dev->n_port_rcv_packets = cntrs.port_rcv_packets;
+               dev->z_port_rcv_packets = cntrs.port_rcv_packets;
 
        return recv_pma_get_portcounters(pmp, ibdev, port);
 }
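
The n_ to z_ rename says what these fields are: zero baselines. The hardware counters can't be cleared, so a PMA "set" just snapshots the current value, and every later read subtracts the snapshot. A minimal sketch of the idiom:

    #include <stdint.h>

    struct counter {
            uint64_t z_base;   /* snapshot taken at the last "reset" */
    };

    /* "Clear" a counter that hardware won't let us clear. */
    static void counter_reset(struct counter *c, uint64_t hw_value)
    {
            c->z_base = hw_value;
    }

    /* What we report: the raw hardware value adjusted for resets. */
    static uint64_t counter_read(const struct counter *c, uint64_t hw_value)
    {
            return hw_value - c->z_base;
    }
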
@@ -1014,16 +1045,16 @@ static int recv_pma_set_portcounters_ext(struct ib_perf *pmp,
                                      &rpkts, &xwait);
 
        if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
-               dev->n_port_xmit_data = swords;
+               dev->z_port_xmit_data = swords;
 
        if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
-               dev->n_port_rcv_data = rwords;
+               dev->z_port_rcv_data = rwords;
 
        if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
-               dev->n_port_xmit_packets = spkts;
+               dev->z_port_xmit_packets = spkts;
 
        if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
-               dev->n_port_rcv_packets = rpkts;
+               dev->z_port_rcv_packets = rpkts;
 
        if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
                dev->n_unicast_xmit = 0;
@@ -1272,32 +1303,8 @@ int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                      struct ib_wc *in_wc, struct ib_grh *in_grh,
                      struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
-       struct ipath_ibdev *dev = to_idev(ibdev);
        int ret;
 
-       /*
-        * Snapshot current HW counters to "clear" them.
-        * This should be done when the driver is loaded except that for
-        * some reason we get a zillion errors when brining up the link.
-        */
-       if (dev->rcv_errors == 0) {
-               struct ipath_layer_counters cntrs;
-
-               ipath_layer_get_counters(to_idev(ibdev)->dd, &cntrs);
-               dev->rcv_errors++;
-               dev->n_symbol_error_counter = cntrs.symbol_error_counter;
-               dev->n_link_error_recovery_counter =
-                       cntrs.link_error_recovery_counter;
-               dev->n_link_downed_counter = cntrs.link_downed_counter;
-               dev->n_port_rcv_errors = cntrs.port_rcv_errors + 1;
-               dev->n_port_rcv_remphys_errors =
-                       cntrs.port_rcv_remphys_errors;
-               dev->n_port_xmit_discards = cntrs.port_xmit_discards;
-               dev->n_port_xmit_data = cntrs.port_xmit_data;
-               dev->n_port_rcv_data = cntrs.port_rcv_data;
-               dev->n_port_xmit_packets = cntrs.port_xmit_packets;
-               dev->n_port_rcv_packets = cntrs.port_rcv_packets;
-       }
        switch (in_mad->mad_hdr.mgmt_class) {
        case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
        case IB_MGMT_CLASS_SUBN_LID_ROUTED:
index 69ffec66d45da972b01f0118d4ecbec13209831a..4ac31a5da3308dd5c77555d7e86fd69abce9b023 100644 (file)
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -169,6 +170,11 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
        int n, m, i;
        struct ib_mr *ret;
 
+       if (region->length == 0) {
+               ret = ERR_PTR(-EINVAL);
+               goto bail;
+       }
+
        n = 0;
        list_for_each_entry(chunk, &region->chunk_list, list)
                n += chunk->nents;
index 02e8c75b24f6d175ebde6b42a52da54429d0459b..b83f66d8262cfb0830ea61e424ec68822123ce89 100644 (file)
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -44,7 +45,7 @@
 
 /*
  * This file contains all the chip-specific register information and
- * access functions for the PathScale PE800, the PCI-Express chip.
+ * access functions for the QLogic InfiniPath PE800, the PCI-Express chip.
  *
  * This lists the InfiniPath PE800 registers, in the actual chip layout.
  * This structure should never be directly accessed.
@@ -532,7 +533,7 @@ static int ipath_pe_boardname(struct ipath_devdata *dd, char *name,
        if (n)
                snprintf(name, namelen, "%s", n);
 
-       if (dd->ipath_majrev != 4 || dd->ipath_minrev != 1) {
+       if (dd->ipath_majrev != 4 || !dd->ipath_minrev || dd->ipath_minrev > 2) {
                ipath_dev_err(dd, "Unsupported PE-800 revision %u.%u!\n",
                              dd->ipath_majrev, dd->ipath_minrev);
                ret = 1;
index 9f8855d970c87483d0f16faf358630e4bb73068f..83e557be591ed4165432cd6d51d0094d20cc719e 100644 (file)
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -34,7 +35,7 @@
 #include <linux/vmalloc.h>
 
 #include "ipath_verbs.h"
-#include "ips_common.h"
+#include "ipath_common.h"
 
 #define BITS_PER_PAGE          (PAGE_SIZE*BITS_PER_BYTE)
 #define BITS_PER_PAGE_MASK     (BITS_PER_PAGE-1)
@@ -332,10 +333,11 @@ static void ipath_reset_qp(struct ipath_qp *qp)
        qp->remote_qpn = 0;
        qp->qkey = 0;
        qp->qp_access_flags = 0;
+       clear_bit(IPATH_S_BUSY, &qp->s_flags);
        qp->s_hdrwords = 0;
        qp->s_psn = 0;
        qp->r_psn = 0;
-       atomic_set(&qp->msn, 0);
+       qp->r_msn = 0;
        if (qp->ibqp.qp_type == IB_QPT_RC) {
                qp->s_state = IB_OPCODE_RC_SEND_LAST;
                qp->r_state = IB_OPCODE_RC_SEND_LAST;
@@ -344,7 +346,8 @@ static void ipath_reset_qp(struct ipath_qp *qp)
                qp->r_state = IB_OPCODE_UC_SEND_LAST;
        }
        qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
-       qp->s_nak_state = 0;
+       qp->r_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
+       qp->r_nak_state = 0;
        qp->s_rnr_timeout = 0;
        qp->s_head = 0;
        qp->s_tail = 0;
@@ -362,10 +365,10 @@ static void ipath_reset_qp(struct ipath_qp *qp)
  * @qp: the QP to put into an error state
  *
  * Flushes both send and receive work queues.
- * QP r_rq.lock and s_lock should be held.
+ * QP s_lock should be held and interrupts disabled.
  */
 
-static void ipath_error_qp(struct ipath_qp *qp)
+void ipath_error_qp(struct ipath_qp *qp)
 {
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        struct ib_wc wc;
@@ -408,12 +411,14 @@ static void ipath_error_qp(struct ipath_qp *qp)
        qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
 
        wc.opcode = IB_WC_RECV;
+       spin_lock(&qp->r_rq.lock);
        while (qp->r_rq.tail != qp->r_rq.head) {
                wc.wr_id = get_rwqe_ptr(&qp->r_rq, qp->r_rq.tail)->wr_id;
                if (++qp->r_rq.tail >= qp->r_rq.size)
                        qp->r_rq.tail = 0;
                ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
        }
+       spin_unlock(&qp->r_rq.lock);
 }
 
 /**
@@ -433,8 +438,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        unsigned long flags;
        int ret;
 
-       spin_lock_irqsave(&qp->r_rq.lock, flags);
-       spin_lock(&qp->s_lock);
+       spin_lock_irqsave(&qp->s_lock, flags);
 
        cur_state = attr_mask & IB_QP_CUR_STATE ?
                attr->cur_qp_state : qp->state;
@@ -446,7 +450,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
        if (attr_mask & IB_QP_AV)
                if (attr->ah_attr.dlid == 0 ||
-                   attr->ah_attr.dlid >= IPS_MULTICAST_LID_BASE)
+                   attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
                        goto inval;
 
        if (attr_mask & IB_QP_PKEY_INDEX)
@@ -505,34 +509,19 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        }
 
        if (attr_mask & IB_QP_MIN_RNR_TIMER)
-               qp->s_min_rnr_timer = attr->min_rnr_timer;
+               qp->r_min_rnr_timer = attr->min_rnr_timer;
 
        if (attr_mask & IB_QP_QKEY)
                qp->qkey = attr->qkey;
 
-       if (attr_mask & IB_QP_PKEY_INDEX)
-               qp->s_pkey_index = attr->pkey_index;
-
        qp->state = new_state;
-       spin_unlock(&qp->s_lock);
-       spin_unlock_irqrestore(&qp->r_rq.lock, flags);
-
-       /*
-        * If QP1 changed to the RTS state, try to move to the link to INIT
-        * even if it was ACTIVE so the SM will reinitialize the SMA's
-        * state.
-        */
-       if (qp->ibqp.qp_num == 1 && new_state == IB_QPS_RTS) {
-               struct ipath_ibdev *dev = to_idev(ibqp->device);
+       spin_unlock_irqrestore(&qp->s_lock, flags);
 
-               ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
-       }
        ret = 0;
        goto bail;
 
 inval:
-       spin_unlock(&qp->s_lock);
-       spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+       spin_unlock_irqrestore(&qp->s_lock, flags);
        ret = -EINVAL;
 
 bail:
@@ -566,7 +555,7 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        attr->sq_draining = 0;
        attr->max_rd_atomic = 1;
        attr->max_dest_rd_atomic = 1;
-       attr->min_rnr_timer = qp->s_min_rnr_timer;
+       attr->min_rnr_timer = qp->r_min_rnr_timer;
        attr->port_num = 1;
        attr->timeout = 0;
        attr->retry_cnt = qp->s_retry_cnt;
@@ -593,21 +582,17 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
  * @qp: the queue pair to compute the AETH for
  *
  * Returns the AETH.
- *
- * The QP s_lock should be held.
  */
 __be32 ipath_compute_aeth(struct ipath_qp *qp)
 {
-       u32 aeth = atomic_read(&qp->msn) & IPS_MSN_MASK;
+       u32 aeth = qp->r_msn & IPATH_MSN_MASK;
 
-       if (qp->s_nak_state) {
-               aeth |= qp->s_nak_state << IPS_AETH_CREDIT_SHIFT;
-       } else if (qp->ibqp.srq) {
+       if (qp->ibqp.srq) {
                /*
                 * Shared receive queues don't generate credits.
                 * Set the credit field to the invalid value.
                 */
-               aeth |= IPS_AETH_CREDIT_INVAL << IPS_AETH_CREDIT_SHIFT;
+               aeth |= IPATH_AETH_CREDIT_INVAL << IPATH_AETH_CREDIT_SHIFT;
        } else {
                u32 min, max, x;
                u32 credits;
@@ -637,7 +622,7 @@ __be32 ipath_compute_aeth(struct ipath_qp *qp)
                        else
                                min = x;
                }
-               aeth |= x << IPS_AETH_CREDIT_SHIFT;
+               aeth |= x << IPATH_AETH_CREDIT_SHIFT;
        }
        return cpu_to_be32(aeth);
 }
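
ipath_compute_aeth() packs the message sequence number into the low 24 bits of the AETH and a credit code above it, with SRQs advertising the "invalid" code since they don't generate credits. A sketch of the packing (constants are illustrative, modeled on the IBA AETH layout of a 24-bit MSN plus a 5-bit credit field):

    #include <stdint.h>

    #define MSN_MASK          0xFFFFFFu  /* 24-bit message sequence number */
    #define AETH_CREDIT_SHIFT 24
    #define AETH_CREDIT_MASK  0x1Fu      /* 5-bit credit code */
    #define AETH_CREDIT_INVAL 0x1Fu      /* "no credit information" */

    /* Pack an AETH value: MSN in the low 24 bits, credit code above it.
     * Receivers without their own receive queue (SRQ) advertise the
     * invalid credit code instead of a count. */
    static uint32_t compute_aeth(uint32_t msn, uint32_t credit_code, int has_srq)
    {
            uint32_t aeth = msn & MSN_MASK;

            if (has_srq)
                    credit_code = AETH_CREDIT_INVAL;
            return aeth | ((credit_code & AETH_CREDIT_MASK) << AETH_CREDIT_SHIFT);
    }
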
@@ -663,12 +648,22 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
        size_t sz;
        struct ib_qp *ret;
 
-       if (init_attr->cap.max_send_sge > 255 ||
-           init_attr->cap.max_recv_sge > 255) {
+       if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
+           init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
+           init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs ||
+           init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
        }
 
+       if (init_attr->cap.max_send_sge +
+           init_attr->cap.max_recv_sge +
+           init_attr->cap.max_send_wr +
+           init_attr->cap.max_recv_wr == 0) {
+               ret = ERR_PTR(-EINVAL);
+               goto bail;
+       }
+
        switch (init_attr->qp_type) {
        case IB_QPT_UC:
        case IB_QPT_RC:
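
The new bounds come from module parameters rather than the hard-coded
255, and a QP with no send and no receive capacity at all is now
rejected outright.  A hedged sketch of the same predicate, with
placeholder limit values (the real ib_ipath_max_sges and
ib_ipath_max_qp_wrs defaults may differ):

#include <errno.h>
#include <stdint.h>

static const uint32_t max_sges = 32;		/* placeholder limit */
static const uint32_t max_qp_wrs = 16383;	/* placeholder limit */

struct qp_caps {
	uint32_t max_send_sge, max_recv_sge;
	uint32_t max_send_wr, max_recv_wr;
};

static int validate_qp_caps(const struct qp_caps *c)
{
	if (c->max_send_sge > max_sges || c->max_recv_sge > max_sges ||
	    c->max_send_wr > max_qp_wrs || c->max_recv_wr > max_qp_wrs)
		return -ENOMEM;	/* over the driver's advertised limits */
	if (c->max_send_sge + c->max_recv_sge +
	    c->max_send_wr + c->max_recv_wr == 0)
		return -EINVAL;	/* a QP that can do nothing is an error */
	return 0;
}
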
@@ -686,18 +681,26 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
        case IB_QPT_GSI:
                qp = kmalloc(sizeof(*qp), GFP_KERNEL);
                if (!qp) {
+                       vfree(swq);
                        ret = ERR_PTR(-ENOMEM);
                        goto bail;
                }
-               qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
-               sz = sizeof(struct ipath_sge) *
-                       init_attr->cap.max_recv_sge +
-                       sizeof(struct ipath_rwqe);
-               qp->r_rq.wq = vmalloc(qp->r_rq.size * sz);
-               if (!qp->r_rq.wq) {
-                       kfree(qp);
-                       ret = ERR_PTR(-ENOMEM);
-                       goto bail;
+               if (init_attr->srq) {
+                       qp->r_rq.size = 0;
+                       qp->r_rq.max_sge = 0;
+                       qp->r_rq.wq = NULL;
+               } else {
+                       qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
+                       qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
+                       sz = (sizeof(struct ipath_sge) * qp->r_rq.max_sge) +
+                               sizeof(struct ipath_rwqe);
+                       qp->r_rq.wq = vmalloc(qp->r_rq.size * sz);
+                       if (!qp->r_rq.wq) {
+                               kfree(qp);
+                               vfree(swq);
+                               ret = ERR_PTR(-ENOMEM);
+                               goto bail;
+                       }
                }
 
                /*
@@ -708,9 +711,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
                spin_lock_init(&qp->r_rq.lock);
                atomic_set(&qp->refcount, 0);
                init_waitqueue_head(&qp->wait);
-               tasklet_init(&qp->s_task,
-                            init_attr->qp_type == IB_QPT_RC ?
-                            ipath_do_rc_send : ipath_do_uc_send,
+               tasklet_init(&qp->s_task, ipath_do_ruc_send,
                             (unsigned long)qp);
                INIT_LIST_HEAD(&qp->piowait);
                INIT_LIST_HEAD(&qp->timerwait);
@@ -718,7 +719,6 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
                qp->s_wq = swq;
                qp->s_size = init_attr->cap.max_send_wr + 1;
                qp->s_max_sge = init_attr->cap.max_send_sge;
-               qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
                qp->s_flags = init_attr->sq_sig_type == IB_SIGNAL_REQ_WR ?
                        1 << IPATH_S_SIGNAL_REQ_WR : 0;
                dev = to_idev(ibpd->device);
@@ -888,18 +888,18 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
  */
 void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
 {
-       u32 credit = (aeth >> IPS_AETH_CREDIT_SHIFT) & IPS_AETH_CREDIT_MASK;
+       u32 credit = (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK;
 
        /*
         * If the credit is invalid, we can send
         * as many packets as we like.  Otherwise, we have to
         * honor the credit field.
         */
-       if (credit == IPS_AETH_CREDIT_INVAL) {
+       if (credit == IPATH_AETH_CREDIT_INVAL)
                qp->s_lsn = (u32) -1;
-       else if (qp->s_lsn != (u32) -1) {
+       else if (qp->s_lsn != (u32) -1) {
                /* Compute new LSN (i.e., MSN + credit) */
-               credit = (aeth + credit_table[credit]) & IPS_MSN_MASK;
+               credit = (aeth + credit_table[credit]) & IPATH_MSN_MASK;
                if (ipath_cmp24(credit, qp->s_lsn) > 0)
                        qp->s_lsn = credit;
        }
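
ipath_cmp24(), used above to decide whether the new limit advances
s_lsn, compares 24-bit sequence numbers on a circular number line
rather than as plain integers.  A self-contained sketch of the usual
trick: shift the 24-bit difference into the top of a signed 32-bit
word, then arithmetic-shift it back (the kernel can rely on arithmetic
right shift of signed values):

#include <stdint.h>

/* < 0 if a precedes b, 0 if equal, > 0 if a follows b, mod 2^24. */
static int cmp24(uint32_t a, uint32_t b)
{
	return ((int32_t)((a - b) << 8)) >> 8;
}

For example, cmp24(2, 0xFFFFFF) is 3, not negative: 0xFFFFFF sits one
step behind 0 on the ring, so a sequence number of 2 is three steps
ahead of it.
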
index 493b1821a93426e8b69e09c54e1e4277edeba410..774d1615ce2f134f7058ffb72880190f25c1adde 100644 (file)
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -31,7 +32,7 @@
  */
 
 #include "ipath_verbs.h"
-#include "ips_common.h"
+#include "ipath_common.h"
 
 /* cut down ridiculously long IB macro names */
 #define OP(x) IB_OPCODE_RC_##x
  * @qp: the QP whose SGE we're restarting
  * @wqe: the work queue to initialize the QP's SGE from
  *
- * The QP s_lock should be held.
+ * The QP s_lock should be held and interrupts disabled.
  */
 static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
 {
        struct ipath_ibdev *dev;
        u32 len;
 
-       len = ((qp->s_psn - wqe->psn) & IPS_PSN_MASK) *
+       len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) *
                ib_mtu_enum_to_int(qp->path_mtu);
        qp->s_sge.sge = wqe->sg_list[0];
        qp->s_sge.sg_list = wqe->sg_list + 1;
@@ -72,11 +73,10 @@ static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
  * Return bth0 if constructed; otherwise, return 0.
  * Note the QP s_lock must be held.
  */
-static inline u32 ipath_make_rc_ack(struct ipath_qp *qp,
-                                   struct ipath_other_headers *ohdr,
-                                   u32 pmtu)
+u32 ipath_make_rc_ack(struct ipath_qp *qp,
+                     struct ipath_other_headers *ohdr,
+                     u32 pmtu)
 {
-       struct ipath_sge_state *ss;
        u32 hwords;
        u32 len;
        u32 bth0;
@@ -90,13 +90,12 @@ static inline u32 ipath_make_rc_ack(struct ipath_qp *qp,
         */
        switch (qp->s_ack_state) {
        case OP(RDMA_READ_REQUEST):
-               ss = &qp->s_rdma_sge;
+               qp->s_cur_sge = &qp->s_rdma_sge;
                len = qp->s_rdma_len;
                if (len > pmtu) {
                        len = pmtu;
                        qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
-               }
-               else
+               } else
                        qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
                qp->s_rdma_len -= len;
                bth0 = qp->s_ack_state << 24;
@@ -108,7 +107,7 @@ static inline u32 ipath_make_rc_ack(struct ipath_qp *qp,
                qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
                /* FALLTHROUGH */
        case OP(RDMA_READ_RESPONSE_MIDDLE):
-               ss = &qp->s_rdma_sge;
+               qp->s_cur_sge = &qp->s_rdma_sge;
                len = qp->s_rdma_len;
                if (len > pmtu)
                        len = pmtu;
@@ -127,41 +126,50 @@ static inline u32 ipath_make_rc_ack(struct ipath_qp *qp,
                 * We have to prevent new requests from changing
                 * the r_sge state while an ipath_verbs_send()
                 * is in progress.
-                * Changing r_state allows the receiver
-                * to continue processing new packets.
-                * We do it here now instead of above so
-                * that we are sure the packet was sent before
-                * changing the state.
                 */
-               qp->r_state = OP(RDMA_READ_RESPONSE_LAST);
                qp->s_ack_state = OP(ACKNOWLEDGE);
-               return 0;
+               bth0 = 0;
+               goto bail;
 
        case OP(COMPARE_SWAP):
        case OP(FETCH_ADD):
-               ss = NULL;
+               qp->s_cur_sge = NULL;
                len = 0;
-               qp->r_state = OP(SEND_LAST);
-               qp->s_ack_state = OP(ACKNOWLEDGE);
-               bth0 = IB_OPCODE_ATOMIC_ACKNOWLEDGE << 24;
+               /*
+                * Set the s_ack_state so the receive interrupt handler
+                * won't try to send an ACK (out of order) until this one
+                * is actually sent.
+                */
+               qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
+               bth0 = OP(ATOMIC_ACKNOWLEDGE) << 24;
                ohdr->u.at.aeth = ipath_compute_aeth(qp);
-               ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->s_ack_atomic);
+               ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->r_atomic_data);
                hwords += sizeof(ohdr->u.at) / 4;
                break;
 
        default:
                /* Send a regular ACK. */
-               ss = NULL;
+               qp->s_cur_sge = NULL;
                len = 0;
-               qp->s_ack_state = OP(ACKNOWLEDGE);
-               bth0 = qp->s_ack_state << 24;
-               ohdr->u.aeth = ipath_compute_aeth(qp);
+               /*
+                * Set the s_ack_state so the receive interrupt handler
+                * won't try to send an ACK (out of order) until this one
+                * is actually sent.
+                */
+               qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
+               bth0 = OP(ACKNOWLEDGE) << 24;
+               if (qp->s_nak_state)
+                       ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
+                                                   (qp->s_nak_state <<
+                                                    IPATH_AETH_CREDIT_SHIFT));
+               else
+                       ohdr->u.aeth = ipath_compute_aeth(qp);
                hwords++;
        }
        qp->s_hdrwords = hwords;
-       qp->s_cur_sge = ss;
        qp->s_cur_size = len;
 
+bail:
        return bth0;
 }
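
Both ACK variants above build bth0 the same way: the IB opcode goes in
the top byte and the P_Key in the low 16 bits, with the data paths
additionally placing the pad count in bits 21:20.  A sketch of the
packing, leaving out the solicited-event and migration bits:

#include <stdint.h>

static uint32_t bth0_pack(uint8_t opcode, uint8_t pad, uint16_t pkey)
{
	return ((uint32_t)opcode << 24) |
	       ((uint32_t)(pad & 3) << 20) |
	       pkey;
}
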
 
@@ -174,11 +182,11 @@ static inline u32 ipath_make_rc_ack(struct ipath_qp *qp,
  * @bth2p: pointer to the BTH PSN word
  *
  * Return 1 if constructed; otherwise, return 0.
- * Note the QP s_lock must be held.
+ * Note the QP s_lock must be held and interrupts disabled.
  */
-static inline int ipath_make_rc_req(struct ipath_qp *qp,
-                                   struct ipath_other_headers *ohdr,
-                                   u32 pmtu, u32 *bth0p, u32 *bth2p)
+int ipath_make_rc_req(struct ipath_qp *qp,
+                     struct ipath_other_headers *ohdr,
+                     u32 pmtu, u32 *bth0p, u32 *bth2p)
 {
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        struct ipath_sge_state *ss;
@@ -257,7 +265,7 @@ static inline int ipath_make_rc_req(struct ipath_qp *qp,
                        break;
 
                case IB_WR_RDMA_WRITE:
-                       if (newreq)
+                       if (newreq && qp->s_lsn != (u32) -1)
                                qp->s_lsn++;
                        /* FALLTHROUGH */
                case IB_WR_RDMA_WRITE_WITH_IMM:
@@ -283,8 +291,7 @@ static inline int ipath_make_rc_req(struct ipath_qp *qp,
                        else {
                                qp->s_state =
                                        OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
-                               /* Immediate data comes
-                                * after RETH */
+                               /* Immediate data comes after RETH */
                                ohdr->u.rc.imm_data = wqe->wr.imm_data;
                                hwords += 1;
                                if (wqe->wr.send_flags & IB_SEND_SOLICITED)
@@ -304,7 +311,8 @@ static inline int ipath_make_rc_req(struct ipath_qp *qp,
                        qp->s_state = OP(RDMA_READ_REQUEST);
                        hwords += sizeof(ohdr->u.rc.reth) / 4;
                        if (newreq) {
-                               qp->s_lsn++;
+                               if (qp->s_lsn != (u32) -1)
+                                       qp->s_lsn++;
                                /*
                                 * Adjust s_next_psn to count the
                                 * expected number of responses.
@@ -335,7 +343,8 @@ static inline int ipath_make_rc_req(struct ipath_qp *qp,
                                wqe->wr.wr.atomic.compare_add);
                        hwords += sizeof(struct ib_atomic_eth) / 4;
                        if (newreq) {
-                               qp->s_lsn++;
+                               if (qp->s_lsn != (u32) -1)
+                                       qp->s_lsn++;
                                wqe->lpsn = wqe->psn;
                        }
                        if (++qp->s_cur == qp->s_size)
@@ -352,9 +361,14 @@ static inline int ipath_make_rc_req(struct ipath_qp *qp,
                        if (qp->s_tail >= qp->s_size)
                                qp->s_tail = 0;
                }
-               bth2 |= qp->s_psn++ & IPS_PSN_MASK;
+               bth2 |= qp->s_psn++ & IPATH_PSN_MASK;
                if ((int)(qp->s_psn - qp->s_next_psn) > 0)
                        qp->s_next_psn = qp->s_psn;
+               /*
+                * Put the QP on the pending list so lost ACKs will cause
+                * a retry.  More than one request can be pending so the
+                * QP may already be on the dev->pending list.
+                */
                spin_lock(&dev->pending_lock);
                if (list_empty(&qp->timerwait))
                        list_add_tail(&qp->timerwait,
@@ -364,8 +378,8 @@ static inline int ipath_make_rc_req(struct ipath_qp *qp,
 
        case OP(RDMA_READ_RESPONSE_FIRST):
                /*
-                * This case can only happen if a send is restarted.  See
-                * ipath_restart_rc().
+                * This case can only happen if a send is restarted.
+                * See ipath_restart_rc().
                 */
                ipath_init_restart(qp, wqe);
                /* FALLTHROUGH */
@@ -373,7 +387,7 @@ static inline int ipath_make_rc_req(struct ipath_qp *qp,
                qp->s_state = OP(SEND_MIDDLE);
                /* FALLTHROUGH */
        case OP(SEND_MIDDLE):
-               bth2 = qp->s_psn++ & IPS_PSN_MASK;
+               bth2 = qp->s_psn++ & IPATH_PSN_MASK;
                if ((int)(qp->s_psn - qp->s_next_psn) > 0)
                        qp->s_next_psn = qp->s_psn;
                ss = &qp->s_sge;
@@ -415,7 +429,7 @@ static inline int ipath_make_rc_req(struct ipath_qp *qp,
                qp->s_state = OP(RDMA_WRITE_MIDDLE);
                /* FALLTHROUGH */
        case OP(RDMA_WRITE_MIDDLE):
-               bth2 = qp->s_psn++ & IPS_PSN_MASK;
+               bth2 = qp->s_psn++ & IPATH_PSN_MASK;
                if ((int)(qp->s_psn - qp->s_next_psn) > 0)
                        qp->s_next_psn = qp->s_psn;
                ss = &qp->s_sge;
@@ -452,7 +466,7 @@ static inline int ipath_make_rc_req(struct ipath_qp *qp,
                 * See ipath_restart_rc().
                 */
                ipath_init_restart(qp, wqe);
-               len = ((qp->s_psn - wqe->psn) & IPS_PSN_MASK) * pmtu;
+               len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
                ohdr->u.rc.reth.vaddr =
                        cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
                ohdr->u.rc.reth.rkey =
@@ -460,7 +474,7 @@ static inline int ipath_make_rc_req(struct ipath_qp *qp,
                ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len);
                qp->s_state = OP(RDMA_READ_REQUEST);
                hwords += sizeof(ohdr->u.rc.reth) / 4;
-               bth2 = qp->s_psn++ & IPS_PSN_MASK;
+               bth2 = qp->s_psn++ & IPATH_PSN_MASK;
                if ((int)(qp->s_psn - qp->s_next_psn) > 0)
                        qp->s_next_psn = qp->s_psn;
                ss = NULL;
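
Restarting a partially sent request never re-walks user data
structures; the resume point is pure arithmetic, because every PSN
past the request's first PSN accounts for exactly one path-MTU-sized
chunk.  A sketch of the offset computation, assuming IPATH_PSN_MASK is
the 24-bit mask used throughout:

#include <stdint.h>

#define PSN_MASK 0xFFFFFF	/* assumed IPATH_PSN_MASK */

static uint32_t restart_offset(uint32_t psn, uint32_t first_psn,
			       uint32_t pmtu)
{
	return ((psn - first_psn) & PSN_MASK) * pmtu;
}

With a 2048-byte path MTU, resuming three PSNs into a request puts the
restart 6144 bytes past the original remote address, which is exactly
the adjustment the reth.vaddr computation above applies.
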
@@ -496,189 +510,169 @@ done:
        return 0;
 }
 
-static inline void ipath_make_rc_grh(struct ipath_qp *qp,
-                                    struct ib_global_route *grh,
-                                    u32 nwords)
-{
-       struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-
-       /* GRH header size in 32-bit words. */
-       qp->s_hdrwords += 10;
-       qp->s_hdr.u.l.grh.version_tclass_flow =
-               cpu_to_be32((6 << 28) |
-                           (grh->traffic_class << 20) |
-                           grh->flow_label);
-       qp->s_hdr.u.l.grh.paylen =
-               cpu_to_be16(((qp->s_hdrwords - 12) + nwords +
-                            SIZE_OF_CRC) << 2);
-       /* next_hdr is defined by C8-7 in ch. 8.4.1 */
-       qp->s_hdr.u.l.grh.next_hdr = 0x1B;
-       qp->s_hdr.u.l.grh.hop_limit = grh->hop_limit;
-       /* The SGID is 32-bit aligned. */
-       qp->s_hdr.u.l.grh.sgid.global.subnet_prefix = dev->gid_prefix;
-       qp->s_hdr.u.l.grh.sgid.global.interface_id =
-               ipath_layer_get_guid(dev->dd);
-       qp->s_hdr.u.l.grh.dgid = grh->dgid;
-}
-
 /**
- * ipath_do_rc_send - perform a send on an RC QP
- * @data: contains a pointer to the QP
+ * send_rc_ack - Construct an ACK packet and send it
+ * @qp: a pointer to the QP
  *
- * Process entries in the send work queue until credit or queue is
- * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
- * Otherwise, after we drop the QP s_lock, two threads could send
- * packets out of order.
+ * This is called from ipath_rc_rcv() and only uses the receive
+ * side QP state.
+ * Note that RDMA reads are handled in the send side QP state and tasklet.
  */
-void ipath_do_rc_send(unsigned long data)
+static void send_rc_ack(struct ipath_qp *qp)
 {
-       struct ipath_qp *qp = (struct ipath_qp *)data;
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-       unsigned long flags;
        u16 lrh0;
-       u32 nwords;
-       u32 extra_bytes;
        u32 bth0;
-       u32 bth2;
-       u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+       u32 hwords;
+       struct ipath_ib_header hdr;
        struct ipath_other_headers *ohdr;
 
-       if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
-               goto bail;
-
-       if (unlikely(qp->remote_ah_attr.dlid ==
-                    ipath_layer_get_lid(dev->dd))) {
-               struct ib_wc wc;
-
-               /*
-                * Pass in an uninitialized ib_wc to be consistent with
-                * other places where ipath_ruc_loopback() is called.
-                */
-               ipath_ruc_loopback(qp, &wc);
-               goto clear;
-       }
-
-       ohdr = &qp->s_hdr.u.oth;
-       if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
-               ohdr = &qp->s_hdr.u.l.oth;
-
-again:
-       /* Check for a constructed packet to be sent. */
-       if (qp->s_hdrwords != 0) {
-               /*
-                * If no PIO bufs are available, return.  An interrupt will
-                * call ipath_ib_piobufavail() when one is available.
-                */
-               _VERBS_INFO("h %u %p\n", qp->s_hdrwords, &qp->s_hdr);
-               _VERBS_INFO("d %u %p %u %p %u %u %u %u\n", qp->s_cur_size,
-                           qp->s_cur_sge->sg_list,
-                           qp->s_cur_sge->num_sge,
-                           qp->s_cur_sge->sge.vaddr,
-                           qp->s_cur_sge->sge.sge_length,
-                           qp->s_cur_sge->sge.length,
-                           qp->s_cur_sge->sge.m,
-                           qp->s_cur_sge->sge.n);
-               if (ipath_verbs_send(dev->dd, qp->s_hdrwords,
-                                    (u32 *) &qp->s_hdr, qp->s_cur_size,
-                                    qp->s_cur_sge)) {
-                       ipath_no_bufs_available(qp, dev);
-                       goto bail;
-               }
-               dev->n_unicast_xmit++;
-               /* Record that we sent the packet and s_hdr is empty. */
-               qp->s_hdrwords = 0;
-       }
-
-       /*
-        * The lock is needed to synchronize between setting
-        * qp->s_ack_state, resend timer, and post_send().
-        */
-       spin_lock_irqsave(&qp->s_lock, flags);
-
-       /* Sending responses has higher priority over sending requests. */
-       if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
-           (bth0 = ipath_make_rc_ack(qp, ohdr, pmtu)) != 0)
-               bth2 = qp->s_ack_psn++ & IPS_PSN_MASK;
-       else if (!ipath_make_rc_req(qp, ohdr, pmtu, &bth0, &bth2))
-               goto done;
-
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-
        /* Construct the header. */
-       extra_bytes = (4 - qp->s_cur_size) & 3;
-       nwords = (qp->s_cur_size + extra_bytes) >> 2;
-       lrh0 = IPS_LRH_BTH;
+       ohdr = &hdr.u.oth;
+       lrh0 = IPATH_LRH_BTH;
+       /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
+       hwords = 6;
        if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
-               ipath_make_rc_grh(qp, &qp->remote_ah_attr.grh, nwords);
-               lrh0 = IPS_LRH_GRH;
+               hwords += ipath_make_grh(dev, &hdr.u.l.grh,
+                                        &qp->remote_ah_attr.grh,
+                                        hwords, 0);
+               ohdr = &hdr.u.l.oth;
+               lrh0 = IPATH_LRH_GRH;
        }
+       /* read pkey_index w/o lock (it's atomic) */
+       bth0 = ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
+       if (qp->r_nak_state)
+               ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
+                                           (qp->r_nak_state <<
+                                            IPATH_AETH_CREDIT_SHIFT));
+       else
+               ohdr->u.aeth = ipath_compute_aeth(qp);
+       if (qp->r_ack_state >= OP(COMPARE_SWAP)) {
+               bth0 |= OP(ATOMIC_ACKNOWLEDGE) << 24;
+               ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->r_atomic_data);
+               hwords += sizeof(ohdr->u.at.atomic_ack_eth) / 4;
+       } else
+               bth0 |= OP(ACKNOWLEDGE) << 24;
        lrh0 |= qp->remote_ah_attr.sl << 4;
-       qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
-       qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
-       qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
-                                      SIZE_OF_CRC);
-       qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
-       bth0 |= ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
-       bth0 |= extra_bytes << 20;
+       hdr.lrh[0] = cpu_to_be16(lrh0);
+       hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+       hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
+       hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
        ohdr->bth[0] = cpu_to_be32(bth0);
        ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
-       ohdr->bth[2] = cpu_to_be32(bth2);
+       ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);
 
-       /* Check for more work to do. */
-       goto again;
+       /*
+        * If we can send the ACK, clear the ACK state.
+        */
+       if (ipath_verbs_send(dev->dd, hwords, (u32 *) &hdr, 0, NULL) == 0) {
+               qp->r_ack_state = OP(ACKNOWLEDGE);
+               dev->n_unicast_xmit++;
+       } else {
+               /*
+                * We are out of PIO buffers at the moment.
+                * Pass responsibility for sending the ACK to the
+                * send tasklet so that when a PIO buffer becomes
+                * available, the ACK is sent ahead of other outgoing
+                * packets.
+                */
+               dev->n_rc_qacks++;
+               spin_lock_irq(&qp->s_lock);
+               /* Don't coalesce if a RDMA read or atomic is pending. */
+               if (qp->s_ack_state == OP(ACKNOWLEDGE) ||
+                   qp->s_ack_state < OP(RDMA_READ_REQUEST)) {
+                       qp->s_ack_state = qp->r_ack_state;
+                       qp->s_nak_state = qp->r_nak_state;
+                       qp->s_ack_psn = qp->r_ack_psn;
+                       qp->r_ack_state = OP(ACKNOWLEDGE);
+               }
+               spin_unlock_irq(&qp->s_lock);
 
-done:
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-clear:
-       clear_bit(IPATH_S_BUSY, &qp->s_flags);
-bail:
-       return;
+               /* Call ipath_do_rc_send() in another thread. */
+               tasklet_hi_schedule(&qp->s_task);
+       }
 }
 
-static void send_rc_ack(struct ipath_qp *qp)
+/**
+ * reset_psn - reset the QP state to send starting from PSN
+ * @qp: the QP
+ * @psn: the packet sequence number to restart at
+ *
+ * This is called from ipath_rc_rcv() to process an incoming RC ACK
+ * for the given QP.
+ * Called at interrupt level with the QP s_lock held.
+ */
+static void reset_psn(struct ipath_qp *qp, u32 psn)
 {
-       struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-       u16 lrh0;
-       u32 bth0;
-       struct ipath_other_headers *ohdr;
+       u32 n = qp->s_last;
+       struct ipath_swqe *wqe = get_swqe_ptr(qp, n);
+       u32 opcode;
 
-       /* Construct the header. */
-       ohdr = &qp->s_hdr.u.oth;
-       lrh0 = IPS_LRH_BTH;
-       /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
-       qp->s_hdrwords = 6;
-       if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
-               ipath_make_rc_grh(qp, &qp->remote_ah_attr.grh, 0);
-               ohdr = &qp->s_hdr.u.l.oth;
-               lrh0 = IPS_LRH_GRH;
+       qp->s_cur = n;
+
+       /*
+        * If we are starting the request from the beginning,
+        * let the normal send code handle initialization.
+        */
+       if (ipath_cmp24(psn, wqe->psn) <= 0) {
+               qp->s_state = OP(SEND_LAST);
+               goto done;
        }
-       bth0 = ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
-       ohdr->u.aeth = ipath_compute_aeth(qp);
-       if (qp->s_ack_state >= OP(COMPARE_SWAP)) {
-               bth0 |= IB_OPCODE_ATOMIC_ACKNOWLEDGE << 24;
-               ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->s_ack_atomic);
-               qp->s_hdrwords += sizeof(ohdr->u.at.atomic_ack_eth) / 4;
+
+       /* Find the work request opcode corresponding to the given PSN. */
+       opcode = wqe->wr.opcode;
+       for (;;) {
+               int diff;
+
+               if (++n == qp->s_size)
+                       n = 0;
+               if (n == qp->s_tail)
+                       break;
+               wqe = get_swqe_ptr(qp, n);
+               diff = ipath_cmp24(psn, wqe->psn);
+               if (diff < 0)
+                       break;
+               qp->s_cur = n;
+               /*
+                * If we are starting the request from the beginning,
+                * let the normal send code handle initialization.
+                */
+               if (diff == 0) {
+                       qp->s_state = OP(SEND_LAST);
+                       goto done;
+               }
+               opcode = wqe->wr.opcode;
        }
-       else
-               bth0 |= OP(ACKNOWLEDGE) << 24;
-       lrh0 |= qp->remote_ah_attr.sl << 4;
-       qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
-       qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
-       qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + SIZE_OF_CRC);
-       qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
-       ohdr->bth[0] = cpu_to_be32(bth0);
-       ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
-       ohdr->bth[2] = cpu_to_be32(qp->s_ack_psn & IPS_PSN_MASK);
 
        /*
-        * If we can send the ACK, clear the ACK state.
+        * Set the state to restart in the middle of a request.
+        * Don't change the s_sge, s_cur_sge, or s_cur_size.
+        * See ipath_do_rc_send().
         */
-       if (ipath_verbs_send(dev->dd, qp->s_hdrwords, (u32 *) &qp->s_hdr,
-                            0, NULL) == 0) {
-               qp->s_ack_state = OP(ACKNOWLEDGE);
-               dev->n_rc_qacks++;
-               dev->n_unicast_xmit++;
+       switch (opcode) {
+       case IB_WR_SEND:
+       case IB_WR_SEND_WITH_IMM:
+               qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
+               break;
+
+       case IB_WR_RDMA_WRITE:
+       case IB_WR_RDMA_WRITE_WITH_IMM:
+               qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
+               break;
+
+       case IB_WR_RDMA_READ:
+               qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
+               break;
+
+       default:
+               /*
+                * This case shouldn't happen since there is only
+                * one PSN per req.
+                */
+               qp->s_state = OP(SEND_LAST);
        }
+done:
+       qp->s_psn = psn;
 }
 
 /**
@@ -687,13 +681,12 @@ static void send_rc_ack(struct ipath_qp *qp)
  * @psn: packet sequence number for the request
  * @wc: the work completion request
  *
- * The QP s_lock should be held.
+ * The QP s_lock should be held and interrupts disabled.
  */
 void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
 {
        struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
        struct ipath_ibdev *dev;
-       u32 n;
 
        /*
         * If there are no requests pending, we are done.
@@ -735,62 +728,7 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
        else
                dev->n_rc_resends += (int)qp->s_psn - (int)psn;
 
-       /*
-        * If we are starting the request from the beginning, let the normal
-        * send code handle initialization.
-        */
-       qp->s_cur = qp->s_last;
-       if (ipath_cmp24(psn, wqe->psn) <= 0) {
-               qp->s_state = OP(SEND_LAST);
-               qp->s_psn = wqe->psn;
-       } else {
-               n = qp->s_cur;
-               for (;;) {
-                       if (++n == qp->s_size)
-                               n = 0;
-                       if (n == qp->s_tail) {
-                               if (ipath_cmp24(psn, qp->s_next_psn) >= 0) {
-                                       qp->s_cur = n;
-                                       wqe = get_swqe_ptr(qp, n);
-                               }
-                               break;
-                       }
-                       wqe = get_swqe_ptr(qp, n);
-                       if (ipath_cmp24(psn, wqe->psn) < 0)
-                               break;
-                       qp->s_cur = n;
-               }
-               qp->s_psn = psn;
-
-               /*
-                * Reset the state to restart in the middle of a request.
-                * Don't change the s_sge, s_cur_sge, or s_cur_size.
-                * See ipath_do_rc_send().
-                */
-               switch (wqe->wr.opcode) {
-               case IB_WR_SEND:
-               case IB_WR_SEND_WITH_IMM:
-                       qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
-                       break;
-
-               case IB_WR_RDMA_WRITE:
-               case IB_WR_RDMA_WRITE_WITH_IMM:
-                       qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
-                       break;
-
-               case IB_WR_RDMA_READ:
-                       qp->s_state =
-                               OP(RDMA_READ_RESPONSE_MIDDLE);
-                       break;
-
-               default:
-                       /*
-                        * This case shouldn't happen since its only
-                        * one PSN per req.
-                        */
-                       qp->s_state = OP(SEND_LAST);
-               }
-       }
+       reset_psn(qp, psn);
 
 done:
        tasklet_hi_schedule(&qp->s_task);
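
ipath_restart_rc() now delegates the queue walk to reset_psn(), which
the restart and RNR paths share.  The walk itself is simple: scan
forward from s_last until the next request's first PSN passes the
target, so the cursor lands on the request containing that PSN.  A
hedged sketch, with a callback standing in for get_swqe_ptr(qp, n)->psn
and the 24-bit compare inlined:

#include <stdint.h>

static int cmp24(uint32_t a, uint32_t b)	/* 24-bit circular compare */
{
	return ((int32_t)((a - b) << 8)) >> 8;
}

static uint32_t walk_to_psn(uint32_t (*wqe_psn)(uint32_t n),
			    uint32_t s_last, uint32_t s_tail,
			    uint32_t s_size, uint32_t psn)
{
	uint32_t n = s_last;

	for (;;) {
		uint32_t next = (n + 1 == s_size) ? 0 : n + 1;

		if (next == s_tail)		/* ran off the posted queue */
			break;
		if (cmp24(psn, wqe_psn(next)) < 0)	/* psn inside wqe n */
			break;
		n = next;
	}
	return n;
}
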
@@ -799,77 +737,15 @@ bail:
        return;
 }
 
-/**
- * reset_psn - reset the QP state to send starting from PSN
- * @qp: the QP
- * @psn: the packet sequence number to restart at
- *
- * This is called from ipath_rc_rcv() to process an incoming RC ACK
- * for the given QP.
- * Called at interrupt level with the QP s_lock held.
- */
-static void reset_psn(struct ipath_qp *qp, u32 psn)
-{
-       struct ipath_swqe *wqe;
-       u32 n;
-
-       n = qp->s_cur;
-       wqe = get_swqe_ptr(qp, n);
-       for (;;) {
-               if (++n == qp->s_size)
-                       n = 0;
-               if (n == qp->s_tail) {
-                       if (ipath_cmp24(psn, qp->s_next_psn) >= 0) {
-                               qp->s_cur = n;
-                               wqe = get_swqe_ptr(qp, n);
-                       }
-                       break;
-               }
-               wqe = get_swqe_ptr(qp, n);
-               if (ipath_cmp24(psn, wqe->psn) < 0)
-                       break;
-               qp->s_cur = n;
-       }
-       qp->s_psn = psn;
-
-       /*
-        * Set the state to restart in the middle of a
-        * request.  Don't change the s_sge, s_cur_sge, or
-        * s_cur_size.  See ipath_do_rc_send().
-        */
-       switch (wqe->wr.opcode) {
-       case IB_WR_SEND:
-       case IB_WR_SEND_WITH_IMM:
-               qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
-               break;
-
-       case IB_WR_RDMA_WRITE:
-       case IB_WR_RDMA_WRITE_WITH_IMM:
-               qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
-               break;
-
-       case IB_WR_RDMA_READ:
-               qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
-               break;
-
-       default:
-               /*
-                * This case shouldn't happen since its only
-                * one PSN per req.
-                */
-               qp->s_state = OP(SEND_LAST);
-       }
-}
-
 /**
  * do_rc_ack - process an incoming RC ACK
  * @qp: the QP the ACK came in on
  * @psn: the packet sequence number of the ACK
  * @opcode: the opcode of the request that resulted in the ACK
  *
- * This is called from ipath_rc_rcv() to process an incoming RC ACK
+ * This is called from ipath_rc_rcv_resp() to process an incoming RC ACK
  * for the given QP.
- * Called at interrupt level with the QP s_lock held.
+ * Called at interrupt level with the QP s_lock held and interrupts disabled.
  * Returns 1 if OK, 0 if current operation should be aborted (NAK).
  */
 static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
@@ -1006,26 +882,16 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
                if (qp->s_last == qp->s_tail)
                        goto bail;
 
-               /* The last valid PSN seen is the previous request's. */
-               qp->s_last_psn = wqe->psn - 1;
+               /* The last valid PSN is the previous PSN. */
+               qp->s_last_psn = psn - 1;
 
                dev->n_rc_resends += (int)qp->s_psn - (int)psn;
 
-               /*
-                * If we are starting the request from the beginning, let
-                * the normal send code handle initialization.
-                */
-               qp->s_cur = qp->s_last;
-               wqe = get_swqe_ptr(qp, qp->s_cur);
-               if (ipath_cmp24(psn, wqe->psn) <= 0) {
-                       qp->s_state = OP(SEND_LAST);
-                       qp->s_psn = wqe->psn;
-               } else
-                       reset_psn(qp, psn);
+               reset_psn(qp, psn);
 
                qp->s_rnr_timeout =
-                       ib_ipath_rnr_table[(aeth >> IPS_AETH_CREDIT_SHIFT) &
-                                          IPS_AETH_CREDIT_MASK];
+                       ib_ipath_rnr_table[(aeth >> IPATH_AETH_CREDIT_SHIFT) &
+                                          IPATH_AETH_CREDIT_MASK];
                ipath_insert_rnr_queue(qp);
                goto bail;
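
The renamed masks make the RNR path read like the credit path: the
5-bit field above the MSN is reused as the RNR timer code, which
indexes a 32-entry delay table.  A sketch using the IBA-defined delays
in microseconds (code 0 is the largest; ib_ipath_rnr_table is assumed
to hold these converted to the driver's internal ticks):

#include <stdint.h>

static const uint32_t rnr_delay_usec[32] = {
	655360, 10, 20, 30, 40, 60, 80, 120,
	160, 240, 320, 480, 640, 960, 1280, 1920,
	2560, 3840, 5120, 7680, 10240, 15360, 20480, 30720,
	40960, 61440, 81920, 122880, 163840, 245760, 327680, 491520
};

static uint32_t rnr_delay_from_aeth(uint32_t aeth)
{
	return rnr_delay_usec[(aeth >> 24) & 0x1F];	/* shift/mask as above */
}
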
 
@@ -1033,8 +899,8 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
                /* The last valid PSN seen is the previous request's. */
                if (qp->s_last != qp->s_tail)
                        qp->s_last_psn = wqe->psn - 1;
-               switch ((aeth >> IPS_AETH_CREDIT_SHIFT) &
-                       IPS_AETH_CREDIT_MASK) {
+               switch ((aeth >> IPATH_AETH_CREDIT_SHIFT) &
+                       IPATH_AETH_CREDIT_MASK) {
                case 0: /* PSN sequence error */
                        dev->n_seq_naks++;
                        /*
@@ -1182,32 +1048,33 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
                        goto ack_done;
                }
        rdma_read:
-       if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST)))
-               goto ack_done;
-       if (unlikely(tlen != (hdrsize + pmtu + 4)))
-               goto ack_done;
-       if (unlikely(pmtu >= qp->s_len))
-               goto ack_done;
-       /* We got a response so update the timeout. */
-       if (unlikely(qp->s_last == qp->s_tail ||
-                    get_swqe_ptr(qp, qp->s_last)->wr.opcode !=
-                    IB_WR_RDMA_READ))
-               goto ack_done;
-       spin_lock(&dev->pending_lock);
-       if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
-               list_move_tail(&qp->timerwait,
-                              &dev->pending[dev->pending_index]);
-       spin_unlock(&dev->pending_lock);
-       /*
-        * Update the RDMA receive state but do the copy w/o holding the
-        * locks and blocking interrupts.  XXX Yet another place that
-        * affects relaxed RDMA order since we don't want s_sge modified.
-        */
-       qp->s_len -= pmtu;
-       qp->s_last_psn = psn;
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-       ipath_copy_sge(&qp->s_sge, data, pmtu);
-       goto bail;
+               if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST)))
+                       goto ack_done;
+               if (unlikely(tlen != (hdrsize + pmtu + 4)))
+                       goto ack_done;
+               if (unlikely(pmtu >= qp->s_len))
+                       goto ack_done;
+               /* We got a response so update the timeout. */
+               if (unlikely(qp->s_last == qp->s_tail ||
+                            get_swqe_ptr(qp, qp->s_last)->wr.opcode !=
+                            IB_WR_RDMA_READ))
+                       goto ack_done;
+               spin_lock(&dev->pending_lock);
+               if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
+                       list_move_tail(&qp->timerwait,
+                                      &dev->pending[dev->pending_index]);
+               spin_unlock(&dev->pending_lock);
+               /*
+                * Update the RDMA receive state but do the copy w/o
+                * holding the locks and blocking interrupts.
+                * XXX Yet another place that affects relaxed RDMA order
+                * since we don't want s_sge modified.
+                */
+               qp->s_len -= pmtu;
+               qp->s_last_psn = psn;
+               spin_unlock_irqrestore(&qp->s_lock, flags);
+               ipath_copy_sge(&qp->s_sge, data, pmtu);
+               goto bail;
 
        case OP(RDMA_READ_RESPONSE_LAST):
                /* ACKs READ req. */
@@ -1230,18 +1097,12 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
                 * ICRC (4).
                 */
                if (unlikely(tlen <= (hdrsize + pad + 8))) {
-                       /*
-                        * XXX Need to generate an error CQ
-                        * entry.
-                        */
+                       /* XXX Need to generate an error CQ entry. */
                        goto ack_done;
                }
                tlen -= hdrsize + pad + 8;
                if (unlikely(tlen != qp->s_len)) {
-                       /*
-                        * XXX Need to generate an error CQ
-                        * entry.
-                        */
+                       /* XXX Need to generate an error CQ entry. */
                        goto ack_done;
                }
                if (!header_in_data)
@@ -1254,9 +1115,12 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
                if (do_rc_ack(qp, aeth, psn, OP(RDMA_READ_RESPONSE_LAST))) {
                        /*
                         * Change the state so we continue
-                        * processing new requests.
+                        * processing new requests and wake up the
+                        * tasklet if there are posted sends.
                         */
                        qp->s_state = OP(SEND_LAST);
+                       if (qp->s_tail != qp->s_head)
+                               tasklet_hi_schedule(&qp->s_task);
                }
                goto ack_done;
        }
@@ -1302,18 +1166,16 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
                 * Don't queue the NAK if a RDMA read, atomic, or
                 * NAK is pending though.
                 */
-               spin_lock(&qp->s_lock);
-               if ((qp->s_ack_state >= OP(RDMA_READ_REQUEST) &&
-                    qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE) ||
-                   qp->s_nak_state != 0) {
-                       spin_unlock(&qp->s_lock);
+               if (qp->s_ack_state != OP(ACKNOWLEDGE) ||
+                   qp->r_nak_state != 0)
                        goto done;
+               if (qp->r_ack_state < OP(COMPARE_SWAP)) {
+                       qp->r_ack_state = OP(SEND_ONLY);
+                       qp->r_nak_state = IB_NAK_PSN_ERROR;
+                       /* Use the expected PSN. */
+                       qp->r_ack_psn = qp->r_psn;
                }
-               qp->s_ack_state = OP(SEND_ONLY);
-               qp->s_nak_state = IB_NAK_PSN_ERROR;
-               /* Use the expected PSN. */
-               qp->s_ack_psn = qp->r_psn;
-               goto resched;
+               goto send_ack;
        }
 
        /*
@@ -1327,27 +1189,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
         * send the earliest so that RDMA reads can be restarted at
         * the requester's expected PSN.
         */
-       spin_lock(&qp->s_lock);
-       if (qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE &&
-           ipath_cmp24(psn, qp->s_ack_psn) >= 0) {
-               if (qp->s_ack_state < IB_OPCODE_RDMA_READ_REQUEST)
-                       qp->s_ack_psn = psn;
-               spin_unlock(&qp->s_lock);
-               goto done;
-       }
-       switch (opcode) {
-       case OP(RDMA_READ_REQUEST):
-               /*
-                * We have to be careful to not change s_rdma_sge
-                * while ipath_do_rc_send() is using it and not
-                * holding the s_lock.
-                */
-               if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
-                   qp->s_ack_state >= IB_OPCODE_RDMA_READ_REQUEST) {
-                       spin_unlock(&qp->s_lock);
-                       dev->n_rdma_dup_busy++;
-                       goto done;
-               }
+       if (opcode == OP(RDMA_READ_REQUEST)) {
                /* RETH comes after BTH */
                if (!header_in_data)
                        reth = &ohdr->u.rc.reth;
@@ -1355,6 +1197,22 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
                        reth = (struct ib_reth *)data;
                        data += sizeof(*reth);
                }
+               /*
+                * If we receive a duplicate RDMA request, it means the
+                * requester saw a sequence error and needs to restart
+                * from an earlier point.  We can abort the current
+                * RDMA read send in that case.
+                */
+               spin_lock_irq(&qp->s_lock);
+               if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
+                   (qp->s_hdrwords || ipath_cmp24(psn, qp->s_ack_psn) >= 0)) {
+                       /*
+                        * We are already sending earlier requested data.
+                        * Don't abort it to send later out of sequence data.
+                        */
+                       spin_unlock_irq(&qp->s_lock);
+                       goto done;
+               }
                qp->s_rdma_len = be32_to_cpu(reth->length);
                if (qp->s_rdma_len != 0) {
                        u32 rkey = be32_to_cpu(reth->rkey);
@@ -1368,8 +1226,10 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
                        ok = ipath_rkey_ok(dev, &qp->s_rdma_sge,
                                           qp->s_rdma_len, vaddr, rkey,
                                           IB_ACCESS_REMOTE_READ);
-                       if (unlikely(!ok))
+                       if (unlikely(!ok)) {
+                               spin_unlock_irq(&qp->s_lock);
                                goto done;
+                       }
                } else {
                        qp->s_rdma_sge.sg_list = NULL;
                        qp->s_rdma_sge.num_sge = 0;
@@ -1378,25 +1238,44 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
                        qp->s_rdma_sge.sge.length = 0;
                        qp->s_rdma_sge.sge.sge_length = 0;
                }
-               break;
+               qp->s_ack_state = opcode;
+               qp->s_ack_psn = psn;
+               spin_unlock_irq(&qp->s_lock);
+               tasklet_hi_schedule(&qp->s_task);
+               goto send_ack;
+       }
+
+       /*
+        * A pending RDMA read will ACK anything before it so
+        * ignore earlier duplicate requests.
+        */
+       if (qp->s_ack_state != OP(ACKNOWLEDGE))
+               goto done;
 
+       /*
+        * If an ACK is pending, don't replace the pending ACK
+        * with an earlier one since the later one will ACK the earlier.
+        * Also, if we already have a pending atomic, send it.
+        */
+       if (qp->r_ack_state != OP(ACKNOWLEDGE) &&
+           (ipath_cmp24(psn, qp->r_ack_psn) <= 0 ||
+            qp->r_ack_state >= OP(COMPARE_SWAP)))
+               goto send_ack;
+       switch (opcode) {
        case OP(COMPARE_SWAP):
        case OP(FETCH_ADD):
                /*
-                * Check for the PSN of the last atomic operations
+                * Check for the PSN of the last atomic operation
                 * performed and resend the result if found.
                 */
-               if ((psn & IPS_PSN_MASK) != qp->r_atomic_psn) {
-                       spin_unlock(&qp->s_lock);
+               if ((psn & IPATH_PSN_MASK) != qp->r_atomic_psn)
                        goto done;
-               }
-               qp->s_ack_atomic = qp->r_atomic_data;
                break;
        }
-       qp->s_ack_state = opcode;
-       qp->s_nak_state = 0;
-       qp->s_ack_psn = psn;
-resched:
+       qp->r_ack_state = opcode;
+       qp->r_nak_state = 0;
+       qp->r_ack_psn = psn;
+send_ack:
        return 0;
 
 done:
@@ -1424,7 +1303,6 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
        u32 hdrsize;
        u32 psn;
        u32 pad;
-       unsigned long flags;
        struct ib_wc wc;
        u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
        int diff;
@@ -1453,11 +1331,6 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                } else
                        psn = be32_to_cpu(ohdr->bth[2]);
        }
-       /*
-        * The opcode is in the low byte when its in network order
-        * (top byte when in host order).
-        */
-       opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
 
        /*
         * Process responses (ACKs) before anything else.  Note that the
@@ -1465,22 +1338,21 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
         * queue rather than the expected receive packet sequence number.
         * In other words, this QP is the requester.
         */
+       opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
        if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
            opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
                ipath_rc_rcv_resp(dev, ohdr, data, tlen, qp, opcode, psn,
                                  hdrsize, pmtu, header_in_data);
-               goto bail;
+               goto done;
        }
 
-       spin_lock_irqsave(&qp->r_rq.lock, flags);
-
        /* Compute 24 bits worth of difference. */
        diff = ipath_cmp24(psn, qp->r_psn);
        if (unlikely(diff)) {
                if (ipath_rc_rcv_error(dev, ohdr, data, qp, opcode,
                                       psn, diff, header_in_data))
                        goto done;
-               goto resched;
+               goto send_ack;
        }
 
        /* Check for opcode sequence errors. */
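
The diff computed just above routes the packet before any opcode
checking happens: duplicates are re-ACKed (or answered from the cached
atomic result) and losses are NAKed.  A sketch of the classification,
with the 24-bit compare inlined:

#include <stdint.h>

/* Returns < 0 for a duplicate (re-ACK or resend a cached result),
 * > 0 for a lost packet (queue a PSN-error NAK via r_ack_state),
 * 0 for the expected in-sequence packet. */
static int classify_psn(uint32_t psn, uint32_t r_psn)
{
	return ((int32_t)((psn - r_psn) << 8)) >> 8;
}

Note that duplicates and sequence errors no longer take r_rq.lock at
all; they just set the r_ack_* fields and fall through to send_ack.
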
@@ -1492,22 +1364,19 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
                        break;
        nack_inv:
-       /*
-        * A NAK will ACK earlier sends and RDMA writes.  Don't queue the
-        * NAK if a RDMA read, atomic, or NAK is pending though.
-        */
-       spin_lock(&qp->s_lock);
-       if (qp->s_ack_state >= OP(RDMA_READ_REQUEST) &&
-           qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE) {
-               spin_unlock(&qp->s_lock);
-               goto done;
-       }
-       /* XXX Flush WQEs */
-       qp->state = IB_QPS_ERR;
-       qp->s_ack_state = OP(SEND_ONLY);
-       qp->s_nak_state = IB_NAK_INVALID_REQUEST;
-       qp->s_ack_psn = qp->r_psn;
-       goto resched;
+               /*
+                * A NAK will ACK earlier sends and RDMA writes.
+                * Don't queue the NAK if a RDMA read, atomic, or NAK
+                * is pending though.
+                */
+               if (qp->r_ack_state >= OP(COMPARE_SWAP))
+                       goto send_ack;
+               /* XXX Flush WQEs */
+               qp->state = IB_QPS_ERR;
+               qp->r_ack_state = OP(SEND_ONLY);
+               qp->r_nak_state = IB_NAK_INVALID_REQUEST;
+               qp->r_ack_psn = qp->r_psn;
+               goto send_ack;
 
        case OP(RDMA_WRITE_FIRST):
        case OP(RDMA_WRITE_MIDDLE):
@@ -1517,20 +1386,6 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                        break;
                goto nack_inv;
 
-       case OP(RDMA_READ_REQUEST):
-       case OP(COMPARE_SWAP):
-       case OP(FETCH_ADD):
-               /*
-                * Drop all new requests until a response has been sent.  A
-                * new request then ACKs the RDMA response we sent.  Relaxed
-                * ordering would allow new requests to be processed but we
-                * would need to keep a queue of rwqe's for all that are in
-                * progress.  Note that we can't RNR NAK this request since
-                * the RDMA READ or atomic response is already queued to be
-                * sent (unless we implement a response send queue).
-                */
-               goto done;
-
        default:
                if (opcode == OP(SEND_MIDDLE) ||
                    opcode == OP(SEND_LAST) ||
@@ -1539,6 +1394,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                    opcode == OP(RDMA_WRITE_LAST) ||
                    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
                        goto nack_inv;
+               /*
+                * Note that it is up to the requester to not send a new
+                * RDMA read or atomic operation before receiving an ACK
+                * for the previous operation.
+                */
                break;
        }
 
@@ -1555,17 +1415,12 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                         * Don't queue the NAK if a RDMA read or atomic
                         * is pending though.
                         */
-                       spin_lock(&qp->s_lock);
-                       if (qp->s_ack_state >=
-                           OP(RDMA_READ_REQUEST) &&
-                           qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE) {
-                               spin_unlock(&qp->s_lock);
-                               goto done;
-                       }
-                       qp->s_ack_state = OP(SEND_ONLY);
-                       qp->s_nak_state = IB_RNR_NAK | qp->s_min_rnr_timer;
-                       qp->s_ack_psn = qp->r_psn;
-                       goto resched;
+                       if (qp->r_ack_state >= OP(COMPARE_SWAP))
+                               goto send_ack;
+                       qp->r_ack_state = OP(SEND_ONLY);
+                       qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
+                       qp->r_ack_psn = qp->r_psn;
+                       goto send_ack;
                }
                qp->r_rcv_len = 0;
                /* FALLTHROUGH */
@@ -1622,7 +1477,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                if (unlikely(wc.byte_len > qp->r_len))
                        goto nack_inv;
                ipath_copy_sge(&qp->r_sge, data, tlen);
-               atomic_inc(&qp->msn);
+               qp->r_msn++;
                if (opcode == OP(RDMA_WRITE_LAST) ||
                    opcode == OP(RDMA_WRITE_ONLY))
                        break;
@@ -1666,29 +1521,8 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                        ok = ipath_rkey_ok(dev, &qp->r_sge,
                                           qp->r_len, vaddr, rkey,
                                           IB_ACCESS_REMOTE_WRITE);
-                       if (unlikely(!ok)) {
-                       nack_acc:
-                               /*
-                                * A NAK will ACK earlier sends and RDMA
-                                * writes.  Don't queue the NAK if a RDMA
-                                * read, atomic, or NAK is pending though.
-                                */
-                               spin_lock(&qp->s_lock);
-                               if (qp->s_ack_state >=
-                                   OP(RDMA_READ_REQUEST) &&
-                                   qp->s_ack_state !=
-                                   IB_OPCODE_ACKNOWLEDGE) {
-                                       spin_unlock(&qp->s_lock);
-                                       goto done;
-                               }
-                               /* XXX Flush WQEs */
-                               qp->state = IB_QPS_ERR;
-                               qp->s_ack_state = OP(RDMA_WRITE_ONLY);
-                               qp->s_nak_state =
-                                       IB_NAK_REMOTE_ACCESS_ERROR;
-                               qp->s_ack_psn = qp->r_psn;
-                               goto resched;
-                       }
+                       if (unlikely(!ok))
+                               goto nack_acc;
                } else {
                        qp->r_sge.sg_list = NULL;
                        qp->r_sge.sge.mr = NULL;
@@ -1715,12 +1549,10 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                        reth = (struct ib_reth *)data;
                        data += sizeof(*reth);
                }
-               spin_lock(&qp->s_lock);
-               if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
-                   qp->s_ack_state >= IB_OPCODE_RDMA_READ_REQUEST) {
-                       spin_unlock(&qp->s_lock);
-                       goto done;
-               }
+               if (unlikely(!(qp->qp_access_flags &
+                              IB_ACCESS_REMOTE_READ)))
+                       goto nack_acc;
+               spin_lock_irq(&qp->s_lock);
                qp->s_rdma_len = be32_to_cpu(reth->length);
                if (qp->s_rdma_len != 0) {
                        u32 rkey = be32_to_cpu(reth->rkey);
@@ -1732,7 +1564,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                                           qp->s_rdma_len, vaddr, rkey,
                                           IB_ACCESS_REMOTE_READ);
                        if (unlikely(!ok)) {
-                               spin_unlock(&qp->s_lock);
+                               spin_unlock_irq(&qp->s_lock);
                                goto nack_acc;
                        }
                        /*
@@ -1749,21 +1581,25 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                        qp->s_rdma_sge.sge.length = 0;
                        qp->s_rdma_sge.sge.sge_length = 0;
                }
-               if (unlikely(!(qp->qp_access_flags &
-                              IB_ACCESS_REMOTE_READ)))
-                       goto nack_acc;
                /*
                 * We need to increment the MSN here instead of when we
                 * finish sending the result since a duplicate request would
                 * increment it more than once.
                 */
-               atomic_inc(&qp->msn);
+               qp->r_msn++;
+
                qp->s_ack_state = opcode;
-               qp->s_nak_state = 0;
                qp->s_ack_psn = psn;
+               spin_unlock_irq(&qp->s_lock);
+
                qp->r_psn++;
                qp->r_state = opcode;
-               goto rdmadone;
+               qp->r_nak_state = 0;
+
+               /* Call ipath_do_rc_send() in another thread. */
+               tasklet_hi_schedule(&qp->s_task);
+
+               goto done;
 
        case OP(COMPARE_SWAP):
        case OP(FETCH_ADD): {
@@ -1792,7 +1628,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                        goto nack_acc;
                /* Perform atomic OP and save result. */
                sdata = be64_to_cpu(ateth->swap_data);
-               spin_lock(&dev->pending_lock);
+               spin_lock_irq(&dev->pending_lock);
                qp->r_atomic_data = *(u64 *) qp->r_sge.sge.vaddr;
                if (opcode == OP(FETCH_ADD))
                        *(u64 *) qp->r_sge.sge.vaddr =
@@ -1800,9 +1636,9 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                else if (qp->r_atomic_data ==
                         be64_to_cpu(ateth->compare_data))
                        *(u64 *) qp->r_sge.sge.vaddr = sdata;
-               spin_unlock(&dev->pending_lock);
-               atomic_inc(&qp->msn);
-               qp->r_atomic_psn = psn & IPS_PSN_MASK;
+               spin_unlock_irq(&dev->pending_lock);
+               qp->r_msn++;
+               qp->r_atomic_psn = psn & IPATH_PSN_MASK;
                psn |= 1 << 31;
                break;
        }
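
The hunk above also drops atomic_inc(&qp->msn) in favour of a plain qp->r_msn++ and widens the pending_lock section to spin_lock_irq(). A minimal consolidated sketch of the emulated atomic follows; it is a hypothetical helper built only from names visible in the diff, assuming dev->pending_lock serializes every software-emulated atomic on the device:

        /*
         * Hypothetical helper consolidating the emulated FETCH_ADD /
         * COMPARE_SWAP from the hunk above.  Once only the receive path
         * advances r_msn under this lock, no atomic_t is needed for it.
         */
        static u64 emulate_atomic(struct ipath_ibdev *dev, struct ipath_qp *qp,
                                  int opcode, u64 sdata, u64 compare)
        {
                u64 *vaddr = (u64 *) qp->r_sge.sge.vaddr;

                spin_lock_irq(&dev->pending_lock);
                qp->r_atomic_data = *vaddr;             /* old value, returned in the ACK */
                if (opcode == OP(FETCH_ADD))
                        *vaddr = qp->r_atomic_data + sdata;
                else if (qp->r_atomic_data == compare)  /* COMPARE_SWAP */
                        *vaddr = sdata;
                spin_unlock_irq(&dev->pending_lock);
                qp->r_msn++;                            /* receive path owns r_msn */
                return qp->r_atomic_data;
        }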
@@ -1813,44 +1649,39 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
        }
        qp->r_psn++;
        qp->r_state = opcode;
+       qp->r_nak_state = 0;
        /* Send an ACK if requested or required. */
        if (psn & (1 << 31)) {
                /*
                 * Coalesce ACKs unless there is an RDMA READ or
                 * ATOMIC pending.
                 */
-               spin_lock(&qp->s_lock);
-               if (qp->s_ack_state == OP(ACKNOWLEDGE) ||
-                   qp->s_ack_state < IB_OPCODE_RDMA_READ_REQUEST) {
-                       qp->s_ack_state = opcode;
-                       qp->s_nak_state = 0;
-                       qp->s_ack_psn = psn;
-                       qp->s_ack_atomic = qp->r_atomic_data;
-                       goto resched;
+               if (qp->r_ack_state < OP(COMPARE_SWAP)) {
+                       qp->r_ack_state = opcode;
+                       qp->r_ack_psn = psn;
                }
-               spin_unlock(&qp->s_lock);
+               goto send_ack;
        }
-done:
-       spin_unlock_irqrestore(&qp->r_rq.lock, flags);
-       goto bail;
+       goto done;
 
-resched:
+nack_acc:
        /*
-        * Try to send ACK right away but not if ipath_do_rc_send() is
-        * active.
+        * A NAK will ACK earlier sends and RDMA writes.
+        * Don't queue the NAK if an RDMA read, atomic, or NAK
+        * is pending though.
         */
-       if (qp->s_hdrwords == 0 &&
-           (qp->s_ack_state < IB_OPCODE_RDMA_READ_REQUEST ||
-            qp->s_ack_state >= IB_OPCODE_COMPARE_SWAP))
+       if (qp->r_ack_state < OP(COMPARE_SWAP)) {
+               /* XXX Flush WQEs */
+               qp->state = IB_QPS_ERR;
+               qp->r_ack_state = OP(RDMA_WRITE_ONLY);
+               qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
+               qp->r_ack_psn = qp->r_psn;
+       }
+send_ack:
+       /* Send ACK right away unless the send tasklet has a pending ACK. */
+       if (qp->s_ack_state == OP(ACKNOWLEDGE))
                send_rc_ack(qp);
 
-rdmadone:
-       spin_unlock(&qp->s_lock);
-       spin_unlock_irqrestore(&qp->r_rq.lock, flags);
-
-       /* Call ipath_do_rc_send() in another thread. */
-       tasklet_hi_schedule(&qp->s_task);
-
-bail:
+done:
        return;
 }
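
With the resched:/rdmadone: trampolines gone, the receive path now records what to acknowledge in receive-owned fields and either sends the ACK inline or leaves it for the send tasklet. A condensed sketch of the resulting tail of ipath_rc_rcv(), with the state tests copied from the hunks above:

        /*
         * Sketch of the new ACK/NAK tail of ipath_rc_rcv().  r_ack_state,
         * r_nak_state and r_ack_psn are owned by the receive path, so no
         * s_lock is needed just to queue an ACK.
         */
        if (qp->r_ack_state < OP(COMPARE_SWAP)) {  /* no atomic/NAK queued */
                qp->r_ack_state = opcode;          /* coalesce into one ACK */
                qp->r_ack_psn = psn;
        }
        if (qp->s_ack_state == OP(ACKNOWLEDGE))    /* send tasklet is idle */
                send_rc_ack(qp);                   /* respond immediately */
        /* otherwise the tasklet picks the pending ACK up on its next run */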
index 402126eb79c96067bd0d7eef629fd5b1a0f5f36a..89df8f5ea998087c42af9e77c097933a6bffa35d 100644 (file)
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
index d38f4f3cfd1dd291dd33e17d3780fc9b7ed1d17e..772bc59fb85c314fc4e1084c1656039a10c1fa6f 100644 (file)
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -31,6 +32,7 @@
  */
 
 #include "ipath_verbs.h"
+#include "ipath_common.h"
 
 /*
  * Convert the AETH RNR timeout code into the number of milliseconds.
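
The table this comment introduces is indexed by the 5-bit RNR timer code carried in the AETH syndrome, which encodes discrete delays rather than a linear value. A hedged sketch of such a conversion follows; the shift/mask and the rounded table values are assumptions for illustration (per the IBTA spec, code 0 encodes the largest delay, 655.36 ms, and code 1 the smallest, 0.01 ms), not the driver's actual table:

        /* Illustrative only: map the 5-bit AETH RNR code to milliseconds. */
        static const u32 rnr_code_to_ms[32] = {
                656, 1, 1, 1,   /* codes 0-3; remaining 28 spec values omitted */
        };

        static inline u32 rnr_timeout_ms(u32 aeth)
        {
                /* Assumed syndrome position: top byte, low 5 bits. */
                return rnr_code_to_ms[(aeth >> 24) & 0x1f];
        }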
@@ -111,20 +113,23 @@ void ipath_insert_rnr_queue(struct ipath_qp *qp)
  *
  * Return 0 if no RWQE is available, otherwise return 1.
  *
- * Called at interrupt level with the QP r_rq.lock held.
+ * Can be called from interrupt level.
  */
 int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
 {
+       unsigned long flags;
        struct ipath_rq *rq;
        struct ipath_srq *srq;
        struct ipath_rwqe *wqe;
-       int ret;
+       int ret = 1;
 
        if (!qp->ibqp.srq) {
                rq = &qp->r_rq;
+               spin_lock_irqsave(&rq->lock, flags);
+
                if (unlikely(rq->tail == rq->head)) {
                        ret = 0;
-                       goto bail;
+                       goto done;
                }
                wqe = get_rwqe_ptr(rq, rq->tail);
                qp->r_wr_id = wqe->wr_id;
@@ -136,17 +141,16 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
                }
                if (++rq->tail >= rq->size)
                        rq->tail = 0;
-               ret = 1;
-               goto bail;
+               goto done;
        }
 
        srq = to_isrq(qp->ibqp.srq);
        rq = &srq->rq;
-       spin_lock(&rq->lock);
+       spin_lock_irqsave(&rq->lock, flags);
+
        if (unlikely(rq->tail == rq->head)) {
-               spin_unlock(&rq->lock);
                ret = 0;
-               goto bail;
+               goto done;
        }
        wqe = get_rwqe_ptr(rq, rq->tail);
        qp->r_wr_id = wqe->wr_id;
@@ -168,18 +172,18 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
                        n = rq->head - rq->tail;
                if (n < srq->limit) {
                        srq->limit = 0;
-                       spin_unlock(&rq->lock);
+                       spin_unlock_irqrestore(&rq->lock, flags);
                        ev.device = qp->ibqp.device;
                        ev.element.srq = qp->ibqp.srq;
                        ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
                        srq->ibsrq.event_handler(&ev,
                                                 srq->ibsrq.srq_context);
-               } else
-                       spin_unlock(&rq->lock);
-       } else
-               spin_unlock(&rq->lock);
-       ret = 1;
+                       goto bail;
+               }
+       }
 
+done:
+       spin_unlock_irqrestore(&rq->lock, flags);
 bail:
        return ret;
 }
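
After this change ipath_get_rwqe() takes rq->lock itself with spin_lock_irqsave() instead of requiring callers to hold r_rq.lock, which is what lets the loopback code below drop its own locking around the call. The receive queue is a simple ring; a simplified sketch of the non-SRQ path, using only names from the hunks above:

        /*
         * Sketch of the non-SRQ path: the queue is empty when tail catches
         * up with head, and tail wraps at rq->size.
         */
        static int get_rwqe_sketch(struct ipath_qp *qp)
        {
                struct ipath_rq *rq = &qp->r_rq;
                struct ipath_rwqe *wqe;
                unsigned long flags;

                spin_lock_irqsave(&rq->lock, flags);
                if (rq->tail == rq->head) {             /* ring empty */
                        spin_unlock_irqrestore(&rq->lock, flags);
                        return 0;
                }
                wqe = get_rwqe_ptr(rq, rq->tail);       /* consumer slot */
                qp->r_wr_id = wqe->wr_id;               /* saved for the completion */
                if (++rq->tail >= rq->size)             /* advance, wrap at size */
                        rq->tail = 0;
                spin_unlock_irqrestore(&rq->lock, flags);
                return 1;
        }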
@@ -187,7 +191,6 @@ bail:
 /**
  * ipath_ruc_loopback - handle UC and RC loopback requests
  * @sqp: the loopback QP
- * @wc: the work completion entry
  *
  * This is called from ipath_do_uc_send() or ipath_do_rc_send() to
  * forward a WQE addressed to the same HCA.
@@ -196,13 +199,14 @@ bail:
  * receive interrupts since this is a connected protocol and all packets
  * will pass through here.
  */
-void ipath_ruc_loopback(struct ipath_qp *sqp, struct ib_wc *wc)
+static void ipath_ruc_loopback(struct ipath_qp *sqp)
 {
        struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
        struct ipath_qp *qp;
        struct ipath_swqe *wqe;
        struct ipath_sge *sge;
        unsigned long flags;
+       struct ib_wc wc;
        u64 sdata;
 
        qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);
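
Besides making the function static, the signature change moves the work-completion entry onto the callee's stack: completions are built in a local struct ib_wc instead of caller-supplied storage. A minimal sketch of the error-completion pattern used in the hunks that follow; the function name is hypothetical and only the field assignments come from the diff:

        static void loopback_sketch(struct ipath_qp *sqp)
        {
                struct ipath_swqe *wqe = get_swqe_ptr(sqp, sqp->s_last);
                struct ib_wc wc;        /* local, no longer caller-owned */

                wc.wc_flags = 0;
                wc.imm_data = 0;
                /* ... on an access error, fill in the entry locally ... */
                wc.status = IB_WC_REM_ACCESS_ERR;
                wc.wr_id = wqe->wr.wr_id;
                wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
                wc.vendor_err = 0;
                wc.byte_len = 0;
                wc.qp_num = sqp->ibqp.qp_num;
        }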
@@ -233,8 +237,8 @@ again:
        wqe = get_swqe_ptr(sqp, sqp->s_last);
        spin_unlock_irqrestore(&sqp->s_lock, flags);
 
-       wc->wc_flags = 0;
-       wc->imm_data = 0;
+       wc.wc_flags = 0;
+       wc.imm_data = 0;
 
        sqp->s_sge.sge = wqe->sg_list[0];
        sqp->s_sge.sg_list = wqe->sg_list + 1;
@@ -242,39 +246,34 @@ again:
        sqp->s_len = wqe->length;
        switch (wqe->wr.opcode) {
        case IB_WR_SEND_WITH_IMM:
-               wc->wc_flags = IB_WC_WITH_IMM;
-               wc->imm_data = wqe->wr.imm_data;
+               wc.wc_flags = IB_WC_WITH_IMM;
+               wc.imm_data = wqe->wr.imm_data;
                /* FALLTHROUGH */
        case IB_WR_SEND:
-               spin_lock_irqsave(&qp->r_rq.lock, flags);
                if (!ipath_get_rwqe(qp, 0)) {
                rnr_nak:
-                       spin_unlock_irqrestore(&qp->r_rq.lock, flags);
                        /* Handle RNR NAK */
                        if (qp->ibqp.qp_type == IB_QPT_UC)
                                goto send_comp;
                        if (sqp->s_rnr_retry == 0) {
-                               wc->status = IB_WC_RNR_RETRY_EXC_ERR;
+                               wc.status = IB_WC_RNR_RETRY_EXC_ERR;
                                goto err;
                        }
                        if (sqp->s_rnr_retry_cnt < 7)
                                sqp->s_rnr_retry--;
                        dev->n_rnr_naks++;
                        sqp->s_rnr_timeout =
-                               ib_ipath_rnr_table[sqp->s_min_rnr_timer];
+                               ib_ipath_rnr_table[sqp->r_min_rnr_timer];
                        ipath_insert_rnr_queue(sqp);
                        goto done;
                }
-               spin_unlock_irqrestore(&qp->r_rq.lock, flags);
                break;
 
        case IB_WR_RDMA_WRITE_WITH_IMM:
-               wc->wc_flags = IB_WC_WITH_IMM;
-               wc->imm_data = wqe->wr.imm_data;
-               spin_lock_irqsave(&qp->r_rq.lock, flags);
+               wc.wc_flags = IB_WC_WITH_IMM;
+               wc.imm_data = wqe->wr.imm_data;
                if (!ipath_get_rwqe(qp, 1))
                        goto rnr_nak;
-               spin_unlock_irqrestore(&qp->r_rq.lock, flags);
                /* FALLTHROUGH */
        case IB_WR_RDMA_WRITE:
                if (wqe->length == 0)
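
One detail worth noting in the rnr_nak: path above: the retry counter is decremented only while s_rnr_retry_cnt is below 7, because an InfiniBand retry count of 7 means retry indefinitely. Condensed from the hunk, with names exactly as in the diff:

        rnr_nak:                                 /* receiver had no RWQE posted */
                if (qp->ibqp.qp_type == IB_QPT_UC)
                        goto send_comp;          /* UC: treat as delivered */
                if (sqp->s_rnr_retry == 0) {
                        wc.status = IB_WC_RNR_RETRY_EXC_ERR;
                        goto err;                /* retries exhausted */
                }
                if (sqp->s_rnr_retry_cnt < 7)    /* 7 == retry forever */
                        sqp->s_rnr_retry--;
                dev->n_rnr_naks++;
                sqp->s_rnr_timeout = ib_ipath_rnr_table[sqp->r_min_rnr_timer];
                ipath_insert_rnr_queue(sqp);     /* back off, retry later */
                goto done;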
@@ -284,20 +283,20 @@ again:
                                            wqe->wr.wr.rdma.rkey,
                                            IB_ACCESS_REMOTE_WRITE))) {
                acc_err:
-                       wc->status = IB_WC_REM_ACCESS_ERR;
+                       wc.status = IB_WC_REM_ACCESS_ERR;
                err:
-                       wc->wr_id = wqe->wr.wr_id;
-                       wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
-                       wc->vendor_err = 0;
-                       wc->byte_len = 0;
-                       wc->qp_num = sqp->ibqp.qp_num;