* 'audit.b22' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/audit-current:
[PATCH] audit syscall classes
[PATCH] audit: support for object context filters
[PATCH] audit: rename AUDIT_SE_* constants
[PATCH] add rule filterkey
KBUILD_VERBOSE = 0
endif
-# Call sparse as part of compilation of C files
-# Use 'make C=1' to enable sparse checking
+# Call checker as part of compilation of C files
+# Use 'make C=1' to enable checking (sparse, by default)
+# Override with 'make C=1 CHECK=checker_executable CHECKFLAGS=....'
ifdef C
ifeq ("$(origin C)", "command line")
@echo ' make V=0|1 [targets] 0 => quiet build (default), 1 => verbose build'
@echo ' make O=dir [targets] Locate all output files in "dir", including .config'
- @echo ' make C=1 [targets] Check all c source with $$CHECK (sparse)'
- @echo ' make C=2 [targets] Force check of all c source with $$CHECK (sparse)'
+ @echo ' make C=1 [targets] Check all c source with $$CHECK (sparse by default)'
+ @echo ' make C=2 [targets] Force check of all c source with $$CHECK'
@echo ''
@echo 'Execute "make" or "make all" to build all targets marked with [*] '
@echo 'For further info see the ./README file'
a_flags = -Wp,-MD,$(depfile) $(AFLAGS) $(AFLAGS_KERNEL) \
$(NOSTDINC_FLAGS) $(CPPFLAGS) \
- $(modkern_aflags) $(EXTRA_AFLAGS) $(AFLAGS_$(*F).o)
+ $(modkern_aflags) $(EXTRA_AFLAGS) $(AFLAGS_$(basetarget).o)
quiet_cmd_as_o_S = AS $@
cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $<
}
}
+#ifdef CONFIG_SMP
+
static void alternatives_smp_save(struct alt_instr *start, struct alt_instr *end)
{
struct alt_instr *a;
spin_unlock_irqrestore(&smp_alt, flags);
}
+#endif
+
void __init alternative_instructions(void)
{
if (no_replacement) {
smp_alt_once = 1;
#endif
+#ifdef CONFIG_SMP
if (smp_alt_once) {
if (1 == num_possible_cpus()) {
printk(KERN_INFO "SMP alternatives: switching to UP code\n");
_text, _etext);
alternatives_smp_switch(0);
}
+#endif
}
#ifdef CONFIG_DEBUG_RODATA
-extern char __start_rodata, __end_rodata;
void mark_rodata_ro(void)
{
- unsigned long addr = (unsigned long)&__start_rodata;
+ unsigned long addr = (unsigned long)__start_rodata;
- for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
+ for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);
- printk ("Write protecting the kernel read-only data: %luk\n",
- (unsigned long)(&__end_rodata - &__start_rodata) >> 10);
+ printk("Write protecting the kernel read-only data: %uk\n",
+ (__end_rodata - __start_rodata) >> 10);
/*
* change_page_attr() requires a global_flush_tlb() call after it.
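The change from &__start_rodata to plain __start_rodata relies on the section
boundaries being declared as arrays rather than single chars. A minimal sketch
of the assumed declarations (as in asm-generic/sections.h; not part of this
hunk):

    /* Linker-provided section boundary symbols, declared as arrays so the
     * bare name decays to the boundary address and pointer subtraction
     * yields a byte count directly; no '&' is needed. */
    extern char __start_rodata[], __end_rodata[];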
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
+#include <asm/sections.h>
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
- extern char __start_rodata, __end_rodata;
/* rodata memory was already mapped with KERNEL_RO access rights by
pagetable_init() and map_pages(). No need to do additional stuff here */
printk (KERN_INFO "Write protecting the kernel read-only data: %luk\n",
- (unsigned long)(&__end_rodata - &__start_rodata) >> 10);
+ (unsigned long)(__end_rodata - __start_rodata) >> 10);
}
#endif
sysc_nr_ok:
mvc SP_ARGS(4,%r15),SP_R7(%r15)
sysc_do_restart:
+ l %r8,BASED(.Lsysc_table)
tm __TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
- l %r8,sys_call_table-system_call(%r7,%r13) # get system call addr.
+ l %r8,0(%r7,%r8) # get system call addr.
bnz BASED(sysc_tracesys)
basr %r14,%r8 # call sys_xxxx
st %r2,SP_R2(%r15) # store return value (change R2 on stack)
basr %r14,%r1
clc SP_R2(4,%r15),BASED(.Lnr_syscalls)
bnl BASED(sysc_tracenogo)
+ l %r8,BASED(.Lsysc_table)
l %r7,SP_R2(%r15) # strace might have changed the
sll %r7,2 # system call
- l %r8,sys_call_table-system_call(%r7,%r13)
+ l %r8,0(%r7,%r8)
sysc_tracego:
lm %r3,%r6,SP_R3(%r15)
l %r2,SP_ORIG_R2(%r15)
.Ltrace: .long syscall_trace
.Lvfork: .long sys_vfork
.Lschedtail: .long schedule_tail
+.Lsysc_table: .long sys_call_table
.Lcritical_start:
.long __critical_start + 0x80000000
.Lcleanup_critical:
.long cleanup_critical
+ .section .rodata, "a"
#define SYSCALL(esa,esame,emu) .long esa
sys_call_table:
#include "syscalls.S"
#undef SYSCALL
-
.Lcritical_end:
.quad __critical_end
+ .section .rodata, "a"
#define SYSCALL(esa,esame,emu) .long esame
sys_call_table:
#include "syscalls.S"
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
+#include <linux/pfn.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
+#include <asm/sections.h>
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
printk("%d pages swap cached\n",cached);
}
-/* References to section boundaries */
-
-extern unsigned long _text;
-extern unsigned long _etext;
-extern unsigned long _edata;
-extern unsigned long __bss_start;
-extern unsigned long _end;
-
-extern unsigned long __init_begin;
-extern unsigned long __init_end;
-
extern unsigned long __initdata zholes_size[];
/*
* paging_init() sets up the page tables
unsigned long pfn = 0;
unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
static const int ssm_mask = 0x04000000L;
+ unsigned long ro_start_pfn, ro_end_pfn;
+
+ ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
+ ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
/* unmap whole virtual address space */
pg_dir++;
for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
- pte = pfn_pte(pfn, PAGE_KERNEL);
+ if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
+ pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
+ else
+ pte = pfn_pte(pfn, PAGE_KERNEL);
if (pfn >= max_low_pfn)
pte_clear(&init_mm, 0, &pte);
set_pte(pg_table, pte);
}
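PFN_DOWN() rounds an address down to its page frame number and PFN_UP() rounds
up, so the half-open range [ro_start_pfn, ro_end_pfn) covers the whole rodata
section even when its boundaries are not page aligned. A sketch of the helpers
assumed from linux/pfn.h (quoted from memory, so treat the exact definitions
as an assumption):

    /* Page-frame-number helpers, essentially as in <linux/pfn.h>: */
    #define PFN_ALIGN(x)  (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
    #define PFN_UP(x)     (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
    #define PFN_DOWN(x)   ((x) >> PAGE_SHIFT)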
#else /* CONFIG_64BIT */
+
void __init paging_init(void)
{
pgd_t * pg_dir;
unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
_KERN_REGION_TABLE;
static const int ssm_mask = 0x04000000L;
-
unsigned long zones_size[MAX_NR_ZONES];
unsigned long dma_pfn, high_pfn;
+ unsigned long ro_start_pfn, ro_end_pfn;
memset(zones_size, 0, sizeof(zones_size));
dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
high_pfn = max_low_pfn;
+ ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
+ ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
if (dma_pfn > high_pfn)
zones_size[ZONE_DMA] = high_pfn;
pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) {
- pte = pfn_pte(pfn, PAGE_KERNEL);
+ if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
+ pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
+ else
+ pte = pfn_pte(pfn, PAGE_KERNEL);
if (pfn >= max_low_pfn) {
pte_clear(&init_mm, 0, &pte);
continue;
reservedpages << (PAGE_SHIFT-10),
datasize >>10,
initsize >> 10);
+ printk("Write protected kernel read-only data: %#lx - %#lx\n",
+ (unsigned long)&__start_rodata,
+ PFN_ALIGN((unsigned long)&__end_rodata) - 1);
}
void free_initmem(void)
#include "linux/kernel.h"
#include "linux/string.h"
#include "linux/fs.h"
+#include "linux/hardirq.h"
#include "linux/highmem.h"
#include "asm/page.h"
#include "asm/pgtable.h"
return((unsigned long) phys);
}
-static int do_op(unsigned long addr, int len, int is_write,
+static int do_op_one_page(unsigned long addr, int len, int is_write,
int (*op)(unsigned long addr, int len, void *arg), void *arg)
{
struct page *page;
return(-1);
page = phys_to_page(addr);
- addr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
+ addr = (unsigned long) kmap_atomic(page, KM_UML_USERCOPY) + (addr & ~PAGE_MASK);
+
n = (*op)(addr, len, arg);
- kunmap(page);
+
+ kunmap_atomic((void *) (addr & PAGE_MASK), KM_UML_USERCOPY);
return(n);
}
remain = len;
current->thread.fault_catcher = jmpbuf;
- n = do_op(addr, size, is_write, op, arg);
+ n = do_op_one_page(addr, size, is_write, op, arg);
if(n != 0){
*res = (n < 0 ? remain : 0);
goto out;
}
while(addr < ((addr + remain) & PAGE_MASK)){
- n = do_op(addr, PAGE_SIZE, is_write, op, arg);
+ n = do_op_one_page(addr, PAGE_SIZE, is_write, op, arg);
if(n != 0){
*res = (n < 0 ? remain : 0);
goto out;
goto out;
}
- n = do_op(addr, remain, is_write, op, arg);
+ n = do_op_one_page(addr, remain, is_write, op, arg);
if(n != 0)
*res = (n < 0 ? remain : 0);
else *res = 0;
return err;
}
-static int actually_do_remove(char *dir)
+/*
+ * Unlinks the files contained in @dir and then removes @dir.
+ * Doesn't handle directory trees, so it's not equivalent to rm -rf, but close
+ * enough for our purposes. We ignore ENOENT errors for anything (they happen,
+ * strangely enough - possibly due to races between multiple dying UML threads).
+ */
+static int remove_files_and_dir(char *dir)
{
DIR *directory;
struct dirent *ent;
int len;
char file[256];
+ int ret;
directory = opendir(dir);
- if(directory == NULL)
- return -errno;
+ if (directory == NULL) {
+ if (errno != ENOENT)
+ return -errno;
+ else
+ return 0;
+ }
- while((ent = readdir(directory)) != NULL){
- if(!strcmp(ent->d_name, ".") || !strcmp(ent->d_name, ".."))
+ while ((ent = readdir(directory)) != NULL) {
+ if (!strcmp(ent->d_name, ".") || !strcmp(ent->d_name, ".."))
continue;
len = strlen(dir) + sizeof("/") + strlen(ent->d_name) + 1;
- if(len > sizeof(file))
- return -E2BIG;
+ if (len > sizeof(file)) {
+ ret = -E2BIG;
+ goto out;
+ }
sprintf(file, "%s/%s", dir, ent->d_name);
- if(unlink(file) < 0)
- return -errno;
+ if (unlink(file) < 0 && errno != ENOENT) {
+ ret = -errno;
+ goto out;
+ }
}
- if(rmdir(dir) < 0)
- return -errno;
- return 0;
+ if (rmdir(dir) < 0 && errno != ENOENT) {
+ ret = -errno;
+ goto out;
+ }
+
+ ret = 0;
+out:
+ closedir(directory);
+ return ret;
}
/* This says that there isn't already a user of the specified directory even if
* something other than UML sticking stuff in the directory
* this boot racing with a shutdown of the other UML
* In any of these cases, the directory isn't useful for anything else.
+ *
+ * Boolean return: 1 if in use, 0 otherwise.
*/
-
-static int not_dead_yet(char *dir)
+static inline int is_umdir_used(char *dir)
{
char file[strlen(uml_dir) + UMID_LEN + sizeof("/pid\0")];
char pid[sizeof("nnnnn\0")], *end;
n = snprintf(file, sizeof(file), "%s/pid", dir);
if(n >= sizeof(file)){
- printk("not_dead_yet - pid filename too long\n");
+ printk("is_umdir_used - pid filename too long\n");
err = -E2BIG;
goto out;
}
if(fd < 0) {
fd = -errno;
if(fd != -ENOENT){
- printk("not_dead_yet : couldn't open pid file '%s', "
+ printk("is_umdir_used : couldn't open pid file '%s', "
"err = %d\n", file, -fd);
}
goto out;
err = 0;
n = read(fd, pid, sizeof(pid));
if(n < 0){
- printk("not_dead_yet : couldn't read pid file '%s', "
+ printk("is_umdir_used : couldn't read pid file '%s', "
"err = %d\n", file, errno);
goto out_close;
} else if(n == 0){
- printk("not_dead_yet : couldn't read pid file '%s', "
+ printk("is_umdir_used : couldn't read pid file '%s', "
"0-byte read\n", file);
goto out_close;
}
p = strtoul(pid, &end, 0);
if(end == pid){
- printk("not_dead_yet : couldn't parse pid file '%s', "
+ printk("is_umdir_used : couldn't parse pid file '%s', "
"errno = %d\n", file, errno);
goto out_close;
}
return 1;
}
- err = actually_do_remove(dir);
- if(err)
- printk("not_dead_yet - actually_do_remove failed with "
- "err = %d\n", err);
-
- return err;
-
out_close:
close(fd);
out:
return 0;
}
+/*
+ * Try to remove the directory @dir unless it's in use.
+ * Precondition: @dir exists.
+ * Returns 0 for success, < 0 for failure in removal or if the directory is in
+ * use.
+ */
+static int umdir_take_if_dead(char *dir)
+{
+ int ret;
+ if (is_umdir_used(dir))
+ return -EEXIST;
+
+ ret = remove_files_and_dir(dir);
+ if (ret) {
+ printk("is_umdir_used - remove_files_and_dir failed with "
+ "err = %d\n", ret);
+ }
+ return ret;
+}
+
static void __init create_pid_file(void)
{
char file[strlen(uml_dir) + UMID_LEN + sizeof("/pid\0")];
if(err != -EEXIST)
goto err;
- /* 1 -> this umid is already in use
- * < 0 -> we couldn't remove the umid directory
- * In either case, we can't use this umid, so return -EEXIST.
- */
- if(not_dead_yet(tmp) != 0)
+ if (umdir_take_if_dead(tmp) < 0)
goto err;
err = mkdir(tmp, 0777);
char dir[strlen(uml_dir) + UMID_LEN + 1], err;
sprintf(dir, "%s%s", uml_dir, umid);
- err = actually_do_remove(dir);
+ err = remove_files_and_dir(dir);
if(err)
- printf("remove_umid_dir - actually_do_remove failed with "
+ printf("remove_umid_dir - remove_files_and_dir failed with "
"err = %d\n", err);
}
USER_OBJS := $(foreach file,$(USER_OBJS),$(obj)/$(file))
$(USER_OBJS:.o=.%): \
- c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS) $(CFLAGS_$(*F).o)
+ c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS) $(CFLAGS_$(basetarget).o)
$(USER_OBJS) : CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ \
-Dunix -D__unix__ -D__$(SUBARCH)__
UNPROFILE_OBJS := $(foreach file,$(UNPROFILE_OBJS),$(obj)/$(file))
$(UNPROFILE_OBJS:.o=.%): \
- c_flags = -Wp,-MD,$(depfile) $(call unprofile,$(USER_CFLAGS)) $(CFLAGS_$(*F).o)
+ c_flags = -Wp,-MD,$(depfile) $(call unprofile,$(USER_CFLAGS)) $(CFLAGS_$(basetarget).o)
$(UNPROFILE_OBJS) : CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ \
-Dunix -D__unix__ -D__$(SUBARCH)__
#ifdef CONFIG_DEBUG_RODATA
-extern char __start_rodata, __end_rodata;
void mark_rodata_ro(void)
{
- unsigned long addr = (unsigned long)&__start_rodata;
+ unsigned long addr = (unsigned long)__start_rodata;
- for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
+ for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);
printk ("Write protecting the kernel read-only data: %luk\n",
- (&__end_rodata - &__start_rodata) >> 10);
+ (__end_rodata - __start_rodata) >> 10);
/*
* change_page_attr_addr() requires a global_flush_tlb() call after it.
/* Make sure this is a valid target state */
if (!device->flags.power_manageable) {
- printk(KERN_DEBUG "Device `[%s]is not power manageable",
+ printk(KERN_DEBUG "Device `[%s]' is not power manageable",
device->kobj.name);
return -ENODEV;
}
#define DBG_RX 0x0200
#define DBG_TX 0x0400
static unsigned int debugflags;
-static unsigned int nbds_max = 16;
#endif /* NDEBUG */
+static unsigned int nbds_max = 16;
static struct nbd_device nbd_dev[MAX_NBD];
/*
#include <linux/slab.h>
#include "edac_mc.h"
-#define AMD76X_REVISION " Ver: 2.0.0 " __DATE__
-
+#define AMD76X_REVISION " Ver: 2.0.1 " __DATE__
+#define EDAC_MOD_STR "amd76x_edac"
#define amd76x_printk(level, fmt, arg...) \
edac_printk(level, "amd76x", fmt, ##arg)
#include <linux/slab.h>
#include "edac_mc.h"
-#define E752X_REVISION " Ver: 2.0.0 " __DATE__
+#define E752X_REVISION " Ver: 2.0.1 " __DATE__
+#define EDAC_MOD_STR "e752x_edac"
static int force_function_unhide;
#include <linux/slab.h>
#include "edac_mc.h"
-#define E7XXX_REVISION " Ver: 2.0.0 " __DATE__
+#define E7XXX_REVISION " Ver: 2.0.1 " __DATE__
+#define EDAC_MOD_STR "e7xxx_edac"
#define e7xxx_printk(level, fmt, arg...) \
edac_printk(level, "e7xxx", fmt, ##arg)
#endif /* !CONFIG_EDAC_DEBUG */
-#define edac_xstr(s) edac_str(s)
-#define edac_str(s) #s
-#define EDAC_MOD_STR edac_xstr(KBUILD_BASENAME)
-
#define BIT(x) (1 << (x))
#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \
#include <linux/slab.h>
#include "edac_mc.h"
-#define I82860_REVISION " Ver: 2.0.0 " __DATE__
+#define I82860_REVISION " Ver: 2.0.1 " __DATE__
+#define EDAC_MOD_STR "i82860_edac"
#define i82860_printk(level, fmt, arg...) \
edac_printk(level, "i82860", fmt, ##arg)
#include <linux/slab.h>
#include "edac_mc.h"
-#define I82875P_REVISION " Ver: 2.0.0 " __DATE__
+#define I82875P_REVISION " Ver: 2.0.1 " __DATE__
+#define EDAC_MOD_STR "i82875p_edac"
#define i82875p_printk(level, fmt, arg...) \
edac_printk(level, "i82875p", fmt, ##arg)
#include <linux/slab.h>
#include "edac_mc.h"
-#define R82600_REVISION " Ver: 2.0.0 " __DATE__
+#define R82600_REVISION " Ver: 2.0.1 " __DATE__
+#define EDAC_MOD_STR "r82600_edac"
#define r82600_printk(level, fmt, arg...) \
edac_printk(level, "r82600", fmt, ##arg)
}
ide_set_hwifdata(hwif, idev);
+ hwif->atapi_dma = 1;
+
pci_read_config_byte(hwif->pci_dev, 0x50, &conf);
if(conf & 1) {
idev->smart = 1;
config IPATH_CORE
- tristate "PathScale InfiniPath Driver"
+ tristate "QLogic InfiniPath Driver"
depends on 64BIT && PCI_MSI && NET
---help---
- This is a low-level driver for PathScale InfiniPath host channel
+ This is a low-level driver for QLogic InfiniPath host channel
adapters (HCAs) based on the HT-400 and PE-800 chips.
config INFINIBAND_IPATH
- tristate "PathScale InfiniPath Verbs Driver"
+ tristate "QLogic InfiniPath Verbs Driver"
depends on IPATH_CORE && INFINIBAND
---help---
This is a driver that provides InfiniBand verbs support for
- PathScale InfiniPath host channel adapters (HCAs). This
+ QLogic InfiniPath host channel adapters (HCAs). This
allows these devices to be used with both kernel upper level
protocols such as IP-over-InfiniBand as well as with userspace
applications (in conjunction with InfiniBand userspace access).
-EXTRA_CFLAGS += -DIPATH_IDSTR='"PathScale kernel.org driver"' \
+EXTRA_CFLAGS += -DIPATH_IDSTR='"QLogic kernel.org driver"' \
-DIPATH_KERN_TYPE=0
obj-$(CONFIG_IPATH_CORE) += ipath_core.o
/*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* to communicate between kernel and user code.
*/
-/* This is the IEEE-assigned OUI for PathScale, Inc. */
+
+/* This is the IEEE-assigned OUI for QLogic Inc. InfiniPath */
#define IPATH_SRC_OUI_1 0x00
#define IPATH_SRC_OUI_2 0x11
#define IPATH_SRC_OUI_3 0x75
__u64 sps_hwerrs;
/* number of times IB link changed state unexpectedly */
__u64 sps_iblink;
- /* no longer used; left for compatibility */
- __u64 sps_unused3;
+ /* kernel receive interrupts that didn't read intstat */
+ __u64 sps_fastrcvint;
/* number of kernel (port0) packets received */
__u64 sps_port0pkts;
/* number of "ethernet" packets sent by driver */
__u64 sps_ports;
/* list of pkeys (other than default) accepted (0 means not set) */
__u16 sps_pkeys[4];
- /* lids for up to 4 infinipaths, indexed by infinipath # */
- __u16 sps_lid[4];
+ __u16 sps_unused16[4]; /* available; maintaining compatible layout */
/* number of user ports per chip (not IB ports) */
__u32 sps_nports;
/* not our interrupt, or already handled */
* packets if ipath not configured, sma/mad, etc.)
*/
__u64 sps_krdrops;
- /* mlids for up to 4 infinipaths, indexed by infinipath # */
- __u16 sps_mlid[4];
/* pad for future growth */
- __u64 __sps_pad[45];
+ __u64 __sps_pad[46];
};
/*
__u32 spi_rcv_egrchunksize;
/* total size of mmap to cover full rcvegrbuffers */
__u32 spi_rcv_egrbuftotlen;
+ __u32 spi_filler_for_align;
+ /* address of readonly memory copy of the rcvhdrq tail register. */
+ __u64 spi_rcvhdr_tailaddr;
} __attribute__ ((aligned(8)));
/*
* Similarly, this is the kernel version going back to the user. It's
* slightly different, in that we want to tell if the driver was built as
- * part of a PathScale release, or from the driver from OpenIB, kernel.org,
- * or a standard distribution, for support reasons. The high bit is 0 for
- * non-PathScale, and 1 for PathScale-built/supplied.
+ * part of a QLogic release, or from the driver from openfabrics.org,
+ * kernel.org, or a standard distribution, for support reasons.
+ * The high bit is 0 for non-QLogic and 1 for QLogic-built/supplied.
*
* It's returned by the driver to the user code during initialization in the
* spi_sw_version field of ipath_base_info, so the user code can in turn
*/
__u32 spu_rcvhdrsize;
- /*
- * cache line aligned (64 byte) user address to
- * which the rcvhdrtail register will be written by infinipath
- * whenever it changes, so that no chip registers are read in
- * the performance path.
- */
- __u64 spu_rcvhdraddr;
+ __u64 spu_unused; /* kept for compatible layout */
/*
* address of struct base_info to write to
* Data layout in I2C flash (for GUID, etc.)
* All fields are little-endian binary unless otherwise stated
*/
-#define IPATH_FLASH_VERSION 1
+#define IPATH_FLASH_VERSION 2
struct ipath_flash {
/* flash layout version (IPATH_FLASH_VERSION) */
__u8 if_fversion;
__u8 if_csum;
/*
* valid length (in use, protected by if_csum), including
- * if_fversion and if_sum themselves)
+ * if_fversion and if_csum themselves
*/
__u8 if_length;
/* the GUID, in network order */
__u8 if_guid[8];
/* number of GUIDs to use, starting from if_guid */
__u8 if_numguid;
- /* the board serial number, in ASCII */
+ /* the (last 10 characters of) board serial number, in ASCII */
char if_serial[12];
/* board mfg date (YYYYMMDD ASCII) */
char if_mfgdate[8];
__u8 if_powerhour[2];
/* ASCII free-form comment field */
char if_comment[32];
- /* 78 bytes used, min flash size is 128 bytes */
- __u8 if_future[50];
+ /* Backwards compatible prefix for longer QLogic Serial Numbers */
+ char if_sprefix[4];
+ /* 82 bytes used, min flash size is 128 bytes */
+ __u8 if_future[46];
};
/*
#define INFINIPATH_KPF_INTR 0x1
/* SendPIO per-buffer control */
-#define INFINIPATH_SP_LENGTHP1_MASK 0x3FF
-#define INFINIPATH_SP_LENGTHP1_SHIFT 0
-#define INFINIPATH_SP_INTR 0x80000000
-#define INFINIPATH_SP_TEST 0x40000000
-#define INFINIPATH_SP_TESTEBP 0x20000000
+#define INFINIPATH_SP_TEST 0x40
+#define INFINIPATH_SP_TESTEBP 0x20
/* SendPIOAvail bits */
#define INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT 1
#define INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT 0
+/* infinipath header format */
+struct ipath_header {
+ /*
+ * Version - 4 bits, Port - 4 bits, TID - 10 bits and Offset -
+ * 14 bits before ECO change ~28 Dec 03. After that, Vers 4,
+ * Port 3, TID 11, offset 14.
+ */
+ __le32 ver_port_tid_offset;
+ __le16 chksum;
+ __le16 pkt_flags;
+};
+
+/* infinipath user message header format.
+ * This structure contains the first 4 fields common to all protocols
+ * that employ infinipath.
+ */
+struct ipath_message_header {
+ __be16 lrh[4];
+ __be32 bth[3];
+ /* fields below this point are in host byte order */
+ struct ipath_header iph;
+ __u8 sub_opcode;
+};
+
+/* infinipath ethernet header format */
+struct ether_header {
+ __be16 lrh[4];
+ __be32 bth[3];
+ struct ipath_header iph;
+ __u8 sub_opcode;
+ __u8 cmd;
+ __be16 lid;
+ __u16 mac[3];
+ __u8 frag_num;
+ __u8 seq_num;
+ __le32 len;
+ /* MUST be of word size due to PIO write requirements */
+ __le32 csum;
+ __le16 csum_offset;
+ __le16 flags;
+ __u16 first_2_bytes;
+ __u8 unused[2]; /* currently unused */
+};
+
+
+/* IB - LRH header consts */
+#define IPATH_LRH_GRH 0x0003 /* 1. word of IB LRH - next header: GRH */
+#define IPATH_LRH_BTH 0x0002 /* 1. word of IB LRH - next header: BTH */
+
+/* misc. */
+#define SIZE_OF_CRC 1
+
+#define IPATH_DEFAULT_P_KEY 0xFFFF
+#define IPATH_PERMISSIVE_LID 0xFFFF
+#define IPATH_AETH_CREDIT_SHIFT 24
+#define IPATH_AETH_CREDIT_MASK 0x1F
+#define IPATH_AETH_CREDIT_INVAL 0x1F
+#define IPATH_PSN_MASK 0xFFFFFF
+#define IPATH_MSN_MASK 0xFFFFFF
+#define IPATH_QPN_MASK 0xFFFFFF
+#define IPATH_MULTICAST_LID_BASE 0xC000
+#define IPATH_MULTICAST_QPN 0xFFFFFF
+
+/* Receive Header Queue: receive type (from infinipath) */
+#define RCVHQ_RCV_TYPE_EXPECTED 0
+#define RCVHQ_RCV_TYPE_EAGER 1
+#define RCVHQ_RCV_TYPE_NON_KD 2
+#define RCVHQ_RCV_TYPE_ERROR 3
+
+
+/* sub OpCodes - ith4x */
+#define IPATH_ITH4X_OPCODE_ENCAP 0x81
+#define IPATH_ITH4X_OPCODE_LID_ARP 0x82
+
+#define IPATH_HEADER_QUEUE_WORDS 9
+
+/* functions for extracting fields from rcvhdrq entries for the driver.
+ */
+static inline __u32 ipath_hdrget_err_flags(const __le32 * rbuf)
+{
+ return __le32_to_cpu(rbuf[1]);
+}
+
+static inline __u32 ipath_hdrget_rcv_type(const __le32 * rbuf)
+{
+ return (__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_RCVTYPE_SHIFT)
+ & INFINIPATH_RHF_RCVTYPE_MASK;
+}
+
+static inline __u32 ipath_hdrget_length_in_bytes(const __le32 * rbuf)
+{
+ return ((__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_LENGTH_SHIFT)
+ & INFINIPATH_RHF_LENGTH_MASK) << 2;
+}
+
+static inline __u32 ipath_hdrget_index(const __le32 * rbuf)
+{
+ return (__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_EGRINDEX_SHIFT)
+ & INFINIPATH_RHF_EGRINDEX_MASK;
+}
+
+static inline __u32 ipath_hdrget_ipath_ver(__le32 hdrword)
+{
+ return (__le32_to_cpu(hdrword) >> INFINIPATH_I_VERS_SHIFT)
+ & INFINIPATH_I_VERS_MASK;
+}
+
#endif /* _IPATH_COMMON_H */
/*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
struct ib_ucontext *context,
struct ib_udata *udata)
{
+ struct ipath_ibdev *dev = to_idev(ibdev);
struct ipath_cq *cq;
struct ib_wc *wc;
struct ib_cq *ret;
+ if (entries > ib_ipath_max_cqes) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+
+ if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
/*
* Need to use vmalloc() if we want to support large #s of
* entries.
ret = &cq->ibcq;
+ dev->n_cqs_allocated++;
+
bail:
return ret;
}
*/
int ipath_destroy_cq(struct ib_cq *ibcq)
{
+ struct ipath_ibdev *dev = to_idev(ibcq->device);
struct ipath_cq *cq = to_icq(ibcq);
tasklet_kill(&cq->comptask);
+ dev->n_cqs_allocated--;
vfree(cq->queue);
kfree(cq);
/*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
/*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
#include <linux/pci.h>
#include <asm/uaccess.h>
-#include "ipath_common.h"
#include "ipath_kernel.h"
-#include "ips_common.h"
#include "ipath_layer.h"
+#include "ipath_common.h"
int ipath_diag_inuse;
static int diag_set_link;
.release = ipath_diag_release
};
-static struct cdev *diag_cdev;
-static struct class_device *diag_class_dev;
-
-int ipath_diag_init(void)
+int ipath_diag_add(struct ipath_devdata *dd)
{
- return ipath_cdev_init(IPATH_DIAG_MINOR, "ipath_diag",
- &diag_file_ops, &diag_cdev, &diag_class_dev);
+ char name[16];
+
+ snprintf(name, sizeof(name), "ipath_diag%d", dd->ipath_unit);
+
+ return ipath_cdev_init(IPATH_DIAG_MINOR_BASE + dd->ipath_unit, name,
+ &diag_file_ops, &dd->diag_cdev,
+ &dd->diag_class_dev);
}
-void ipath_diag_cleanup(void)
+void ipath_diag_remove(struct ipath_devdata *dd)
{
- ipath_cdev_cleanup(&diag_cdev, &diag_class_dev);
+ ipath_cdev_cleanup(&dd->diag_cdev, &dd->diag_class_dev);
}
/**
int ret;
/* not very efficient, but it works for now */
- if (reg_addr < dd->ipath_kregbase ||
- reg_end > dd->ipath_kregend) {
+ if (reg_addr < dd->ipath_kregbase || reg_end > dd->ipath_kregend) {
ret = -EINVAL;
goto bail;
}
goto bail;
}
reg_addr++;
- uaddr++;
+ uaddr += sizeof(u64);
}
ret = 0;
bail:
int ret;
/* not very efficient, but it works for now */
- if (reg_addr < dd->ipath_kregbase ||
- reg_end > dd->ipath_kregend) {
+ if (reg_addr < dd->ipath_kregbase || reg_end > dd->ipath_kregend) {
ret = -EINVAL;
goto bail;
}
writeq(data, reg_addr);
reg_addr++;
- uaddr++;
+ uaddr += sizeof(u64);
}
ret = 0;
bail:
}
reg_addr++;
- uaddr++;
+ uaddr += sizeof(u32);
+
}
ret = 0;
bail:
writel(data, reg_addr);
reg_addr++;
- uaddr++;
+ uaddr += sizeof(u32);
}
ret = 0;
bail:
static int ipath_diag_open(struct inode *in, struct file *fp)
{
+ int unit = iminor(in) - IPATH_DIAG_MINOR_BASE;
struct ipath_devdata *dd;
- int unit = 0; /* XXX this is bogus */
- unsigned long flags;
int ret;
- dd = ipath_lookup(unit);
-
mutex_lock(&ipath_mutex);
- spin_lock_irqsave(&ipath_devs_lock, flags);
if (ipath_diag_inuse) {
ret = -EBUSY;
goto bail;
}
- list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
- /*
- * we need at least one infinipath device to be present
- * (don't use INITTED, because we want to be able to open
- * even if device is in freeze mode, which cleared INITTED).
- * There is a small amount of risk to this, which is why we
- * also verify kregbase is set.
- */
-
- if (!(dd->ipath_flags & IPATH_PRESENT) ||
- !dd->ipath_kregbase)
- continue;
-
- ipath_diag_inuse = 1;
- diag_set_link = 0;
- ret = 0;
+ dd = ipath_lookup(unit);
+
+ if (dd == NULL || !(dd->ipath_flags & IPATH_PRESENT) ||
+ !dd->ipath_kregbase) {
+ ret = -ENODEV;
goto bail;
}
- ret = -ENODEV;
-
-bail:
- spin_unlock_irqrestore(&ipath_devs_lock, flags);
+ fp->private_data = dd;
+ ipath_diag_inuse = 1;
+ diag_set_link = 0;
+ ret = 0;
/* Only expose a way to reset the device if we
make it into diag mode. */
- if (ret == 0)
- ipath_expose_reset(&dd->pcidev->dev);
+ ipath_expose_reset(&dd->pcidev->dev);
+bail:
mutex_unlock(&ipath_mutex);
return ret;
}
-static int ipath_diag_release(struct inode *i, struct file *f)
+static int ipath_diag_release(struct inode *in, struct file *fp)
{
mutex_lock(&ipath_mutex);
ipath_diag_inuse = 0;
+ fp->private_data = NULL;
mutex_unlock(&ipath_mutex);
return 0;
}
static ssize_t ipath_diag_read(struct file *fp, char __user *data,
size_t count, loff_t *off)
{
- int unit = 0; /* XXX provide for reads on other units some day */
- struct ipath_devdata *dd;
+ struct ipath_devdata *dd = fp->private_data;
void __iomem *kreg_base;
ssize_t ret;
- dd = ipath_lookup(unit);
- if (!dd) {
- ret = -ENODEV;
- goto bail;
- }
-
kreg_base = dd->ipath_kregbase;
if (count == 0)
ret = count;
}
-bail:
return ret;
}
static ssize_t ipath_diag_write(struct file *fp, const char __user *data,
size_t count, loff_t *off)
{
- int unit = 0; /* XXX this is bogus */
- struct ipath_devdata *dd;
+ struct ipath_devdata *dd = fp->private_data;
void __iomem *kreg_base;
ssize_t ret;
- dd = ipath_lookup(unit);
- if (!dd) {
- ret = -ENODEV;
- goto bail;
- }
kreg_base = dd->ipath_kregbase;
if (count == 0)
ret = count;
}
-bail:
return ret;
}
/*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
#include <linux/vmalloc.h>
#include "ipath_kernel.h"
-#include "ips_common.h"
#include "ipath_layer.h"
+#include "ipath_common.h"
static void ipath_update_pio_bufs(struct ipath_devdata *);
EXPORT_SYMBOL_GPL(ipath_get_unit_name);
-#define DRIVER_LOAD_MSG "PathScale " IPATH_DRV_NAME " loaded: "
+#define DRIVER_LOAD_MSG "QLogic " IPATH_DRV_NAME " loaded: "
#define PFX IPATH_DRV_NAME ": "
/*
EXPORT_SYMBOL_GPL(ipath_debug);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("PathScale <support@pathscale.com>");
-MODULE_DESCRIPTION("Pathscale InfiniPath driver");
+MODULE_AUTHOR("QLogic <support@pathscale.com>");
+MODULE_DESCRIPTION("QLogic InfiniPath driver");
const char *ipath_ibcstatus_str[] = {
"Disabled",
.id_table = ipath_pci_tbl,
};
-/*
- * This is where port 0's rcvhdrtail register is written back; we also
- * want nothing else sharing the cache line, so make it a cache line
- * in size. Used for all units.
- */
-volatile __le64 *ipath_port0_rcvhdrtail;
-dma_addr_t ipath_port0_rcvhdrtail_dma;
-static int port0_rcvhdrtail_refs;
static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
u32 *bar0, u32 *bar1)
list_del(&dd->ipath_list);
spin_unlock_irqrestore(&ipath_devs_lock, flags);
}
- dma_free_coherent(&pdev->dev, sizeof(*dd), dd, dd->ipath_dma_addr);
+ vfree(dd);
}
static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
{
unsigned long flags;
struct ipath_devdata *dd;
- dma_addr_t dma_addr;
int ret;
if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
goto bail;
}
- dd = dma_alloc_coherent(&pdev->dev, sizeof(*dd), &dma_addr,
- GFP_KERNEL);
-
+ dd = vmalloc(sizeof(*dd));
if (!dd) {
dd = ERR_PTR(-ENOMEM);
goto bail;
}
-
- dd->ipath_dma_addr = dma_addr;
+ memset(dd, 0, sizeof(*dd));
dd->ipath_unit = -1;
spin_lock_irqsave(&ipath_devs_lock, flags);
return nunits;
}
-static int init_port0_rcvhdrtail(struct pci_dev *pdev)
-{
- int ret;
-
- mutex_lock(&ipath_mutex);
-
- if (!ipath_port0_rcvhdrtail) {
- ipath_port0_rcvhdrtail =
- dma_alloc_coherent(&pdev->dev,
- IPATH_PORT0_RCVHDRTAIL_SIZE,
- &ipath_port0_rcvhdrtail_dma,
- GFP_KERNEL);
-
- if (!ipath_port0_rcvhdrtail) {
- ret = -ENOMEM;
- goto bail;
- }
- }
- port0_rcvhdrtail_refs++;
- ret = 0;
-
-bail:
- mutex_unlock(&ipath_mutex);
-
- return ret;
-}
-
-static void cleanup_port0_rcvhdrtail(struct pci_dev *pdev)
-{
- mutex_lock(&ipath_mutex);
-
- if (!--port0_rcvhdrtail_refs) {
- dma_free_coherent(&pdev->dev, IPATH_PORT0_RCVHDRTAIL_SIZE,
- (void *) ipath_port0_rcvhdrtail,
- ipath_port0_rcvhdrtail_dma);
- ipath_port0_rcvhdrtail = NULL;
- }
-
- mutex_unlock(&ipath_mutex);
-}
-
/*
* These next two routines are placeholders in case we don't have per-arch
* code for controlling write combining. If explicit control of write
u32 bar0 = 0, bar1 = 0;
u8 rev;
- ret = init_port0_rcvhdrtail(pdev);
- if (ret < 0) {
- printk(KERN_ERR IPATH_DRV_NAME
- ": Could not allocate port0_rcvhdrtail: error %d\n",
- -ret);
- goto bail;
- }
-
dd = ipath_alloc_devdata(pdev);
if (IS_ERR(dd)) {
ret = PTR_ERR(dd);
printk(KERN_ERR IPATH_DRV_NAME
": Could not allocate devdata: error %d\n", -ret);
- goto bail_rcvhdrtail;
+ goto bail;
}
ipath_cdbg(VERBOSE, "initializing unit #%u\n", dd->ipath_unit);
*/
ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
if (ret) {
- dev_info(&pdev->dev, "pci_set_dma_mask unit %u "
- "fails: %d\n", dd->ipath_unit, ret);
+ dev_info(&pdev->dev,
+ "Unable to set DMA mask for unit %u: %d\n",
+ dd->ipath_unit, ret);
goto bail_regions;
}
- else
+ else {
ipath_dbg("No 64bit DMA mask, used 32 bit mask\n");
+ ret = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ if (ret)
+ dev_info(&pdev->dev,
+ "Unable to set DMA consistent mask "
+ "for unit %u: %d\n",
+ dd->ipath_unit, ret);
+
+ }
+ }
+ else {
+ ret = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+ if (ret)
+ dev_info(&pdev->dev,
+ "Unable to set DMA consistent mask "
+ "for unit %u: %d\n",
+ dd->ipath_unit, ret);
}
pci_set_master(pdev);
ipath_init_pe800_funcs(dd);
break;
default:
- ipath_dev_err(dd, "Found unknown PathScale deviceid 0x%x, "
+ ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
"failing\n", ent->device);
return -ENODEV;
}
((void __iomem *)dd->ipath_kregbase + len);
dd->ipath_physaddr = addr; /* used for io_remap, etc. */
/* for user mmap */
- dd->ipath_kregvirt = (u64 __iomem *) phys_to_virt(addr);
- ipath_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p "
- "kregvirt %p\n", addr, dd->ipath_kregbase,
- dd->ipath_kregvirt);
+ ipath_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p\n",
+ addr, dd->ipath_kregbase);
/*
* clear ipath_flags here instead of in ipath_init_chip as it is set
* by ipath_setup_htconfig.
*/
dd->ipath_flags = 0;
+ dd->ipath_lli_counter = 0;
+ dd->ipath_lli_errors = 0;
if (dd->ipath_f_bus(dd, pdev))
ipath_dev_err(dd, "Failed to setup config space; "
ipath_device_create_group(&pdev->dev, dd);
ipathfs_add_device(dd);
ipath_user_add(dd);
+ ipath_diag_add(dd);
ipath_layer_add(dd);
goto bail;
bail_devdata:
ipath_free_devdata(pdev, dd);
-bail_rcvhdrtail:
- cleanup_port0_rcvhdrtail(pdev);
-
bail:
return ret;
}
return;
dd = pci_get_drvdata(pdev);
- ipath_layer_del(dd);
- ipath_user_del(dd);
+ ipath_layer_remove(dd);
+ ipath_diag_remove(dd);
+ ipath_user_remove(dd);
ipathfs_remove_device(dd);
ipath_device_remove_group(&pdev->dev, dd);
ipath_cdbg(VERBOSE, "Releasing pci memory regions, dd %p, "
pci_disable_device(pdev);
ipath_free_devdata(pdev, dd);
- cleanup_port0_rcvhdrtail(pdev);
}
/* general driver use */
u8 pad, *bthbytes;
struct sk_buff *skb, *nskb;
- if (dd->ipath_port0_skbs && hdr->sub_opcode == OPCODE_ENCAP) {
+ if (dd->ipath_port0_skbs &&
+ hdr->sub_opcode == IPATH_ITH4X_OPCODE_ENCAP) {
/*
* Allocate a new sk_buff to replace the one we give
* to the network stack.
/* another ether packet received */
ipath_stats.sps_ether_rpkts++;
}
- else if (hdr->sub_opcode == OPCODE_LID_ARP)
+ else if (hdr->sub_opcode == IPATH_ITH4X_OPCODE_LID_ARP)
__ipath_layer_rcv_lid(dd, hdr);
}
const u32 rsize = dd->ipath_rcvhdrentsize; /* words */
const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize; /* words */
u32 etail = -1, l, hdrqtail;
- struct ips_message_header *hdr;
- u32 eflags, i, etype, tlen, pkttot = 0;
+ struct ipath_message_header *hdr;
+ u32 eflags, i, etype, tlen, pkttot = 0, updegr=0, reloop=0;
static u64 totcalls; /* stats, may eventually remove */
char emsg[128];
if (test_and_set_bit(0, &dd->ipath_rcv_pending))
goto bail;
- if (dd->ipath_port0head ==
- (u32)le64_to_cpu(*dd->ipath_hdrqtailptr))
+ l = dd->ipath_port0head;
+ hdrqtail = (u32) le64_to_cpu(*dd->ipath_hdrqtailptr);
+ if (l == hdrqtail)
goto done;
-gotmore:
- /*
- * read only once at start. If in flood situation, this helps
- * performance slightly. If more arrive while we are processing,
- * we'll come back here and do them
- */
- hdrqtail = (u32)le64_to_cpu(*dd->ipath_hdrqtailptr);
-
- for (i = 0, l = dd->ipath_port0head; l != hdrqtail; i++) {
+reloop:
+ for (i = 0; l != hdrqtail; i++) {
u32 qp;
u8 *bthbytes;
rc = (u64 *) (dd->ipath_pd[0]->port_rcvhdrq + (l << 2));
- hdr = (struct ips_message_header *)&rc[1];
+ hdr = (struct ipath_message_header *)&rc[1];
/*
* could make a network order version of IPATH_KD_QP, and
* do the obvious shift before masking to speed this up.
qp = ntohl(hdr->bth[1]) & 0xffffff;
bthbytes = (u8 *) hdr->bth;
- eflags = ips_get_hdr_err_flags((__le32 *) rc);
- etype = ips_get_rcv_type((__le32 *) rc);
+ eflags = ipath_hdrget_err_flags((__le32 *) rc);
+ etype = ipath_hdrget_rcv_type((__le32 *) rc);
/* total length */
- tlen = ips_get_length_in_bytes((__le32 *) rc);
+ tlen = ipath_hdrget_length_in_bytes((__le32 *) rc);
ebuf = NULL;
if (etype != RCVHQ_RCV_TYPE_EXPECTED) {
/*
* set ebuf (so we try to copy data) unless the
* length requires it.
*/
- etail = ips_get_index((__le32 *) rc);
+ etail = ipath_hdrget_index((__le32 *) rc);
if (tlen > sizeof(*hdr) ||
etype == RCVHQ_RCV_TYPE_NON_KD)
ebuf = ipath_get_egrbuf(dd, etail, 0);
*/
if (etype != RCVHQ_RCV_TYPE_NON_KD && etype !=
- RCVHQ_RCV_TYPE_ERROR && ips_get_ipath_ver(
+ RCVHQ_RCV_TYPE_ERROR && ipath_hdrget_ipath_ver(
hdr->iph.ver_port_tid_offset) !=
IPS_PROTO_VERSION) {
ipath_cdbg(PKT, "Bad InfiniPath protocol version "
ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
"tlen=%x opcode=%x egridx=%x: %s\n",
eflags, l, etype, tlen, bthbytes[0],
- ips_get_index((__le32 *) rc), emsg);
+ ipath_hdrget_index((__le32 *) rc), emsg);
+ /* Count local link integrity errors. */
+ if (eflags & (INFINIPATH_RHF_H_ICRCERR |
+ INFINIPATH_RHF_H_VCRCERR)) {
+ u8 n = (dd->ipath_ibcctrl >>
+ INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
+ INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
+
+ if (++dd->ipath_lli_counter > n) {
+ dd->ipath_lli_counter = 0;
+ dd->ipath_lli_errors++;
+ }
+ }
} else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
int ret = __ipath_verbs_rcv(dd, rc + 1,
ebuf, tlen);
ipath_cdbg(VERBOSE,
"received IB packet, "
"not SMA (QP=%x)\n", qp);
+ if (dd->ipath_lli_counter)
+ dd->ipath_lli_counter--;
+
} else if (etype == RCVHQ_RCV_TYPE_EAGER) {
if (qp == IPATH_KD_QP &&
bthbytes[0] == ipath_layer_rcv_opcode &&
l += rsize;
if (l >= maxcnt)
l = 0;
+ if (etype != RCVHQ_RCV_TYPE_EXPECTED)
+ updegr = 1;
/*
- * update for each packet, to help prevent overflows if we
- * have lots of packets.
+ * update head regs on last packet, and every 16 packets.
+ * Reduce bus traffic, while still trying to prevent
+ * rcvhdrq overflows, for when the queue is nearly full
*/
- (void)ipath_write_ureg(dd, ur_rcvhdrhead,
- dd->ipath_rhdrhead_intr_off | l, 0);
- if (etype != RCVHQ_RCV_TYPE_EXPECTED)
- (void)ipath_write_ureg(dd, ur_rcvegrindexhead,
- etail, 0);
+ if (l == hdrqtail || (i && !(i&0xf))) {
+ u64 lval;
+ if (l == hdrqtail) /* PE-800 interrupt only on last */
+ lval = dd->ipath_rhdrhead_intr_off | l;
+ else
+ lval = l;
+ (void)ipath_write_ureg(dd, ur_rcvhdrhead, lval, 0);
+ if (updegr) {
+ (void)ipath_write_ureg(dd, ur_rcvegrindexhead,
+ etail, 0);
+ updegr = 0;
+ }
+ }
+ }
+
+ if (!dd->ipath_rhdrhead_intr_off && !reloop) {
+ /* HT-400 workaround; we can have a race clearing chip
+ * interrupt with another interrupt about to be delivered,
+ * and can clear it before it is delivered on the GPIO
+ * workaround. By doing the extra check here for the
+ * in-memory tail register updating while we were doing
+ * earlier packets, we "almost" guarantee we have covered
+ * that case.
+ */
+ u32 hqtail = (u32)le64_to_cpu(*dd->ipath_hdrqtailptr);
+ if (hqtail != hdrqtail) {
+ hdrqtail = hqtail;
+ reloop = 1; /* loop 1 extra time at most */
+ goto reloop;
+ }
}
pkttot += i;
dd->ipath_port0head = l;
- if (hdrqtail != (u32)le64_to_cpu(*dd->ipath_hdrqtailptr))
- /* more arrived while we handled first batch */
- goto gotmore;
-
if (pkttot > ipath_stats.sps_maxpkts_call)
ipath_stats.sps_maxpkts_call = pkttot;
ipath_stats.sps_port0pkts += pkttot;
* @dd: the infinipath device
* @pd: the port data
*
- * this *must* be physically contiguous memory, and for now,
- * that limits it to what kmalloc can do.
+ * this must be contiguous memory (from an i/o perspective), and must be
+ * DMA'able (which means for some systems, it will go through an IOMMU,
+ * or be forced into a low address range).
*/
int ipath_create_rcvhdrq(struct ipath_devdata *dd,
struct ipath_portdata *pd)
{
- int ret = 0, amt;
+ int ret = 0;
- amt = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
- sizeof(u32), PAGE_SIZE);
if (!pd->port_rcvhdrq) {
- /*
- * not using REPEAT isn't viable; at 128KB, we can easily
- * fail this. The problem with REPEAT is we can block here
- * "forever". There isn't an inbetween, unfortunately. We
- * could reduce the risk by never freeing the rcvhdrq except
- * at unload, but even then, the first time a port is used,
- * we could delay for some time...
- */
+ dma_addr_t phys_hdrqtail;
gfp_t gfp_flags = GFP_USER | __GFP_COMP;
+ int amt = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
+ sizeof(u32), PAGE_SIZE);
pd->port_rcvhdrq = dma_alloc_coherent(
&dd->pcidev->dev, amt, &pd->port_rcvhdrq_phys,
ret = -ENOMEM;
goto bail;
}
+ pd->port_rcvhdrtail_kvaddr = dma_alloc_coherent(
+ &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail, GFP_KERNEL);
+ if (!pd->port_rcvhdrtail_kvaddr) {
+ ipath_dev_err(dd, "attempt to allocate 1 page "
+ "for port %u rcvhdrqtailaddr failed\n",
+ pd->port_port);
+ ret = -ENOMEM;
+ goto bail;
+ }
+ pd->port_rcvhdrqtailaddr_phys = phys_hdrqtail;
pd->port_rcvhdrq_size = amt;
(unsigned long) pd->port_rcvhdrq_phys,
(unsigned long) pd->port_rcvhdrq_size,
pd->port_port);
- } else {
- /*
- * clear for security, sanity, and/or debugging, each
- * time we reuse
- */
- memset(pd->port_rcvhdrq, 0, amt);
+
+ ipath_cdbg(VERBOSE, "port %d hdrtailaddr, %llx physical\n",
+ pd->port_port,
+ (unsigned long long) phys_hdrqtail);
}
+ else
+ ipath_cdbg(VERBOSE, "reuse port %d rcvhdrq @%p %llx phys; "
+ "hdrtailaddr@%p %llx physical\n",
+ pd->port_port, pd->port_rcvhdrq,
+ pd->port_rcvhdrq_phys, pd->port_rcvhdrtail_kvaddr,
+ (unsigned long long)pd->port_rcvhdrqtailaddr_phys);
+
+ /* clear for security and sanity on each use */
+ memset(pd->port_rcvhdrq, 0, pd->port_rcvhdrq_size);
+ memset((void *)pd->port_rcvhdrtail_kvaddr, 0, PAGE_SIZE);
/*
* tell chip each time we init it, even if we are re-using previous
- * memory (we zero it at process close)
+ * memory (we zero the register at process close)
*/
- ipath_cdbg(VERBOSE, "writing port %d rcvhdraddr as %lx\n",
- pd->port_port, (unsigned long) pd->port_rcvhdrq_phys);
+ ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
+ pd->port_port, pd->port_rcvhdrqtailaddr_phys);
ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
pd->port_port, pd->port_rcvhdrq_phys);
[INFINIPATH_IBCC_LINKCMD_ARMED] = "ARMED",
[INFINIPATH_IBCC_LINKCMD_ACTIVE] = "ACTIVE"
};
+ int linkcmd = (which >> INFINIPATH_IBCC_LINKCMD_SHIFT) &
+ INFINIPATH_IBCC_LINKCMD_MASK;
+
ipath_cdbg(SMA, "Trying to move unit %u to %s, current ltstate "
"is %s\n", dd->ipath_unit,
- what[(which >> INFINIPATH_IBCC_LINKCMD_SHIFT) &
- INFINIPATH_IBCC_LINKCMD_MASK],
+ what[linkcmd],
ipath_ibcstatus_str[
(ipath_read_kreg64
(dd, dd->ipath_kregs->kr_ibcstatus) >>
INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
INFINIPATH_IBCS_LINKTRAININGSTATE_MASK]);
+ /* flush all queued sends when going to DOWN or INIT, to be sure that
+ * they don't block SMA and other MAD packets */
+ if (!linkcmd || linkcmd == INFINIPATH_IBCC_LINKCMD_INIT) {
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+ INFINIPATH_S_ABORT);
+ ipath_disarm_piobufs(dd, dd->ipath_lastport_piobuf,
+ (unsigned)(dd->ipath_piobcnt2k +
+ dd->ipath_piobcnt4k) -
+ dd->ipath_lastport_piobuf);
+ }
ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
dd->ipath_ibcctrl | which);
/* disable IBC */
dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
- dd->ipath_control);
+ dd->ipath_control | INFINIPATH_C_FREEZEMODE);
/*
* clear SerdesEnable and turn the leds off; do this here because
/**
* ipath_free_pddata - free a port's allocated data
* @dd: the infinipath device
- * @port: the port
- * @freehdrq: free the port data structure if true
+ * @pd: the portdata structure
*
- * when closing, free up any allocated data for a port, if the
- * reference count goes to zero
- * Note: this also optionally frees the portdata itself!
- * Any changes here have to be matched up with the reinit case
- * of ipath_init_chip(), which calls this routine on reinit after reset.
+ * free up any allocated data for a port
+ * This should not touch anything that would affect a simultaneous
+ * re-allocation of port data, because it is called after ipath_mutex
+ * is released (and can be called from reinit as well).
+ * It should never change any chip state, or global driver state.
+ * (The only exception to global state is freeing the port0 port0_skbs.)
*/
-void ipath_free_pddata(struct ipath_devdata *dd, u32 port, int freehdrq)
+void ipath_free_pddata(struct ipath_devdata *dd, struct ipath_portdata *pd)
{
- struct ipath_portdata *pd = dd->ipath_pd[port];
-
if (!pd)
return;
- if (freehdrq)
- /*
- * only clear and free portdata if we are going to also
- * release the hdrq, otherwise we leak the hdrq on each
- * open/close cycle
- */
- dd->ipath_pd[port] = NULL;
- if (freehdrq && pd->port_rcvhdrq) {
+
+ if (pd->port_rcvhdrq) {
ipath_cdbg(VERBOSE, "free closed port %d rcvhdrq @ %p "
"(size=%lu)\n", pd->port_port, pd->port_rcvhdrq,
(unsigned long) pd->port_rcvhdrq_size);
dma_free_coherent(&dd->pcidev->dev, pd->port_rcvhdrq_size,
pd->port_rcvhdrq, pd->port_rcvhdrq_phys);
pd->port_rcvhdrq = NULL;
+ if (pd->port_rcvhdrtail_kvaddr) {
+ dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
+ (void *)pd->port_rcvhdrtail_kvaddr,
+ pd->port_rcvhdrqtailaddr_phys);
+ pd->port_rcvhdrtail_kvaddr = NULL;
+ }
}
- if (port && pd->port_rcvegrbuf) {
- /* always free this */
- if (pd->port_rcvegrbuf) {
- unsigned e;
-
- for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
- void *base = pd->port_rcvegrbuf[e];
- size_t size = pd->port_rcvegrbuf_size;
-
- ipath_cdbg(VERBOSE, "egrbuf free(%p, %lu), "
- "chunk %u/%u\n", base,
- (unsigned long) size,
- e, pd->port_rcvegrbuf_chunks);
- dma_free_coherent(
- &dd->pcidev->dev, size, base,
- pd->port_rcvegrbuf_phys[e]);
- }
- vfree(pd->port_rcvegrbuf);
- pd->port_rcvegrbuf = NULL;
- vfree(pd->port_rcvegrbuf_phys);
- pd->port_rcvegrbuf_phys = NULL;
+ if (pd->port_port && pd->port_rcvegrbuf) {
+ unsigned e;
+
+ for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
+ void *base = pd->port_rcvegrbuf[e];
+ size_t size = pd->port_rcvegrbuf_size;
+
+ ipath_cdbg(VERBOSE, "egrbuf free(%p, %lu), "
+ "chunk %u/%u\n", base,
+ (unsigned long) size,
+ e, pd->port_rcvegrbuf_chunks);
+ dma_free_coherent(&dd->pcidev->dev, size,
+ base, pd->port_rcvegrbuf_phys[e]);
}
+ vfree(pd->port_rcvegrbuf);
+ pd->port_rcvegrbuf = NULL;
+ vfree(pd->port_rcvegrbuf_phys);
+ pd->port_rcvegrbuf_phys = NULL;
pd->port_rcvegrbuf_chunks = 0;
- } else if (port == 0 && dd->ipath_port0_skbs) {
+ } else if (pd->port_port == 0 && dd->ipath_port0_skbs) {
unsigned e;
struct sk_buff **skbs = dd->ipath_port0_skbs;
dev_kfree_skb(skbs[e]);
vfree(skbs);
}
- if (freehdrq) {
- kfree(pd->port_tid_pg_list);
- kfree(pd);
- }
+ kfree(pd->port_tid_pg_list);
+ kfree(pd);
}
static int __init infinipath_init(void)
* re-init
*/
dd->ipath_kregbase = NULL;
- dd->ipath_kregvirt = NULL;
dd->ipath_uregbase = 0;
dd->ipath_sregbase = 0;
dd->ipath_cregbase = 0;
dd->ipath_pioavailregs_phys);
dd->ipath_pioavailregs_dma = NULL;
}
+ if (dd->ipath_dummy_hdrq) {
+ dma_free_coherent(&dd->pcidev->dev,
+ dd->ipath_pd[0]->port_rcvhdrq_size,
+ dd->ipath_dummy_hdrq, dd->ipath_dummy_hdrq_phys);
+ dd->ipath_dummy_hdrq = NULL;
+ }
if (dd->ipath_pageshadow) {
struct page **tmpp = dd->ipath_pageshadow;
/*
* free any resources still in use (usually just kernel ports)
- * at unload
+ * at unload; we do for portcnt, not cfgports, because cfgports
+ * could have changed while we were loaded.
*/
- for (port = 0; port < dd->ipath_cfgports; port++)
- ipath_free_pddata(dd, port, 1);
+ for (port = 0; port < dd->ipath_portcnt; port++) {
+ struct ipath_portdata *pd = dd->ipath_pd[port];
+ dd->ipath_pd[port] = NULL;
+ ipath_free_pddata(dd, pd);
+ }
kfree(dd->ipath_pd);
/*
* debuggability, in case some cleanup path tries to use it
/*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
guid = *(__be64 *) ifp->if_guid;
dd->ipath_guid = guid;
dd->ipath_nguid = ifp->if_numguid;
- memcpy(dd->ipath_serial, ifp->if_serial,
- sizeof(ifp->if_serial));
+ /*
+ * Things are slightly complicated by the desire to transparently
+ * support both the Pathscale 10-digit serial number and the QLogic
+ * 13-character version.
+ */
+ if ((ifp->if_fversion > 1) && ifp->if_sprefix[0]
+ && ((u8 *)ifp->if_sprefix)[0] != 0xFF) {
+ /* This board has a Serial-prefix, which is stored
+ * elsewhere for backward-compatibility.
+ */
+ char *snp = dd->ipath_serial;
+ int len;
+ memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix);
+ snp[sizeof ifp->if_sprefix] = '\0';
+ len = strlen(snp);
+ snp += len;
+ len = (sizeof dd->ipath_serial) - len;
+ if (len > sizeof ifp->if_serial) {
+ len = sizeof ifp->if_serial;
+ }
+ memcpy(snp, ifp->if_serial, len);
+ } else
+ memcpy(dd->ipath_serial, ifp->if_serial,
+ sizeof ifp->if_serial);
+
ipath_cdbg(VERBOSE, "Initted GUID to %llx from eeprom\n",
(unsigned long long) be64_to_cpu(dd->ipath_guid));
/*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
#include <asm/pgtable.h>
#include "ipath_kernel.h"
-#include "ips_common.h"
#include "ipath_layer.h"
+#include "ipath_common.h"
static int ipath_open(struct inode *, struct file *);
static int ipath_close(struct inode *, struct file *);
* on to yet another method of dealing with this
*/
kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys;
+ kinfo->spi_rcvhdr_tailaddr = (u64)pd->port_rcvhdrqtailaddr_phys;
kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys;
kinfo->spi_pioavailaddr = (u64) dd->ipath_pioavailregs_phys;
kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
u16 lkey = key & 0x7FFF;
int ret;
- if (lkey == (IPS_DEFAULT_P_KEY & 0x7FFF)) {
+ if (lkey == (IPATH_DEFAULT_P_KEY & 0x7FFF)) {
/* nothing to do; this key always valid */
ret = 0;
goto bail;
unsigned e, egrcnt, alloced, egrperchunk, chunk, egrsize, egroff;
size_t size;
int ret;
+ gfp_t gfp_flags;
+
+ /*
+ * GFP_USER, but without GFP_FS, so buffer cache can be
+ * coalesced (we hope); otherwise, even at order 4,
+ * heavy filesystem activity makes these fail, and we can
+ * use compound pages.
+ */
+ gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
egrcnt = dd->ipath_rcvegrcnt;
/* TID number offset for this port */
* memory pressure (creating large files and then copying them over
* NFS while doing lots of MPI jobs), we hit some allocation
* failures, even though we can sleep... (2.6.10) Still get
- * failures at 64K. 32K is the lowest we can go without waiting
- * more memory again. It seems likely that the coalescing in
- * free_pages, etc. still has issues (as it has had previously
- * during 2.6.x development).
+ * failures at 64K. 32K is the lowest we can go without wasting
+ * additional memory.
*/
size = 0x8000;
alloced = ALIGN(egrsize * egrcnt, size);
goto bail_rcvegrbuf;
}
for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
- /*
- * GFP_USER, but without GFP_FS, so buffer cache can be
- * coalesced (we hope); otherwise, even at order 4,
- * heavy filesystem activity makes these fail
- */
- gfp_t gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
pd->port_rcvegrbuf[e] = dma_alloc_coherent(
&dd->pcidev->dev, size, &pd->port_rcvegrbuf_phys[e],
bail_rcvegrbuf_phys:
for (e = 0; e < pd->port_rcvegrbuf_chunks &&
- pd->port_rcvegrbuf[e]; e++)
+ pd->port_rcvegrbuf[e]; e++) {
dma_free_coherent(&dd->pcidev->dev, size,
pd->port_rcvegrbuf[e],
pd->port_rcvegrbuf_phys[e]);
+ }
vfree(pd->port_rcvegrbuf_phys);
pd->port_rcvegrbuf_phys = NULL;
bail_rcvegrbuf:
{
int ret = 0;
struct ipath_devdata *dd = pd->port_dd;
- u64 physaddr, uaddr, off, atmp;
- struct page *pagep;
u32 head32;
- u64 head;
/* for now, if major version is different, bail */
if ((uinfo->spu_userversion >> 16) != IPATH_USER_SWMAJOR) {
/* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */
- /* set up for the rcvhdr Q tail register writeback to user memory */
- if (!uinfo->spu_rcvhdraddr ||
- !access_ok(VERIFY_WRITE, (u64 __user *) (unsigned long)
- uinfo->spu_rcvhdraddr, sizeof(u64))) {
- ipath_dbg("Port %d rcvhdrtail addr %llx not valid\n",
- pd->port_port,
- (unsigned long long) uinfo->spu_rcvhdraddr);
- ret = -EINVAL;
- goto done;
- }
-
- off = offset_in_page(uinfo->spu_rcvhdraddr);
- uaddr = PAGE_MASK & (unsigned long) uinfo->spu_rcvhdraddr;
- ret = ipath_get_user_pages_nocopy(uaddr, &pagep);
- if (ret) {
- dev_info(&dd->pcidev->dev, "Failed to lookup and lock "
- "address %llx for rcvhdrtail: errno %d\n",
- (unsigned long long) uinfo->spu_rcvhdraddr, -ret);
- goto done;
- }
- ipath_stats.sps_pagelocks++;
- pd->port_rcvhdrtail_uaddr = uaddr;
- pd->port_rcvhdrtail_pagep = pagep;
- pd->port_rcvhdrtail_kvaddr =
- page_address(pagep);
- pd->port_rcvhdrtail_kvaddr += off;
- physaddr = page_to_phys(pagep) + off;
- ipath_cdbg(VERBOSE, "port %d user addr %llx hdrtailaddr, %llx "
- "physical (off=%llx)\n",
- pd->port_port,
- (unsigned long long) uinfo->spu_rcvhdraddr,
- (unsigned long long) physaddr, (unsigned long long) off);
- ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
- pd->port_port, physaddr);
- atmp = ipath_read_kreg64_port(dd,
- dd->ipath_kregs->kr_rcvhdrtailaddr,
- pd->port_port);
- if (physaddr != atmp) {
- ipath_dev_err(dd,
- "Catastrophic software error, "
- "RcvHdrTailAddr%u written as %llx, "
- "read back as %llx\n", pd->port_port,
- (unsigned long long) physaddr,
- (unsigned long long) atmp);
- ret = -EINVAL;
- goto done;
- }
-
/* for right now, kernel piobufs are at end, so port 1 is at 0 */
pd->port_piobufs = dd->ipath_piobufbase +
dd->ipath_pbufsport * (pd->port_port -
ret = ipath_create_user_egr(pd);
if (ret)
goto done;
- /* enable receives now */
- /* atomically set enable bit for this port */
- set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
- &dd->ipath_rcvctrl);
/*
- * set the head registers for this port to the current values
+ * set the eager head register for this port to the current values
* of the tail pointers, since we don't know if they were
* updated on last use of the port.
*/
- head32 = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
- head = (u64) head32;
- ipath_write_ureg(dd, ur_rcvhdrhead, head, pd->port_port);
head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port);
ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port);
dd->ipath_lastegrheads[pd->port_port] = -1;
dd->ipath_lastrcvhdrqtails[pd->port_port] = -1;
- ipath_cdbg(VERBOSE, "Wrote port%d head %llx, egrhead %x from "
- "tail regs\n", pd->port_port,
- (unsigned long long) head, head32);
+ ipath_cdbg(VERBOSE, "Wrote port%d egrhead %x from tail regs\n",
+ pd->port_port, head32);
pd->port_tidcursor = 0; /* start at beginning after open */
/*
* now enable the port; the tail registers will be written to memory
* transition from 0 to 1, so clear it first, then set it as part of
* enabling the port. This will (very briefly) affect any other
* open ports, but it shouldn't be long enough to be an issue.
+ * We explicitly set the in-memory copy to 0 beforehand, so we don't
+ * have to wait to be sure the DMA update has happened.
*/
+ *pd->port_rcvhdrtail_kvaddr = 0ULL;
+ set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
+ &dd->ipath_rcvctrl);
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
dd->ipath_rcvctrl & ~INFINIPATH_R_TAILUPD);
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
dd->ipath_rcvctrl);
-
done:
return ret;
}
+
+/* common code for the mappings on dma_alloc_coherent mem */
+static int ipath_mmap_mem(struct vm_area_struct *vma,
+ struct ipath_portdata *pd, unsigned len,
+ int write_ok, dma_addr_t addr, char *what)
+{
+ struct ipath_devdata *dd = pd->port_dd;
+ unsigned pfn = (unsigned long)addr >> PAGE_SHIFT;
+ int ret;
+
+ if ((vma->vm_end - vma->vm_start) > len) {
+ dev_info(&dd->pcidev->dev,
+ "FAIL on %s: len %lx > %x\n", what,
+ vma->vm_end - vma->vm_start, len);
+ ret = -EFAULT;
+ goto bail;
+ }
+
+ if (!write_ok) {
+ if (vma->vm_flags & VM_WRITE) {
+ dev_info(&dd->pcidev->dev,
+ "%s must be mapped readonly\n", what);
+ ret = -EPERM;
+ goto bail;
+ }
+
+ /* don't allow them to later change with mprotect */
+ vma->vm_flags &= ~VM_MAYWRITE;
+ }
+
+ ret = remap_pfn_range(vma, vma->vm_start, pfn,
+ len, vma->vm_page_prot);
+ if (ret)
+ dev_info(&dd->pcidev->dev,
+ "%s port%u mmap of %lx, %x bytes r%c failed: %d\n",
+ what, pd->port_port, (unsigned long)addr, len,
+			 write_ok ? 'w' : 'o', ret);
+ else
+ ipath_cdbg(VERBOSE, "%s port%u mmaped %lx, %x bytes r%c\n",
+ what, pd->port_port, (unsigned long)addr, len,
+			   write_ok ? 'w' : 'o');
+bail:
+ return ret;
+}
+
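For orientation, a minimal sketch of how this new helper gets invoked; it mirrors the pioavail-registers case in the ipath_mmap() dispatch later in this patch, with all names taken from the surrounding code:

	/* Map one read-only page of dma_alloc_coherent memory into the
	 * user's vma; write_ok == 0 also clears VM_MAYWRITE, so the user
	 * can't flip the mapping writable later with mprotect(). */
	ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
			     dd->ipath_pioavailregs_phys,
			     "pioavail registers");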
static int mmap_ureg(struct vm_area_struct *vma, struct ipath_devdata *dd,
u64 ureg)
{
unsigned long phys;
int ret;
- /* it's the real hardware, so io_remap works */
-
+ /*
+ * This is real hardware, so use io_remap. This is the mechanism
+ * for the user process to update the head registers for their port
+ * in the chip.
+ */
if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
dev_info(&dd->pcidev->dev, "FAIL mmap userreg: reqlen "
"%lx > PAGE\n", vma->vm_end - vma->vm_start);
int ret;
/*
- * When we map the PIO buffers, we want to map them as writeonly, no
- * read possible.
+ * When we map the PIO buffers in the chip, we want to map them as
+ * writeonly, no read possible. This prevents access to previous
+ * process data, and catches users who might try to read the i/o
+ * space due to a bug.
*/
-
if ((vma->vm_end - vma->vm_start) >
(dd->ipath_pbufsport * dd->ipath_palign)) {
dev_info(&dd->pcidev->dev, "FAIL mmap piobufs: "
}
phys = dd->ipath_physaddr + pd->port_piobufs;
+
/*
- * Do *NOT* mark this as non-cached (PWT bit), or we don't get the
+ * Don't mark this as non-cached, or we don't get the
* write combining behavior we want on the PIO buffers!
- * vma->vm_page_prot =
- * pgprot_noncached(vma->vm_page_prot);
*/
if (vma->vm_flags & VM_READ) {
}
/* don't allow them to later change to readable with mprotect */
-
- vma->vm_flags &= ~VM_MAYWRITE;
+ vma->vm_flags &= ~VM_MAYREAD;
vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
dma_addr_t *phys;
int ret;
- if (!pd->port_rcvegrbuf) {
- ret = -EFAULT;
- goto bail;
- }
-
size = pd->port_rcvegrbuf_size;
total_size = pd->port_rcvegrbuf_chunks * size;
if ((vma->vm_end - vma->vm_start) > total_size) {
ret = -EPERM;
goto bail;
}
+ /* don't allow them to later change to writeable with mprotect */
+ vma->vm_flags &= ~VM_MAYWRITE;
start = vma->vm_start;
phys = pd->port_rcvegrbuf_phys;
- /* don't allow them to later change to writeable with mprotect */
- vma->vm_flags &= ~VM_MAYWRITE;
-
for (i = 0; i < pd->port_rcvegrbuf_chunks; i++, start += size) {
ret = remap_pfn_range(vma, start, phys[i] >> PAGE_SHIFT,
size, vma->vm_page_prot);
return ret;
}
-static int mmap_rcvhdrq(struct vm_area_struct *vma,
- struct ipath_portdata *pd)
-{
- struct ipath_devdata *dd = pd->port_dd;
- size_t total_size;
- int ret;
-
- /*
- * kmalloc'ed memory, physically contiguous; this is from
- * spi_rcvhdr_base; we allow user to map read-write so they can
- * write hdrq entries to allow protocol code to directly poll
- * whether a hdrq entry has been written.
- */
- total_size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
- sizeof(u32), PAGE_SIZE);
- if ((vma->vm_end - vma->vm_start) > total_size) {
- dev_info(&dd->pcidev->dev,
- "FAIL on rcvhdrq: reqlen %lx > actual %lx\n",
- vma->vm_end - vma->vm_start,
- (unsigned long) total_size);
- ret = -EFAULT;
- goto bail;
- }
-
- ret = remap_pfn_range(vma, vma->vm_start,
- pd->port_rcvhdrq_phys >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
-bail:
- return ret;
-}
-
-static int mmap_pioavailregs(struct vm_area_struct *vma,
- struct ipath_portdata *pd)
-{
- struct ipath_devdata *dd = pd->port_dd;
- int ret;
-
- /*
- * when we map the PIO bufferavail registers, we want to map them as
- * readonly, no write possible.
- *
- * kmalloc'ed memory, physically contiguous, one page only, readonly
- */
-
- if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
- dev_info(&dd->pcidev->dev, "FAIL on pioavailregs_dma: "
- "reqlen %lx > actual %lx\n",
- vma->vm_end - vma->vm_start,
- (unsigned long) PAGE_SIZE);
- ret = -EFAULT;
- goto bail;
- }
-
- if (vma->vm_flags & VM_WRITE) {
- dev_info(&dd->pcidev->dev,
- "Can't map pioavailregs as writable (flags=%lx)\n",
- vma->vm_flags);
- ret = -EPERM;
- goto bail;
- }
-
- /* don't allow them to later change with mprotect */
- vma->vm_flags &= ~VM_MAYWRITE;
-
- ret = remap_pfn_range(vma, vma->vm_start,
- dd->ipath_pioavailregs_phys >> PAGE_SHIFT,
- PAGE_SIZE, vma->vm_page_prot);
-bail:
- return ret;
-}
-
/**
* ipath_mmap - mmap various structures into user space
* @fp: the file pointer
pd = port_fp(fp);
dd = pd->port_dd;
+
/*
* This is the ipath_do_user_init() code, mapping the shared buffers
* into the user process. The address referred to by vm_pgoff is the
pgaddr = vma->vm_pgoff << PAGE_SHIFT;
/*
- * note that ureg does *NOT* have the kregvirt as part of it, to be
- * sure that for 32 bit programs, we don't end up trying to map a >
- * 44 address. Has to match ipath_get_base_info() code that sets
- * __spi_uregbase
+	 * Must fit in 40 bits for our hardware; some of this is checked
+	 * elsewhere, but we'll be paranoid.  The check for 0 is mostly in
+	 * case one of the allocations failed, but the user called mmap
+	 * anyway.  We want to catch that before it can match.
*/
+	if (!pgaddr || pgaddr >= (1ULL << 40)) {
+ ipath_dev_err(dd, "Bad phys addr %llx, start %lx, end %lx\n",
+ (unsigned long long)pgaddr, vma->vm_start, vma->vm_end);
+ return -EINVAL;
+ }
+ /* just the offset of the port user registers, not physical addr */
ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port;
ipath_cdbg(MM, "ushare: pgaddr %llx vm_start=%lx, vmlen %lx\n",
(unsigned long long) pgaddr, vma->vm_start,
vma->vm_end - vma->vm_start);
- if (pgaddr == ureg)
+ if (vma->vm_start & (PAGE_SIZE-1)) {
+ ipath_dev_err(dd,
+ "vm_start not aligned: %lx, end=%lx phys %lx\n",
+ vma->vm_start, vma->vm_end, (unsigned long)pgaddr);
+ ret = -EINVAL;
+	} else if (pgaddr == ureg)
ret = mmap_ureg(vma, dd, ureg);
else if (pgaddr == pd->port_piobufs)
ret = mmap_piobufs(vma, dd, pd);
else if (pgaddr == (u64) pd->port_rcvegr_phys)
ret = mmap_rcvegrbufs(vma, pd);
- else if (pgaddr == (u64) pd->port_rcvhdrq_phys)
- ret = mmap_rcvhdrq(vma, pd);
+ else if (pgaddr == (u64) pd->port_rcvhdrq_phys) {
+ /*
+ * The rcvhdrq itself; readonly except on HT-400 (so have
+ * to allow writable mapping), multiple pages, contiguous
+ * from an i/o perspective.
+ */
+ unsigned total_size =
+			ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
+			      sizeof(u32), PAGE_SIZE);
+ ret = ipath_mmap_mem(vma, pd, total_size, 1,
+ pd->port_rcvhdrq_phys,
+ "rcvhdrq");
+	} else if (pgaddr == (u64) pd->port_rcvhdrqtailaddr_phys)
+ /* in-memory copy of rcvhdrq tail register */
+ ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
+ pd->port_rcvhdrqtailaddr_phys,
+ "rcvhdrq tail");
else if (pgaddr == dd->ipath_pioavailregs_phys)
- ret = mmap_pioavailregs(vma, pd);
+ /* in-memory copy of pioavail registers */
+ ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
+ dd->ipath_pioavailregs_phys,
+ "pioavail registers");
else
ret = -EINVAL;
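Since the dispatch above keys purely on pgaddr (vm_pgoff << PAGE_SHIFT), the user process selects a region by passing the physical address the driver exported to it as the mmap() offset. A hedged user-space sketch; spi_pioavailaddr is assumed to be the relevant ipath_get_base_info() field, and dev_fd, page_size and base_info are placeholders:

	#include <sys/mman.h>

	/* Illustrative only: map the in-memory pioavail shadow read-only.
	 * The driver matches the offset against ipath_pioavailregs_phys. */
	void *avail = mmap(NULL, page_size, PROT_READ, MAP_SHARED,
			   dev_fd, (off_t) base_info.spi_pioavailaddr);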
static int ipath_open(struct inode *in, struct file *fp)
{
- int ret, minor;
+ int ret, user_minor;
mutex_lock(&ipath_mutex);
- minor = iminor(in);
+ user_minor = iminor(in) - IPATH_USER_MINOR_BASE;
ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n",
- (long)in->i_rdev, minor);
+ (long)in->i_rdev, user_minor);
- if (minor)
- ret = find_free_port(minor - 1, fp);
+ if (user_minor)
+ ret = find_free_port(user_minor - 1, fp);
else
ret = find_best_unit(fp);
}
if (dd->ipath_kregbase) {
- if (pd->port_rcvhdrtail_uaddr) {
- pd->port_rcvhdrtail_uaddr = 0;
- pd->port_rcvhdrtail_kvaddr = NULL;
- ipath_release_user_pages_on_close(
- &pd->port_rcvhdrtail_pagep, 1);
- pd->port_rcvhdrtail_pagep = NULL;
- ipath_stats.sps_pageunlocks++;
- }
- ipath_write_kreg_port(
- dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
- port, 0ULL);
- ipath_write_kreg_port(
- dd, dd->ipath_kregs->kr_rcvhdraddr,
- pd->port_port, 0);
+ int i;
+ /* atomically clear receive enable port. */
+ clear_bit(INFINIPATH_R_PORTENABLE_SHIFT + port,
+ &dd->ipath_rcvctrl);
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
+ dd->ipath_rcvctrl);
+ /* and read back from chip to be sure that nothing
+ * else is in flight when we do the rest */
+ (void)ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
/* clean up the pkeys for this port user */
ipath_clean_part_key(pd, dd);
- if (port < dd->ipath_cfgports) {
- int i = dd->ipath_pbufsport * (port - 1);
- ipath_disarm_piobufs(dd, i, dd->ipath_pbufsport);
- /* atomically clear receive enable port. */
- clear_bit(INFINIPATH_R_PORTENABLE_SHIFT + port,
- &dd->ipath_rcvctrl);
- ipath_write_kreg(
- dd,
- dd->ipath_kregs->kr_rcvctrl,
- dd->ipath_rcvctrl);
-
- if (dd->ipath_pageshadow)
- unlock_expected_tids(pd);
- ipath_stats.sps_ports--;
- ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
- pd->port_comm, pd->port_pid,
- dd->ipath_unit, port);
- }
+ /*
+ * be paranoid, and never write 0's to these, just use an
+ * unused part of the port 0 tail page. Of course,
+ * rcvhdraddr points to a large chunk of memory, so this
+ * could still trash things, but at least it won't trash
+ * page 0, and by disabling the port, it should stop "soon",
+	 * even if a packet or two is already in flight after we
+ * disabled the port.
+ */
+ ipath_write_kreg_port(dd,
+ dd->ipath_kregs->kr_rcvhdrtailaddr, port,
+ dd->ipath_dummy_hdrq_phys);
+ ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
+ pd->port_port, dd->ipath_dummy_hdrq_phys);
+
+ i = dd->ipath_pbufsport * (port - 1);
+ ipath_disarm_piobufs(dd, i, dd->ipath_pbufsport);
+
+ if (dd->ipath_pageshadow)
+ unlock_expected_tids(pd);
+ ipath_stats.sps_ports--;
+ ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
+ pd->port_comm, pd->port_pid,
+ dd->ipath_unit, port);
+
+ dd->ipath_f_clear_tids(dd, pd->port_port);
}
pd->port_cnt = 0;
pd->port_pid = 0;
- dd->ipath_f_clear_tids(dd, pd->port_port);
-
- ipath_free_pddata(dd, pd->port_port, 0);
-
+ dd->ipath_pd[pd->port_port] = NULL; /* before releasing mutex */
mutex_unlock(&ipath_mutex);
+ ipath_free_pddata(dd, pd); /* after releasing the mutex */
return ret;
}
"error %d\n", -ret);
goto bail;
}
- ret = ipath_diag_init();
- if (ret < 0) {
- ipath_dev_err(dd, "Unable to set up diag support: "
- "error %d\n", -ret);
- goto bail_sma;
- }
-
ret = init_cdev(0, "ipath", &ipath_file_ops, &wildcard_cdev,
&wildcard_class_dev);
if (ret < 0) {
ipath_dev_err(dd, "Could not create wildcard "
"minor: error %d\n", -ret);
- goto bail_diag;
+ goto bail_sma;
}
atomic_set(&user_setup, 1);
snprintf(name, sizeof(name), "ipath%d", dd->ipath_unit);
ret = init_cdev(dd->ipath_unit + 1, name, &ipath_file_ops,
- &dd->cdev, &dd->class_dev);
+ &dd->user_cdev, &dd->user_class_dev);
if (ret < 0)
ipath_dev_err(dd, "Could not create user minor %d, %s\n",
dd->ipath_unit + 1, name);
goto bail;
-bail_diag:
- ipath_diag_cleanup();
bail_sma:
user_cleanup();
bail:
return ret;
}
-void ipath_user_del(struct ipath_devdata *dd)
+void ipath_user_remove(struct ipath_devdata *dd)
{
- cleanup_cdev(&dd->cdev, &dd->class_dev);
+ cleanup_cdev(&dd->user_cdev, &dd->user_class_dev);
if (atomic_dec_return(&user_count) == 0) {
if (atomic_read(&user_setup) == 0)
goto bail;
cleanup_cdev(&wildcard_cdev, &wildcard_class_dev);
- ipath_diag_cleanup();
user_cleanup();
atomic_set(&user_setup, 0);
bail:
return;
}
+
/*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
* Copyright (c) 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
/*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
dd->ipath_f_reset = ipath_setup_ht_reset;
dd->ipath_f_get_boardname = ipath_ht_boardname;
dd->ipath_f_init_hwerrors = ipath_ht_init_hwerrors;
- dd->ipath_f_init_hwerrors = ipath_ht_init_hwerrors;
dd->ipath_f_early_init = ipath_ht_early_init;
dd->ipath_f_handle_hwerrors = ipath_ht_handle_hwerrors;
dd->ipath_f_quiet_serdes = ipath_ht_quiet_serdes;
/*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
#include <linux/vmalloc.h>
#include "ipath_kernel.h"
-#include "ips_common.h"
+#include "ipath_common.h"
/*
* min buffers we want to have per port, after driver
"eager TID %u\n", e);
while (e != 0)
dev_kfree_skb(skbs[--e]);
+ vfree(skbs);
ret = -ENOMEM;
goto bail;
}
pd->port_port = 0;
pd->port_cnt = 1;
/* The port 0 pkey table is used by the layer interface. */
- pd->port_pkeys[0] = IPS_DEFAULT_P_KEY;
+ pd->port_pkeys[0] = IPATH_DEFAULT_P_KEY;
dd->ipath_rcvtidcnt =
ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);
dd->ipath_rcvtidbase =
/* and its length */
dd->ipath_freezelen = L1_CACHE_BYTES - sizeof(dd->ipath_statusp[0]);
- if (dd->ipath_unit * 64 > (IPATH_PORT0_RCVHDRTAIL_SIZE - 64)) {
- ipath_dev_err(dd, "unit %u too large for port 0 "
- "rcvhdrtail buffer size\n", dd->ipath_unit);
- ret = -ENODEV;
- }
- else
- ret = 0;
+ ret = 0;
- /* so we can get current tail in ipath_kreceive(), per chip */
- dd->ipath_hdrqtailptr = &ipath_port0_rcvhdrtail[
- dd->ipath_unit * (64 / sizeof(*ipath_port0_rcvhdrtail))];
done:
return ret;
}
{
int ret = 0, i;
u32 val32, kpiobufs;
- u64 val, atmp;
+ u64 val;
struct ipath_portdata *pd = NULL; /* keep gcc4 happy */
+ gfp_t gfp_flags = GFP_USER | __GFP_COMP;
ret = init_housekeeping(dd, &pd, reinit);
if (ret)
goto done;
}
- val = ipath_port0_rcvhdrtail_dma + dd->ipath_unit * 64;
-
- /* verify that the alignment requirement was met */
- ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
- 0, val);
- atmp = ipath_read_kreg64_port(
- dd, dd->ipath_kregs->kr_rcvhdrtailaddr, 0);
- if (val != atmp) {
- ipath_dev_err(dd, "Catastrophic software error, "
- "RcvHdrTailAddr0 written as %llx, "
- "read back as %llx from %x\n",
- (unsigned long long) val,
- (unsigned long long) atmp,
- dd->ipath_kregs->kr_rcvhdrtailaddr);
- ret = -EINVAL;
- goto done;
- }
-
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvbthqp, IPATH_KD_QP);
/*
/* clear any interrups up to this point (ints still not enabled) */
ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
- ipath_stats.sps_lid[dd->ipath_unit] = dd->ipath_lid;
-
/*
* Set up the port 0 (kernel) rcvhdr q and egr TIDs. If doing
* re-init, the simplest way to handle this is to free
* existing, and re-allocate.
*/
- if (reinit)
- ipath_free_pddata(dd, 0, 0);
+ if (reinit) {
+ struct ipath_portdata *pd = dd->ipath_pd[0];
+ dd->ipath_pd[0] = NULL;
+ ipath_free_pddata(dd, pd);
+ }
dd->ipath_f_tidtemplate(dd);
ret = ipath_create_rcvhdrq(dd, pd);
- if (!ret)
+ if (!ret) {
+ dd->ipath_hdrqtailptr =
+ (volatile __le64 *)pd->port_rcvhdrtail_kvaddr;
ret = create_port0_egr(dd);
+ }
if (ret)
ipath_dev_err(dd, "failed to allocate port 0 (kernel) "
"rcvhdrq and/or egr bufs\n");
else
enable_chip(dd, pd, reinit);
+
+ if (!ret && !reinit) {
+ /* used when we close a port, for DMA already in flight at close */
+ dd->ipath_dummy_hdrq = dma_alloc_coherent(
+ &dd->pcidev->dev, pd->port_rcvhdrq_size,
+ &dd->ipath_dummy_hdrq_phys,
+ gfp_flags);
+		if (!dd->ipath_dummy_hdrq) {
+ dev_info(&dd->pcidev->dev,
+ "Couldn't allocate 0x%lx bytes for dummy hdrq\n",
+ pd->port_rcvhdrq_size);
+ /* fallback to just 0'ing */
+ dd->ipath_dummy_hdrq_phys = 0UL;
+ }
+ }
+
/*
* cause retrigger of pending interrupts ignored during init,
* even if we had errors
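A note on the dummy hdrq allocated above: dma_alloc_coherent() memory must eventually be returned with dma_free_coherent() using the same device and size. The unload path isn't part of this hunk, so the following teardown is only a sketch under that assumption:

	/* Sketch only: pair the allocation above at driver unload. */
	if (dd->ipath_dummy_hdrq) {
		dma_free_coherent(&dd->pcidev->dev,
				  dd->ipath_pd[0]->port_rcvhdrq_size,
				  dd->ipath_dummy_hdrq,
				  dd->ipath_dummy_hdrq_phys);
		dd->ipath_dummy_hdrq = NULL;
	}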
/*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
#include <linux/pci.h>
#include "ipath_kernel.h"
-#include "ips_common.h"
#include "ipath_layer.h"
+#include "ipath_common.h"
+/* These are all rcv-related errors which we want to count for stats */
#define E_SUM_PKTERRS \
(INFINIPATH_E_RHDRLEN | INFINIPATH_E_RBADTID | \
INFINIPATH_E_RBADVERSION | INFINIPATH_E_RHDR | \
INFINIPATH_E_RFORMATERR | INFINIPATH_E_RUNSUPVL | \
INFINIPATH_E_RUNEXPCHAR | INFINIPATH_E_REBP)
+/* These are all send-related errors which we want to count for stats */
#define E_SUM_ERRS \
(INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM | \
INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SPKTLEN | \
INFINIPATH_E_INVALIDADDR)
+/*
+ * These are errors that can occur when the link changes state while
+ * a packet is being sent or received.  This doesn't cover things
+ * like EBP or VCRC that can result from the link changing state
+ * while a packet is being sent, in which case we receive a "known
+ * bad" packet.
+ */
+#define E_SUM_LINK_PKTERRS \
+ (INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
+ INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SPKTLEN | \
+ INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RMINPKTLEN | \
+ INFINIPATH_E_RUNEXPCHAR)
+
static u64 handle_e_sum_errs(struct ipath_devdata *dd, ipath_err_t errs)
{
unsigned long sbuf[4];
if (ipath_debug & __IPATH_PKTDBG)
printk("\n");
}
- if ((errs & (INFINIPATH_E_SDROPPEDDATAPKT |
- INFINIPATH_E_SDROPPEDSMPPKT |
- INFINIPATH_E_SMINPKTLEN)) &&
+ if ((errs & E_SUM_LINK_PKTERRS) &&
!(dd->ipath_flags & IPATH_LINKACTIVE)) {
/*
* This can happen when SMA is trying to bring the link
* valid. We don't want to confuse people, so we just
* don't print them, except at debug
*/
- ipath_dbg("Ignoring pktsend errors %llx, because not "
- "yet active\n", (unsigned long long) errs);
- ignore_this_time = INFINIPATH_E_SDROPPEDDATAPKT |
- INFINIPATH_E_SDROPPEDSMPPKT |
- INFINIPATH_E_SMINPKTLEN;
+ ipath_dbg("Ignoring packet errors %llx, because link not "
+ "ACTIVE\n", (unsigned long long) errs);
+ ignore_this_time = errs & E_SUM_LINK_PKTERRS;
}
return ignore_this_time;
*/
val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
lstate = val & IPATH_IBSTATE_MASK;
- if (lstate == IPATH_IBSTATE_INIT || lstate == IPATH_IBSTATE_ARM ||
+
+ /*
+	 * This is confusing enough when it happens that I want to always
+	 * put it on the console and in the logs.  If it was a requested
+	 * state change, we'll have already cleared the flags, so we won't
+	 * print this warning.
+ */
+ if ((lstate != IPATH_IBSTATE_ARM && lstate != IPATH_IBSTATE_ACTIVE)
+ && (dd->ipath_flags & (IPATH_LINKARMED | IPATH_LINKACTIVE))) {
+ dev_info(&dd->pcidev->dev, "Link state changed from %s to %s\n",
+ (dd->ipath_flags & IPATH_LINKARMED) ? "ARM" : "ACTIVE",
+ ib_linkstate(lstate));
+ /*
+ * Flush all queued sends when link went to DOWN or INIT,
+ * to be sure that they don't block SMA and other MAD packets
+ */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+ INFINIPATH_S_ABORT);
+ ipath_disarm_piobufs(dd, dd->ipath_lastport_piobuf,
+ (unsigned)(dd->ipath_piobcnt2k +
+ dd->ipath_piobcnt4k) -
+ dd->ipath_lastport_piobuf);
+	} else if (lstate == IPATH_IBSTATE_INIT || lstate == IPATH_IBSTATE_ARM ||
lstate == IPATH_IBSTATE_ACTIVE) {
/*
* only print at SMA if there is a change, debug if not
| IPATH_LINKACTIVE |
IPATH_LINKARMED);
*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
+ dd->ipath_lli_counter = 0;
if (!noprint) {
if (((dd->ipath_lastibcstat >>
INFINIPATH_IBCS_LINKSTATE_SHIFT) &
return supp_msgs;
}
-static void handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
+static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
{
char msg[512];
u64 ignore_this_time = 0;
if (errs & E_SUM_ERRS)
ignore_this_time = handle_e_sum_errs(dd, errs);
+ else if ((errs & E_SUM_LINK_PKTERRS) &&
+ !(dd->ipath_flags & IPATH_LINKACTIVE)) {
+ /*
+ * This can happen when SMA is trying to bring the link
+ * up, but the IB link changes state at the "wrong" time.
+ * The IB logic then complains that the packet isn't
+ * valid. We don't want to confuse people, so we just
+ * don't print them, except at debug
+ */
+ ipath_dbg("Ignoring packet errors %llx, because link not "
+ "ACTIVE\n", (unsigned long long) errs);
+ ignore_this_time = errs & E_SUM_LINK_PKTERRS;
+ }
if (supp_msgs == 250000) {
/*
INFINIPATH_E_IBSTATUSCHANGED);
}
if (!errs)
- return;
+ return 0;
if (!noprint)
/*
continue;
if (hd == (tl + 1) ||
(!hd && tl == dd->ipath_hdrqlast)) {
- dd->ipath_lastrcvhdrqtails[i] = tl;
- pd->port_hdrqfull++;
if (i == 0)
chkerrpkts = 1;
+ dd->ipath_lastrcvhdrqtails[i] = tl;
+ pd->port_hdrqfull++;
}
}
}
wake_up_interruptible(&ipath_sma_state_wait);
}
- if (chkerrpkts)
- /* process possible error packets in hdrq */
- ipath_kreceive(dd);
+ return chkerrpkts;
}
/* this is separate to allow for better optimization of ipath_intr() */
dd->ipath_sendctrl);
}
-static void handle_rcv(struct ipath_devdata *dd, u32 istat)
+/*
+ * Handle receive interrupts for user ports; this means a user
+ * process was waiting for a packet to arrive, and didn't want
+ * to poll.
+ */
+static void handle_urcv(struct ipath_devdata *dd, u32 istat)
{
u64 portr;
int i;
infinipath_i_rcvavail_mask)
| ((istat >> INFINIPATH_I_RCVURG_SHIFT) &
infinipath_i_rcvurg_mask);
- for (i = 0; i < dd->ipath_cfgports; i++) {
+ for (i = 1; i < dd->ipath_cfgports; i++) {
struct ipath_portdata *pd = dd->ipath_pd[i];
- if (portr & (1 << i) && pd &&
- pd->port_cnt) {
- if (i == 0)
- ipath_kreceive(dd);
- else if (test_bit(IPATH_PORT_WAITING_RCV,
- &pd->port_flag)) {
- int rcbit;
- clear_bit(IPATH_PORT_WAITING_RCV,
- &pd->port_flag);
- rcbit = i + INFINIPATH_R_INTRAVAIL_SHIFT;
- clear_bit(1UL << rcbit, &dd->ipath_rcvctrl);
- wake_up_interruptible(&pd->port_wait);
- rcvdint = 1;
- }
+ if (portr & (1 << i) && pd && pd->port_cnt &&
+ test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) {
+ int rcbit;
+ clear_bit(IPATH_PORT_WAITING_RCV,
+ &pd->port_flag);
+ rcbit = i + INFINIPATH_R_INTRAVAIL_SHIFT;
+ clear_bit(1UL << rcbit, &dd->ipath_rcvctrl);
+ wake_up_interruptible(&pd->port_wait);
+ rcvdint = 1;
}
}
if (rcvdint) {
irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
{
struct ipath_devdata *dd = data;
- u32 istat;
+ u32 istat, chk0rcv = 0;
ipath_err_t estat = 0;
- static unsigned unexpected = 0;
irqreturn_t ret;
+ u32 oldhead, curtail;
+ static unsigned unexpected = 0;
+	static const u32 port0rbits = (1U << INFINIPATH_I_RCVAVAIL_SHIFT) |
+		(1U << INFINIPATH_I_RCVURG_SHIFT);
+
+ ipath_stats.sps_ints++;
- if(!(dd->ipath_flags & IPATH_PRESENT)) {
- /* this is mostly so we don't try to touch the chip while
- * it is being reset */
+ if (!(dd->ipath_flags & IPATH_PRESENT)) {
/*
- * This return value is perhaps odd, but we do not want the
+ * This return value is not great, but we do not want the
* interrupt core code to remove our interrupt handler
* because we don't appear to be handling an interrupt
* during a chip reset.
return IRQ_HANDLED;
}
+ /*
+ * this needs to be flags&initted, not statusp, so we keep
+ * taking interrupts even after link goes down, etc.
+ * Also, we *must* clear the interrupt at some point, or we won't
+ * take it again, which can be real bad for errors, etc...
+ */
+
+ if (!(dd->ipath_flags & IPATH_INITTED)) {
+ ipath_bad_intr(dd, &unexpected);
+ ret = IRQ_NONE;
+ goto bail;
+ }
+
+ /*
+ * We try to avoid reading the interrupt status register, since
+ * that's a PIO read, and stalls the processor for up to about
+	 * 0.25 usec.  The idea is that if we processed a port0 packet,
+ * we blindly clear the port 0 receive interrupt bits, and nothing
+ * else, then return. If other interrupts are pending, the chip
+ * will re-interrupt us as soon as we write the intclear register.
+ * We then won't process any more kernel packets (if not the 2nd
+ * time, then the 3rd or 4th) and we'll then handle the other
+ * interrupts. We clear the interrupts first so that we don't
+ * lose intr for later packets that arrive while we are processing.
+ */
+ oldhead = dd->ipath_port0head;
+ curtail = (u32)le64_to_cpu(*dd->ipath_hdrqtailptr);
+ if (oldhead != curtail) {
+ if (dd->ipath_flags & IPATH_GPIO_INTR) {
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
+ (u64) (1 << 2));
+ istat = port0rbits | INFINIPATH_I_GPIO;
+		} else
+ istat = port0rbits;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);
+ ipath_kreceive(dd);
+ if (oldhead != dd->ipath_port0head) {
+ ipath_stats.sps_fastrcvint++;
+ goto done;
+ }
+ }
+
istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus);
+
if (unlikely(!istat)) {
ipath_stats.sps_nullintr++;
ret = IRQ_NONE; /* not our interrupt, or already handled */
goto bail;
}
- ipath_stats.sps_ints++;
-
- /*
- * this needs to be flags&initted, not statusp, so we keep
- * taking interrupts even after link goes down, etc.
- * Also, we *must* clear the interrupt at some point, or we won't
- * take it again, which can be real bad for errors, etc...
- */
-
- if (!(dd->ipath_flags & IPATH_INITTED)) {
- ipath_bad_intr(dd, &unexpected);
- ret = IRQ_NONE;
- goto bail;
- }
if (unexpected)
unexpected = 0;
- ipath_cdbg(VERBOSE, "intr stat=0x%x\n", istat);
-
- if (istat & ~infinipath_i_bitsextant)
+ if (unlikely(istat & ~infinipath_i_bitsextant))
ipath_dev_err(dd,
"interrupt with unknown interrupts %x set\n",
istat & (u32) ~ infinipath_i_bitsextant);
+ else
+ ipath_cdbg(VERBOSE, "intr stat=0x%x\n", istat);
- if (istat & INFINIPATH_I_ERROR) {
+ if (unlikely(istat & INFINIPATH_I_ERROR)) {
ipath_stats.sps_errints++;
estat = ipath_read_kreg64(dd,
dd->ipath_kregs->kr_errorstatus);
ipath_dev_err(dd, "Read of error status failed "
"(all bits set); ignoring\n");
else
- handle_errors(dd, estat);
+ if (handle_errors(dd, estat))
+ /* force calling ipath_kreceive() */
+ chk0rcv = 1;
}
if (istat & INFINIPATH_I_GPIO) {
+ /*
+ * Packets are available in the port 0 rcv queue.
+ * Eventually this needs to be generalized to check
+ * IPATH_GPIO_INTR, and the specific GPIO bit, if
+ * GPIO interrupts are used for anything else.
+ */
if (unlikely(!(dd->ipath_flags & IPATH_GPIO_INTR))) {
u32 gpiostatus;
gpiostatus = ipath_read_kreg32(
else {
/* Clear GPIO status bit 2 */
ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
- (u64) (1 << 2));
-
- /*
- * Packets are available in the port 0 rcv queue.
- * Eventually this needs to be generalized to check
- * IPATH_GPIO_INTR, and the specific GPIO bit, if
- * GPIO interrupts are used for anything else.
- */
- ipath_kreceive(dd);
+ (u64) (1 << 2));
+ chk0rcv = 1;
}
}
+ chk0rcv |= istat & port0rbits;
/*
- * clear the ones we will deal with on this round
- * We clear it early, mostly for receive interrupts, so we
- * know the chip will have seen this by the time we process
- * the queue, and will re-interrupt if necessary. The processor
- * itself won't take the interrupt again until we return.
+ * Clear the interrupt bits we found set, unless they are receive
+ * related, in which case we already cleared them above, and don't
+ * want to clear them again, because we might lose an interrupt.
+	 * Clear it early, so we "know" the chip will have seen this by
+ * the time we process the queue, and will re-interrupt if necessary.
+ * The processor itself won't take the interrupt again until we return.
*/
ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);
+ /*
+ * handle port0 receive before checking for pio buffers available,
+ * since receives can overflow; piobuf waiters can afford a few
+	 * extra cycles, since they were waiting anyway, and users waiting
+ * for receive are at the bottom.
+ */
+ if (chk0rcv) {
+ ipath_kreceive(dd);
+ istat &= ~port0rbits;
+ }
+
+ if (istat & ((infinipath_i_rcvavail_mask <<
+ INFINIPATH_I_RCVAVAIL_SHIFT)
+ | (infinipath_i_rcvurg_mask <<
+ INFINIPATH_I_RCVURG_SHIFT)))
+ handle_urcv(dd, istat);
+
if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
clear_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
handle_layer_pioavail(dd);
}
- /*
- * we check for both transition from empty to non-empty, and urgent
- * packets (those with the interrupt bit set in the header)
- */
-
- if (istat & ((infinipath_i_rcvavail_mask <<
- INFINIPATH_I_RCVAVAIL_SHIFT)
- | (infinipath_i_rcvurg_mask <<
- INFINIPATH_I_RCVURG_SHIFT)))
- handle_rcv(dd, istat);
-
+done:
ret = IRQ_HANDLED;
bail:
#ifndef _IPATH_KERNEL_H
#define _IPATH_KERNEL_H
/*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
/* rcvhdrq base, needs mmap before useful */
void *port_rcvhdrq;
/* kernel virtual address where hdrqtail is updated */
- u64 *port_rcvhdrtail_kvaddr;
- /* page * used for uaddr */
- struct page *port_rcvhdrtail_pagep;
+ volatile __le64 *port_rcvhdrtail_kvaddr;
/*
* temp buffer for expected send setup, allocated at open, instead
* of each setup call
dma_addr_t port_rcvegr_phys;
/* mmap of hdrq, must fit in 44 bits */
dma_addr_t port_rcvhdrq_phys;
- /*
- * the actual user address that we ipath_mlock'ed, so we can
- * ipath_munlock it at close
- */
- unsigned long port_rcvhdrtail_uaddr;
+ dma_addr_t port_rcvhdrqtailaddr_phys;
/*
* number of opens on this instance (0 or 1; ignoring forks, dup,
* etc. for now)
unsigned long ipath_physaddr;
/* base of memory alloced for ipath_kregbase, for free */
u64 *ipath_kregalloc;
- /*
- * version of kregbase that doesn't have high bits set (for 32 bit
- * programs, so mmap64 44 bit works)
- */
- u64 __iomem *ipath_kregvirt;
/*
* virtual address where port0 rcvhdrqtail updated for this unit.
* only written to by the chip, not the driver.
*/
volatile __le64 *ipath_hdrqtailptr;
- dma_addr_t ipath_dma_addr;
/* ipath_cfgports pointers */
struct ipath_portdata **ipath_pd;
/* sk_buffs used by port 0 eager receive queue */
char *ipath_freezemsg;
/* pci access data structure */
struct pci_dev *pcidev;
- struct cdev *cdev;
- struct class_device *class_dev;
+ struct cdev *user_cdev;
+ struct cdev *diag_cdev;
+ struct class_device *user_class_dev;
+ struct class_device *diag_class_dev;
/* timer used to prevent stats overflow, error throttling, etc. */
struct timer_list ipath_stats_timer;
/* check for stale messages in rcv queue */
/* only allow one intr at a time. */
unsigned long ipath_rcv_pending;
+ void *ipath_dummy_hdrq; /* used after port close */
+ dma_addr_t ipath_dummy_hdrq_phys;
/*
* Shadow copies of registers; size indicates read access size.
u16 ipath_lid;
/* list of pkeys programmed; 0 if not set */
u16 ipath_pkeys[4];
- /* ASCII serial number, from flash */
- u8 ipath_serial[12];
+ /*
+	 * ASCII serial number, from flash, large enough for the original
+	 * all-digit strings and the longer QLogic serial number format
+ */
+ u8 ipath_serial[16];
/* human readable board version */
u8 ipath_boardversion[80];
/* chip major rev, from ipath_revision */
u8 ipath_pci_cacheline;
/* LID mask control */
u8 ipath_lmc;
-};
-
-extern volatile __le64 *ipath_port0_rcvhdrtail;
-extern dma_addr_t ipath_port0_rcvhdrtail_dma;
-#define IPATH_PORT0_RCVHDRTAIL_SIZE PAGE_SIZE
+ /* local link integrity counter */
+ u32 ipath_lli_counter;
+ /* local link integrity errors */
+ u32 ipath_lli_errors;
+};
extern struct list_head ipath_dev_list;
extern spinlock_t ipath_devs_lock;
extern int __ipath_verbs_rcv(struct ipath_devdata *, void *, void *, u32);
void ipath_layer_add(struct ipath_devdata *);
-void ipath_layer_del(struct ipath_devdata *);
+void ipath_layer_remove(struct ipath_devdata *);
int ipath_init_chip(struct ipath_devdata *, int);
int ipath_enable_wc(struct ipath_devdata *dd);
void ipath_cdev_cleanup(struct cdev **cdevp,
struct class_device **class_devp);
-int ipath_diag_init(void);
-void ipath_diag_cleanup(void);
+int ipath_diag_add(struct ipath_devdata *);
+void ipath_diag_remove(struct ipath_devdata *);
void ipath_diag_bringup_link(struct ipath_devdata *);
extern wait_queue_head_t ipath_sma_state_wait;
int ipath_user_add(struct ipath_devdata *dd);
-void ipath_user_del(struct ipath_devdata *dd);
+void ipath_user_remove(struct ipath_devdata *dd);
struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd, gfp_t);
unsigned cnt);
int ipath_create_rcvhdrq(struct ipath_devdata *, struct ipath_portdata *);
-void ipath_free_pddata(struct ipath_devdata *, u32, int);
+void ipath_free_pddata(struct ipath_devdata *, struct ipath_portdata *);
int ipath_parse_ushort(const char *str, unsigned short *valp);
* @port: port number
*
* Return the contents of a register that is virtualized to be per port.
- * Prints a debug message and returns -1 on errors (not distinguishable from
- * valid contents at runtime; we may add a separate error variable at some
- * point).
- *
- * This is normally not used by the kernel, but may be for debugging, and
- * has a different implementation than user mode, which is why it's not in
- * _common.h.
+ * Returns -1 on errors (not distinguishable from valid contents at
+ * runtime; we may add a separate error variable at some point).
*/
static inline u32 ipath_read_ureg32(const struct ipath_devdata *dd,
ipath_ureg regno, int port)
#define IPATH_DRV_NAME "ipath_core"
#define IPATH_MAJOR 233
+#define IPATH_USER_MINOR_BASE 0
#define IPATH_SMA_MINOR 128
-#define IPATH_DIAG_MINOR 129
-#define IPATH_NMINORS 130
+#define IPATH_DIAG_MINOR_BASE 129
+#define IPATH_NMINORS 255
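For reference, the minor-number layout these defines imply; the per-unit diag mapping is an assumption, since ipath_diag_add() itself isn't shown in this excerpt:

	/* minor 0         : wildcard user device (any unit)
	 * minors 1..127   : per-unit user devices (unit N => minor N + 1)
	 * minor 128       : SMA
	 * minors 129..254 : per-unit diag devices (assumed base + unit)
	 */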
#define ipath_dev_err(dd,fmt,...) \
do { \
/*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
struct ib_sge *sge, int acc)
{
struct ipath_mregion *mr;
+ unsigned n, m;
size_t off;
int ret;
}
off += mr->offset;
- isge->mr = mr;
- isge->m = 0;
- isge->n = 0;
- while (off >= mr->map[isge->m]->segs[isge->n].length) {
- off -= mr->map[isge->m]->segs[isge->n].length;
- isge->n++;
- if (isge->n >= IPATH_SEGSZ) {
- isge->m++;
- isge->n = 0;
+ m = 0;
+ n = 0;
+ while (off >= mr->map[m]->segs[n].length) {
+ off -= mr->map[m]->segs[n].length;
+ n++;
+ if (n >= IPATH_SEGSZ) {
+ m++;
+ n = 0;
}
}
- isge->vaddr = mr->map[isge->m]->segs[isge->n].vaddr + off;
- isge->length = mr->map[isge->m]->segs[isge->n].length - off;
+ isge->mr = mr;
+ isge->vaddr = mr->map[m]->segs[n].vaddr + off;
+ isge->length = mr->map[m]->segs[n].length - off;
isge->sge_length = sge->length;
+ isge->m = m;
+ isge->n = n;
ret = 1;
struct ipath_lkey_table *rkt = &dev->lk_table;
struct ipath_sge *sge = &ss->sge;
struct ipath_mregion *mr;
+ unsigned n, m;
size_t off;
int ret;
}
off += mr->offset;
- sge->mr = mr;
- sge->m = 0;
- sge->n = 0;
- while (off >= mr->map[sge->m]->segs[sge->n].length) {
- off -= mr->map[sge->m]->segs[sge->n].length;
- sge->n++;
- if (sge->n >= IPATH_SEGSZ) {
- sge->m++;
- sge->n = 0;
+ m = 0;
+ n = 0;
+ while (off >= mr->map[m]->segs[n].length) {
+ off -= mr->map[m]->segs[n].length;
+ n++;
+ if (n >= IPATH_SEGSZ) {
+ m++;
+ n = 0;
}
}
- sge->vaddr = mr->map[sge->m]->segs[sge->n].vaddr + off;
- sge->length = mr->map[sge->m]->segs[sge->n].length - off;
+ sge->mr = mr;
+ sge->vaddr = mr->map[m]->segs[n].vaddr + off;
+ sge->length = mr->map[m]->segs[n].length - off;
sge->sge_length = len;
+ sge->m = m;
+ sge->n = n;
ss->sg_list = NULL;
ss->num_sge = 1;
/*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
#include <asm/byteorder.h>
#include "ipath_kernel.h"
-#include "ips_common.h"
#include "ipath_layer.h"
+#include "ipath_common.h"
/* Acquire before ipath_devs_lock. */
static DEFINE_MUTEX(ipath_layer_mutex);
EXPORT_SYMBOL_GPL(ipath_layer_set_mtu);
-int ipath_set_sps_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
+int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
{
- ipath_stats.sps_lid[dd->ipath_unit] = arg;
dd->ipath_lid = arg;
dd->ipath_lmc = lmc;
return 0;
}
-EXPORT_SYMBOL_GPL(ipath_set_sps_lid);
+EXPORT_SYMBOL_GPL(ipath_set_lid);
int ipath_layer_set_guid(struct ipath_devdata *dd, __be64 guid)
{
EXPORT_SYMBOL_GPL(ipath_layer_get_nguid);
-int ipath_layer_query_device(struct ipath_devdata *dd, u32 * vendor,
- u32 * boardrev, u32 * majrev, u32 * minrev)
+u32 ipath_layer_get_majrev(struct ipath_devdata *dd)
{
- *vendor = dd->ipath_vendorid;
- *boardrev = dd->ipath_boardrev;
- *majrev = dd->ipath_majrev;
- *minrev = dd->ipath_minrev;
+ return dd->ipath_majrev;
+}
- return 0;
+EXPORT_SYMBOL_GPL(ipath_layer_get_majrev);
+
+u32 ipath_layer_get_minrev(struct ipath_devdata *dd)
+{
+ return dd->ipath_minrev;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_get_minrev);
+
+u32 ipath_layer_get_pcirev(struct ipath_devdata *dd)
+{
+ return dd->ipath_pcirev;
}
-EXPORT_SYMBOL_GPL(ipath_layer_query_device);
+EXPORT_SYMBOL_GPL(ipath_layer_get_pcirev);
u32 ipath_layer_get_flags(struct ipath_devdata *dd)
{
EXPORT_SYMBOL_GPL(ipath_layer_get_deviceid);
+u32 ipath_layer_get_vendorid(struct ipath_devdata *dd)
+{
+ return dd->ipath_vendorid;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_get_vendorid);
+
u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd)
{
return dd->ipath_lastibcstat;
mutex_unlock(&ipath_layer_mutex);
}
-void ipath_layer_del(struct ipath_devdata *dd)
+void ipath_layer_remove(struct ipath_devdata *dd)
{
mutex_lock(&ipath_layer_mutex);
goto bail;
}
- ret = ipath_setrcvhdrsize(dd, NUM_OF_EXTRA_WORDS_IN_HEADER_QUEUE);
+ ret = ipath_setrcvhdrsize(dd, IPATH_HEADER_QUEUE_WORDS);
if (ret < 0)
goto bail;
if (*dd->ipath_statusp & IPATH_STATUS_IB_READY)
intval |= IPATH_LAYER_INT_IF_UP;
- if (ipath_stats.sps_lid[dd->ipath_unit])
+ if (dd->ipath_lid)
intval |= IPATH_LAYER_INT_LID;
- if (ipath_stats.sps_mlid[dd->ipath_unit])
+ if (dd->ipath_mlid)
intval |= IPATH_LAYER_INT_BCAST;
/*
* do this on open, in case low level is already up and
/**
* ipath_verbs_send - send a packet from the verbs layer
* @dd: the infinipath device
- * @hdrwords: the number of works in the header
+ * @hdrwords: the number of words in the header
* @hdr: the packet header
* @len: the length of the packet in bytes
* @ss: the SGE to send
ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
cntrs->link_error_recovery_counter =
ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
+ /*
+ * The link downed counter counts when the other side downs the
+ * connection. We add in the number of times we downed the link
+ * due to local link integrity errors to compensate.
+ */
cntrs->link_downed_counter =
ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
cntrs->port_rcv_errors =
ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
- ipath_snap_cntr(dd, dd->ipath_cregs->cr_errrcvflowctrlcnt) +
ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
- ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlinkcnt) +
ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
cntrs->port_rcv_remphys_errors =
ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
cntrs->port_rcv_packets =
ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
+ cntrs->local_link_integrity_errors = dd->ipath_lli_errors;
+ cntrs->excessive_buffer_overrun_errors = 0; /* XXX */
ret = 0;
}
vlsllnh = *((__be16 *) hdr);
- if (vlsllnh != htons(IPS_LRH_BTH)) {
+ if (vlsllnh != htons(IPATH_LRH_BTH)) {
ipath_dbg("Warning: lrh[0] wrong (%x, not %x); "
"not sending\n", be16_to_cpu(vlsllnh),
- IPS_LRH_BTH);
+ IPATH_LRH_BTH);
ret = -EINVAL;
}
if (ret)
/*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
u64 port_rcv_data;
u64 port_xmit_packets;
u64 port_rcv_packets;
+ u32 local_link_integrity_errors;
+ u32 excessive_buffer_overrun_errors;
};
/*
u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd);
int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 state);
int ipath_layer_set_mtu(struct ipath_devdata *, u16);
-int ipath_set_sps_lid(struct ipath_devdata *, u32, u8);
+int ipath_set_lid(struct ipath_devdata *, u32, u8);
int ipath_layer_send_hdr(struct ipath_devdata *dd,
struct ether_header *hdr);
int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
int ipath_layer_set_guid(struct ipath_devdata *, __be64 guid);
__be64 ipath_layer_get_guid(struct ipath_devdata *);
u32 ipath_layer_get_nguid(struct ipath_devdata *);
-int ipath_layer_query_device(struct ipath_devdata *, u32 * vendor,
- u32 * boardrev, u32 * majrev, u32 * minrev);
+u32 ipath_layer_get_majrev(struct ipath_devdata *);
+u32 ipath_layer_get_minrev(struct ipath_devdata *);
+u32 ipath_layer_get_pcirev(struct ipath_devdata *);
u32 ipath_layer_get_flags(struct ipath_devdata *dd);
struct device *ipath_layer_get_device(struct ipath_devdata *dd);
u16 ipath_layer_get_deviceid(struct ipath_devdata *dd);
+u32 ipath_layer_get_vendorid(struct ipath_devdata *);
u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd);
u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd);
int ipath_layer_enable_timer(struct ipath_devdata *dd);
/*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
#include "ipath_kernel.h"
#include "ipath_verbs.h"
-#include "ips_common.h"
+#include "ipath_common.h"
#define IB_SMP_UNSUP_VERSION __constant_htons(0x0004)
#define IB_SMP_UNSUP_METHOD __constant_htons(0x0008)
{
struct nodeinfo *nip = (struct nodeinfo *)&smp->data;
struct ipath_devdata *dd = to_idev(ibdev)->dd;
- u32 vendor, boardid, majrev, minrev;
+ u32 vendor, majrev, minrev;
if (smp->attr_mod)
smp->status |= IB_SMP_INVALID_FIELD;
nip->port_guid = nip->sys_guid;
nip->partition_cap = cpu_to_be16(ipath_layer_get_npkeys(dd));
nip->device_id = cpu_to_be16(ipath_layer_get_deviceid(dd));
- ipath_layer_query_device(dd, &vendor, &boardid, &majrev, &minrev);
+ majrev = ipath_layer_get_majrev(dd);
+ minrev = ipath_layer_get_minrev(dd);
nip->revision = cpu_to_be32((majrev << 16) | minrev);
nip->local_port_num = port;
+ vendor = ipath_layer_get_vendorid(dd);
nip->vendor_id[0] = 0;
nip->vendor_id[1] = vendor >> 8;
nip->vendor_id[2] = vendor;
/* P_KeyViolations are counted by hardware. */
pip->pkey_violations =
cpu_to_be16((ipath_layer_get_cr_errpkey(dev->dd) -
- dev->n_pkey_violations) & 0xFFFF);
+ dev->z_pkey_violations) & 0xFFFF);
pip->qkey_violations = cpu_to_be16(dev->qkey_violations);
/* Only the hardware GUID is supported for now */
pip->guid_cap = 1;
lid = be16_to_cpu(pip->lid);
if (lid != ipath_layer_get_lid(dev->dd)) {
/* Must be a valid unicast LID address. */
- if (lid == 0 || lid >= IPS_MULTICAST_LID_BASE)
+ if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE)
goto err;
- ipath_set_sps_lid(dev->dd, lid, pip->mkeyprot_resv_lmc & 7);
+ ipath_set_lid(dev->dd, lid, pip->mkeyprot_resv_lmc & 7);
event.event = IB_EVENT_LID_CHANGE;
ib_dispatch_event(&event);
}
smlid = be16_to_cpu(pip->sm_lid);
if (smlid != dev->sm_lid) {
/* Must be a valid unicast LID address. */
- if (smlid == 0 || smlid >= IPS_MULTICAST_LID_BASE)
+ if (smlid == 0 || smlid >= IPATH_MULTICAST_LID_BASE)
goto err;
dev->sm_lid = smlid;
event.event = IB_EVENT_SM_CHANGE;
* later.
*/
if (pip->pkey_violations == 0)
- dev->n_pkey_violations =
+ dev->z_pkey_violations =
ipath_layer_get_cr_errpkey(dev->dd);
if (pip->qkey_violations == 0)
#define IB_PMA_SEL_PORT_RCV_ERRORS __constant_htons(0x0008)
#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS __constant_htons(0x0010)
#define IB_PMA_SEL_PORT_XMIT_DISCARDS __constant_htons(0x0040)
+#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS __constant_htons(0x0200)
+#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS __constant_htons(0x0400)
+#define IB_PMA_SEL_PORT_VL15_DROPPED __constant_htons(0x0800)
#define IB_PMA_SEL_PORT_XMIT_DATA __constant_htons(0x1000)
#define IB_PMA_SEL_PORT_RCV_DATA __constant_htons(0x2000)
#define IB_PMA_SEL_PORT_XMIT_PACKETS __constant_htons(0x4000)
ipath_layer_get_counters(dev->dd, &cntrs);
/* Adjust counters for any resets done. */
- cntrs.symbol_error_counter -= dev->n_symbol_error_counter;
+ cntrs.symbol_error_counter -= dev->z_symbol_error_counter;
cntrs.link_error_recovery_counter -=
- dev->n_link_error_recovery_counter;
- cntrs.link_downed_counter -= dev->n_link_downed_counter;
+ dev->z_link_error_recovery_counter;
+ cntrs.link_downed_counter -= dev->z_link_downed_counter;
cntrs.port_rcv_errors += dev->rcv_errors;
- cntrs.port_rcv_errors -= dev->n_port_rcv_errors;
- cntrs.port_rcv_remphys_errors -= dev->n_port_rcv_remphys_errors;
- cntrs.port_xmit_discards -= dev->n_port_xmit_discards;
- cntrs.port_xmit_data -= dev->n_port_xmit_data;
- cntrs.port_rcv_data -= dev->n_port_rcv_data;
- cntrs.port_xmit_packets -= dev->n_port_xmit_packets;
- cntrs.port_rcv_packets -= dev->n_port_rcv_packets;
+ cntrs.port_rcv_errors -= dev->z_port_rcv_errors;
+ cntrs.port_rcv_remphys_errors -= dev->z_port_rcv_remphys_errors;
+ cntrs.port_xmit_discards -= dev->z_port_xmit_discards;
+ cntrs.port_xmit_data -= dev->z_port_xmit_data;
+ cntrs.port_rcv_data -= dev->z_port_rcv_data;
+ cntrs.port_xmit_packets -= dev->z_port_xmit_packets;
+ cntrs.port_rcv_packets -= dev->z_port_rcv_packets;
+ cntrs.local_link_integrity_errors -=
+ dev->z_local_link_integrity_errors;
+ cntrs.excessive_buffer_overrun_errors -=
+ dev->z_excessive_buffer_overrun_errors;
memset(pmp->data, 0, sizeof(pmp->data));
else
p->port_xmit_discards =
cpu_to_be16((u16)cntrs.port_xmit_discards);
+ if (cntrs.local_link_integrity_errors > 0xFUL)
+ cntrs.local_link_integrity_errors = 0xFUL;
+ if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
+ cntrs.excessive_buffer_overrun_errors = 0xFUL;
+ p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
+ cntrs.excessive_buffer_overrun_errors;
+ if (dev->n_vl15_dropped > 0xFFFFUL)
+ p->vl15_dropped = __constant_cpu_to_be16(0xFFFF);
+ else
+ p->vl15_dropped = cpu_to_be16((u16)dev->n_vl15_dropped);
if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
p->port_xmit_data = __constant_cpu_to_be32(0xFFFFFFFF);
else
&rpkts, &xwait);
/* Adjust counters for any resets done. */
- swords -= dev->n_port_xmit_data;
- rwords -= dev->n_port_rcv_data;
- spkts -= dev->n_port_xmit_packets;
- rpkts -= dev->n_port_rcv_packets;
+ swords -= dev->z_port_xmit_data;
+ rwords -= dev->z_port_rcv_data;
+ spkts -= dev->z_port_xmit_packets;
+ rpkts -= dev->z_port_rcv_packets;
memset(pmp->data, 0, sizeof(pmp->data));
ipath_layer_get_counters(dev->dd, &cntrs);
if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
- dev->n_symbol_error_counter = cntrs.symbol_error_counter;
+ dev->z_symbol_error_counter = cntrs.symbol_error_counter;
if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
- dev->n_link_error_recovery_counter =
+ dev->z_link_error_recovery_counter =
cntrs.link_error_recovery_counter;
if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
- dev->n_link_downed_counter = cntrs.link_downed_counter;
+ dev->z_link_downed_counter = cntrs.link_downed_counter;
if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
- dev->n_port_rcv_errors =
+ dev->z_port_rcv_errors =
cntrs.port_rcv_errors + dev->rcv_errors;
if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
- dev->n_port_rcv_remphys_errors =
+ dev->z_port_rcv_remphys_errors =
cntrs.port_rcv_remphys_errors;
if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
- dev->n_port_xmit_discards = cntrs.port_xmit_discards;
+ dev->z_port_xmit_discards = cntrs.port_xmit_discards;
+
+ if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
+ dev->z_local_link_integrity_errors =
+ cntrs.local_link_integrity_errors;
+
+ if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
+ dev->z_excessive_buffer_overrun_errors =
+ cntrs.excessive_buffer_overrun_errors;
+
+ if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED)
+ dev->n_vl15_dropped = 0;
if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
- dev->n_port_xmit_data = cntrs.port_xmit_data;
+ dev->z_port_xmit_data = cntrs.port_xmit_data;
if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
- dev->n_port_rcv_data = cntrs.port_rcv_data;
+ dev->z_port_rcv_data = cntrs.port_rcv_data;
if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
- dev->n_port_xmit_packets = cntrs.port_xmit_packets;
+ dev->z_port_xmit_packets = cntrs.port_xmit_packets;
if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
- dev->n_port_rcv_packets = cntrs.port_rcv_packets;
+ dev->z_port_rcv_packets = cntrs.port_rcv_packets;
return recv_pma_get_portcounters(pmp, ibdev, port);
}
&rpkts, &xwait);
if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
- dev->n_port_xmit_data = swords;
+ dev->z_port_xmit_data = swords;
if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
- dev->n_port_rcv_data = rwords;
+ dev->z_port_rcv_data = rwords;
if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
- dev->n_port_xmit_packets = spkts;
+ dev->z_port_xmit_packets = spkts;
if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
- dev->n_port_rcv_packets = rpkts;
+ dev->z_port_rcv_packets = rpkts;
if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
dev->n_unicast_xmit = 0;
struct ib_wc *in_wc, struct ib_grh *in_grh,
struct ib_mad *in_mad, struct ib_mad *out_mad)
{
- struct ipath_ibdev *dev = to_idev(ibdev);
int ret;
- /*
- * Snapshot current HW counters to "clear" them.
- * This should be done when the driver is loaded except that for
- * some reason we get a zillion errors when brining up the link.
- */
- if (dev->rcv_errors == 0) {
- struct ipath_layer_counters cntrs;
-
- ipath_layer_get_counters(to_idev(ibdev)->dd, &cntrs);
- dev->rcv_errors++;
- dev->n_symbol_error_counter = cntrs.symbol_error_counter;
- dev->n_link_error_recovery_counter =
- cntrs.link_error_recovery_counter;
- dev->n_link_downed_counter = cntrs.link_downed_counter;
- dev->n_port_rcv_errors = cntrs.port_rcv_errors + 1;
- dev->n_port_rcv_remphys_errors =
- cntrs.port_rcv_remphys_errors;
- dev->n_port_xmit_discards = cntrs.port_xmit_discards;
- dev->n_port_xmit_data = cntrs.port_xmit_data;
- dev->n_port_rcv_data = cntrs.port_rcv_data;
- dev->n_port_xmit_packets = cntrs.port_xmit_packets;
- dev->n_port_rcv_packets = cntrs.port_rcv_packets;
- }
switch (in_mad->mad_hdr.mgmt_class) {
case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
case IB_MGMT_CLASS_SUBN_LID_ROUTED:
/*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
int n, m, i;
struct ib_mr *ret;
+ if (region->length == 0) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+
n = 0;
list_for_each_entry(chunk, ®ion->chunk_list, list)
n += chunk->nents;
/*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
/*
* This file contains all the chip-specific register information and
- * access functions for the PathScale PE800, the PCI-Express chip.
+ * access functions for the QLogic InfiniPath PE800, the PCI-Express chip.
*
* This lists the InfiniPath PE800 registers, in the actual chip layout.
* This structure should never be directly accessed.
if (n)
snprintf(name, namelen, "%s", n);
- if (dd->ipath_majrev != 4 || dd->ipath_minrev != 1) {
+	if (dd->ipath_majrev != 4 || !dd->ipath_minrev ||
+	    dd->ipath_minrev > 2) {
ipath_dev_err(dd, "Unsupported PE-800 revision %u.%u!\n",
dd->ipath_majrev, dd->ipath_minrev);
ret = 1;
/*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
#include <linux/vmalloc.h>
#include "ipath_verbs.h"
-#include "ips_common.h"
+#include "ipath_common.h"
#define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
qp->remote_qpn = 0;
qp->qkey = 0;
qp->qp_access_flags = 0;
+ clear_bit(IPATH_S_BUSY, &qp->s_flags);
qp->s_hdrwords = 0;
qp->s_psn = 0;
qp->r_psn = 0;
- atomic_set(&qp->msn, 0);
+ qp->r_msn = 0;
if (qp->ibqp.qp_type == IB_QPT_RC) {
qp->s_state = IB_OPCODE_RC_SEND_LAST;
qp->r_state = IB_OPCODE_RC_SEND_LAST;
qp->r_state = IB_OPCODE_UC_SEND_LAST;
}
qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
- qp->s_nak_state = 0;
+ qp->r_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
+ qp->r_nak_state = 0;
qp->s_rnr_timeout = 0;
qp->s_head = 0;
qp->s_tail = 0;
* @qp: the QP to put into an error state
*
* Flushes both send and receive work queues.
- * QP r_rq.lock and s_lock should be held.
+ * QP s_lock should be held and interrupts disabled.
*/
-static void ipath_error_qp(struct ipath_qp *qp)
+void ipath_error_qp(struct ipath_qp *qp)
{
struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
struct ib_wc wc;
qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
wc.opcode = IB_WC_RECV;
+ spin_lock(&qp->r_rq.lock);
while (qp->r_rq.tail != qp->r_rq.head) {
wc.wr_id = get_rwqe_ptr(&qp->r_rq, qp->r_rq.tail)->wr_id;
if (++qp->r_rq.tail >= qp->r_rq.size)
qp->r_rq.tail = 0;
ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
}
+ spin_unlock(&qp->r_rq.lock);
}
/**
unsigned long flags;
int ret;
- spin_lock_irqsave(&qp->r_rq.lock, flags);
- spin_lock(&qp->s_lock);
+ spin_lock_irqsave(&qp->s_lock, flags);
cur_state = attr_mask & IB_QP_CUR_STATE ?
attr->cur_qp_state : qp->state;
if (attr_mask & IB_QP_AV)
if (attr->ah_attr.dlid == 0 ||
- attr->ah_attr.dlid >= IPS_MULTICAST_LID_BASE)
+ attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
goto inval;
if (attr_mask & IB_QP_PKEY_INDEX)
}
if (attr_mask & IB_QP_MIN_RNR_TIMER)
- qp->s_min_rnr_timer = attr->min_rnr_timer;
+ qp->r_min_rnr_timer = attr->min_rnr_timer;
if (attr_mask & IB_QP_QKEY)
qp->qkey = attr->qkey;
- if (attr_mask & IB_QP_PKEY_INDEX)
- qp->s_pkey_index = attr->pkey_index;
-
qp->state = new_state;
- spin_unlock(&qp->s_lock);
- spin_unlock_irqrestore(&qp->r_rq.lock, flags);
-
- /*
- * If QP1 changed to the RTS state, try to move to the link to INIT
- * even if it was ACTIVE so the SM will reinitialize the SMA's
- * state.
- */
- if (qp->ibqp.qp_num == 1 && new_state == IB_QPS_RTS) {
- struct ipath_ibdev *dev = to_idev(ibqp->device);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
- ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
- }
ret = 0;
goto bail;
inval:
- spin_unlock(&qp->s_lock);
- spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
ret = -EINVAL;
bail:
attr->sq_draining = 0;
attr->max_rd_atomic = 1;
attr->max_dest_rd_atomic = 1;
- attr->min_rnr_timer = qp->s_min_rnr_timer;
+ attr->min_rnr_timer = qp->r_min_rnr_timer;
attr->port_num = 1;
attr->timeout = 0;
attr->retry_cnt = qp->s_retry_cnt;
* @qp: the queue pair to compute the AETH for
*
* Returns the AETH.
- *
- * The QP s_lock should be held.
*/
__be32 ipath_compute_aeth(struct ipath_qp *qp)
{
- u32 aeth = atomic_read(&qp->msn) & IPS_MSN_MASK;
+ u32 aeth = qp->r_msn & IPATH_MSN_MASK;
- if (qp->s_nak_state) {
- aeth |= qp->s_nak_state << IPS_AETH_CREDIT_SHIFT;
- } else if (qp->ibqp.srq) {
+ if (qp->ibqp.srq) {
/*
* Shared receive queues don't generate credits.
* Set the credit field to the invalid value.
*/
- aeth |= IPS_AETH_CREDIT_INVAL << IPS_AETH_CREDIT_SHIFT;
+ aeth |= IPATH_AETH_CREDIT_INVAL << IPATH_AETH_CREDIT_SHIFT;
} else {
u32 min, max, x;
u32 credits;
else
min = x;
}
- aeth |= x << IPS_AETH_CREDIT_SHIFT;
+ aeth |= x << IPATH_AETH_CREDIT_SHIFT;
}
return cpu_to_be32(aeth);
}
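For orientation, the AETH word built here follows the InfiniBand AETH layout; the IPATH_* constants live in ipath_common.h, which isn't part of this excerpt, so the values below are stated as assumptions:

	/* Assumed encoding, per the IBA AETH definition:
	 *   bits 23..0  - MSN (IPATH_MSN_MASK, presumably 0xFFFFFF)
	 *   bits 28..24 - 5-bit credit syndrome (IPATH_AETH_CREDIT_SHIFT
	 *                 presumably 24, IPATH_AETH_CREDIT_MASK 0x1F), with
	 *                 IPATH_AETH_CREDIT_INVAL (0x1F) meaning "no credit
	 *                 information".
	 * e.g. r_msn 0x000123 with an invalid credit encodes as 0x1f000123.
	 */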
size_t sz;
struct ib_qp *ret;
- if (init_attr->cap.max_send_sge > 255 ||
- init_attr->cap.max_recv_sge > 255) {
+ if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
+ init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
+ init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs ||
+ init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
ret = ERR_PTR(-ENOMEM);
goto bail;
}
+ if (init_attr->cap.max_send_sge +
+ init_attr->cap.max_recv_sge +
+ init_attr->cap.max_send_wr +
+ init_attr->cap.max_recv_wr == 0) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+
switch (init_attr->qp_type) {
case IB_QPT_UC:
case IB_QPT_RC:
case IB_QPT_GSI:
qp = kmalloc(sizeof(*qp), GFP_KERNEL);
if (!qp) {
+ vfree(swq);
ret = ERR_PTR(-ENOMEM);
goto bail;
}
- qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
- sz = sizeof(struct ipath_sge) *
- init_attr->cap.max_recv_sge +
- sizeof(struct ipath_rwqe);
- qp->r_rq.wq = vmalloc(qp->r_rq.size * sz);
- if (!qp->r_rq.wq) {
- kfree(qp);
- ret = ERR_PTR(-ENOMEM);
- goto bail;
+ if (init_attr->srq) {
+ qp->r_rq.size = 0;
+ qp->r_rq.max_sge = 0;
+ qp->r_rq.wq = NULL;
+ } else {
+ qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
+ qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
+ sz = (sizeof(struct ipath_sge) * qp->r_rq.max_sge) +
+ sizeof(struct ipath_rwqe);
+ qp->r_rq.wq = vmalloc(qp->r_rq.size * sz);
+ if (!qp->r_rq.wq) {
+ kfree(qp);
+ vfree(swq);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
}
/*
spin_lock_init(&qp->r_rq.lock);
atomic_set(&qp->refcount, 0);
init_waitqueue_head(&qp->wait);
- tasklet_init(&qp->s_task,
- init_attr->qp_type == IB_QPT_RC ?
- ipath_do_rc_send : ipath_do_uc_send,
+ tasklet_init(&qp->s_task, ipath_do_ruc_send,
(unsigned long)qp);
INIT_LIST_HEAD(&qp->piowait);
INIT_LIST_HEAD(&qp->timerwait);
qp->s_wq = swq;
qp->s_size = init_attr->cap.max_send_wr + 1;
qp->s_max_sge = init_attr->cap.max_send_sge;
- qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
qp->s_flags = init_attr->sq_sig_type == IB_SIGNAL_REQ_WR ?
1 << IPATH_S_SIGNAL_REQ_WR : 0;
dev = to_idev(ibpd->device);
*/
void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
{
- u32 credit = (aeth >> IPS_AETH_CREDIT_SHIFT) & IPS_AETH_CREDIT_MASK;
+ u32 credit = (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK;
/*
* If the credit is invalid, we can send
* as many packets as we like. Otherwise, we have to
* honor the credit field.
*/
- if (credit == IPS_AETH_CREDIT_INVAL) {
+ if (credit == IPATH_AETH_CREDIT_INVAL)
qp->s_lsn = (u32) -1;
- } else if (qp->s_lsn != (u32) -1) {
+ else if (qp->s_lsn != (u32) -1) {
/* Compute new LSN (i.e., MSN + credit) */
- credit = (aeth + credit_table[credit]) & IPS_MSN_MASK;
+ credit = (aeth + credit_table[credit]) & IPATH_MSN_MASK;
if (ipath_cmp24(credit, qp->s_lsn) > 0)
qp->s_lsn = credit;
}
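
The limit sequence number (LSN) computed above caps how far the sender may run ahead of the receiver's MSN. A minimal sketch of the update, assuming a hypothetical credit table (the driver's real credit_table maps the 5-bit code to larger counts):

	static const unsigned int credit_table_example[8] = {
		0, 1, 2, 3, 4, 6, 8, 12	/* illustrative values only */
	};

	static unsigned int new_lsn(unsigned int msn, unsigned int code,
				    unsigned int cur_lsn)
	{
		unsigned int lsn = (msn + credit_table_example[code & 7]) & 0xFFFFFF;

		/* Only advance the limit; 24-bit modular compare as in
		 * ipath_cmp24() above (arithmetic right shift assumed). */
		return ((int)((lsn - cur_lsn) << 8) >> 8) > 0 ? lsn : cur_lsn;
	}
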
/*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
*/
#include "ipath_verbs.h"
-#include "ips_common.h"
+#include "ipath_common.h"
/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_RC_##x
* @qp: the QP who's SGE we're restarting
* @wqe: the work queue to initialize the QP's SGE from
*
- * The QP s_lock should be held.
+ * The QP s_lock should be held and interrupts disabled.
*/
static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
{
struct ipath_ibdev *dev;
u32 len;
- len = ((qp->s_psn - wqe->psn) & IPS_PSN_MASK) *
+ len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) *
ib_mtu_enum_to_int(qp->path_mtu);
qp->s_sge.sge = wqe->sg_list[0];
qp->s_sge.sg_list = wqe->sg_list + 1;
* Return bth0 if constructed; otherwise, return 0.
* Note the QP s_lock must be held.
*/
-static inline u32 ipath_make_rc_ack(struct ipath_qp *qp,
- struct ipath_other_headers *ohdr,
- u32 pmtu)
+u32 ipath_make_rc_ack(struct ipath_qp *qp,
+ struct ipath_other_headers *ohdr,
+ u32 pmtu)
{
- struct ipath_sge_state *ss;
u32 hwords;
u32 len;
u32 bth0;
*/
switch (qp->s_ack_state) {
case OP(RDMA_READ_REQUEST):
- ss = &qp->s_rdma_sge;
+ qp->s_cur_sge = &qp->s_rdma_sge;
len = qp->s_rdma_len;
if (len > pmtu) {
len = pmtu;
qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
- }
- else
+ } else
qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
qp->s_rdma_len -= len;
bth0 = qp->s_ack_state << 24;
qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
/* FALLTHROUGH */
case OP(RDMA_READ_RESPONSE_MIDDLE):
- ss = &qp->s_rdma_sge;
+ qp->s_cur_sge = &qp->s_rdma_sge;
len = qp->s_rdma_len;
if (len > pmtu)
len = pmtu;
* We have to prevent new requests from changing
	 * the r_sge state while an ipath_verbs_send()
* is in progress.
- * Changing r_state allows the receiver
- * to continue processing new packets.
- * We do it here now instead of above so
- * that we are sure the packet was sent before
- * changing the state.
*/
- qp->r_state = OP(RDMA_READ_RESPONSE_LAST);
qp->s_ack_state = OP(ACKNOWLEDGE);
- return 0;
+ bth0 = 0;
+ goto bail;
case OP(COMPARE_SWAP):
case OP(FETCH_ADD):
- ss = NULL;
+ qp->s_cur_sge = NULL;
len = 0;
- qp->r_state = OP(SEND_LAST);
- qp->s_ack_state = OP(ACKNOWLEDGE);
- bth0 = IB_OPCODE_ATOMIC_ACKNOWLEDGE << 24;
+ /*
+ * Set the s_ack_state so the receive interrupt handler
+ * won't try to send an ACK (out of order) until this one
+ * is actually sent.
+ */
+ qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
+ bth0 = OP(ATOMIC_ACKNOWLEDGE) << 24;
ohdr->u.at.aeth = ipath_compute_aeth(qp);
- ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->s_ack_atomic);
+ ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->r_atomic_data);
hwords += sizeof(ohdr->u.at) / 4;
break;
default:
/* Send a regular ACK. */
- ss = NULL;
+ qp->s_cur_sge = NULL;
len = 0;
- qp->s_ack_state = OP(ACKNOWLEDGE);
- bth0 = qp->s_ack_state << 24;
- ohdr->u.aeth = ipath_compute_aeth(qp);
+ /*
+ * Set the s_ack_state so the receive interrupt handler
+ * won't try to send an ACK (out of order) until this one
+ * is actually sent.
+ */
+ qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
+ bth0 = OP(ACKNOWLEDGE) << 24;
+ if (qp->s_nak_state)
+ ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
+ (qp->s_nak_state <<
+ IPATH_AETH_CREDIT_SHIFT));
+ else
+ ohdr->u.aeth = ipath_compute_aeth(qp);
hwords++;
}
qp->s_hdrwords = hwords;
- qp->s_cur_sge = ss;
qp->s_cur_size = len;
+bail:
return bth0;
}
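
ipath_make_rc_ack() is un-inlined here so the merged send tasklet can call it. A sketch of the expected caller pattern, mirroring the prioritization in the ipath_do_rc_send() body removed later in this patch (a fragment, not compilable on its own; responses go out before new requests):

	/* Sending responses has higher priority than sending requests. */
	if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
	    (bth0 = ipath_make_rc_ack(qp, ohdr, pmtu)) != 0)
		bth2 = qp->s_ack_psn++ & IPATH_PSN_MASK;
	else if (!ipath_make_rc_req(qp, ohdr, pmtu, &bth0, &bth2))
		goto done;	/* nothing to construct this pass */
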
* @bth2p: pointer to the BTH PSN word
*
* Return 1 if constructed; otherwise, return 0.
- * Note the QP s_lock must be held.
+ * Note the QP s_lock must be held and interrupts disabled.
*/
-static inline int ipath_make_rc_req(struct ipath_qp *qp,
- struct ipath_other_headers *ohdr,
- u32 pmtu, u32 *bth0p, u32 *bth2p)
+int ipath_make_rc_req(struct ipath_qp *qp,
+ struct ipath_other_headers *ohdr,
+ u32 pmtu, u32 *bth0p, u32 *bth2p)
{
struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
struct ipath_sge_state *ss;
break;
case IB_WR_RDMA_WRITE:
- if (newreq)
+ if (newreq && qp->s_lsn != (u32) -1)
qp->s_lsn++;
/* FALLTHROUGH */
case IB_WR_RDMA_WRITE_WITH_IMM:
else {
qp->s_state =
OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
- /* Immediate data comes
- * after RETH */
+ /* Immediate data comes after RETH */
ohdr->u.rc.imm_data = wqe->wr.imm_data;
hwords += 1;
if (wqe->wr.send_flags & IB_SEND_SOLICITED)
qp->s_state = OP(RDMA_READ_REQUEST);
hwords += sizeof(ohdr->u.rc.reth) / 4;
if (newreq) {
- qp->s_lsn++;
+ if (qp->s_lsn != (u32) -1)
+ qp->s_lsn++;
/*
* Adjust s_next_psn to count the
* expected number of responses.
wqe->wr.wr.atomic.compare_add);
hwords += sizeof(struct ib_atomic_eth) / 4;
if (newreq) {
- qp->s_lsn++;
+ if (qp->s_lsn != (u32) -1)
+ qp->s_lsn++;
wqe->lpsn = wqe->psn;
}
if (++qp->s_cur == qp->s_size)
if (qp->s_tail >= qp->s_size)
qp->s_tail = 0;
}
- bth2 |= qp->s_psn++ & IPS_PSN_MASK;
+ bth2 |= qp->s_psn++ & IPATH_PSN_MASK;
if ((int)(qp->s_psn - qp->s_next_psn) > 0)
qp->s_next_psn = qp->s_psn;
+ /*
+ * Put the QP on the pending list so lost ACKs will cause
+ * a retry. More than one request can be pending so the
+ * QP may already be on the dev->pending list.
+ */
spin_lock(&dev->pending_lock);
if (list_empty(&qp->timerwait))
list_add_tail(&qp->timerwait,
case OP(RDMA_READ_RESPONSE_FIRST):
/*
- * This case can only happen if a send is restarted. See
- * ipath_restart_rc().
+ * This case can only happen if a send is restarted.
+ * See ipath_restart_rc().
*/
ipath_init_restart(qp, wqe);
/* FALLTHROUGH */
qp->s_state = OP(SEND_MIDDLE);
/* FALLTHROUGH */
case OP(SEND_MIDDLE):
- bth2 = qp->s_psn++ & IPS_PSN_MASK;
+ bth2 = qp->s_psn++ & IPATH_PSN_MASK;
if ((int)(qp->s_psn - qp->s_next_psn) > 0)
qp->s_next_psn = qp->s_psn;
ss = &qp->s_sge;
qp->s_state = OP(RDMA_WRITE_MIDDLE);
/* FALLTHROUGH */
case OP(RDMA_WRITE_MIDDLE):
- bth2 = qp->s_psn++ & IPS_PSN_MASK;
+ bth2 = qp->s_psn++ & IPATH_PSN_MASK;
if ((int)(qp->s_psn - qp->s_next_psn) > 0)
qp->s_next_psn = qp->s_psn;
ss = &qp->s_sge;
* See ipath_restart_rc().
*/
ipath_init_restart(qp, wqe);
- len = ((qp->s_psn - wqe->psn) & IPS_PSN_MASK) * pmtu;
+ len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
ohdr->u.rc.reth.vaddr =
cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
ohdr->u.rc.reth.rkey =
ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len);
qp->s_state = OP(RDMA_READ_REQUEST);
hwords += sizeof(ohdr->u.rc.reth) / 4;
- bth2 = qp->s_psn++ & IPS_PSN_MASK;
+ bth2 = qp->s_psn++ & IPATH_PSN_MASK;
if ((int)(qp->s_psn - qp->s_next_psn) > 0)
qp->s_next_psn = qp->s_psn;
ss = NULL;
return 0;
}
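
PSNs are 24-bit modular quantities, which is why every increment above is masked with IPATH_PSN_MASK and ordering is tested via ipath_cmp24() throughout this patch. A self-contained sketch of such a compare, consistent with its uses here (negative when a precedes b; arithmetic right shift assumed):

	static int cmp24(unsigned int a, unsigned int b)
	{
		/* Sign-extend the 24-bit difference: bit 23 becomes the sign. */
		return (int)((a - b) << 8) >> 8;
	}
	/* e.g. cmp24(0x000001, 0xFFFFFE) == 3, so PSN 1 is "after" 0xFFFFFE
	 * across the wrap, which a plain unsigned compare would get wrong. */
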
-static inline void ipath_make_rc_grh(struct ipath_qp *qp,
- struct ib_global_route *grh,
- u32 nwords)
-{
- struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-
- /* GRH header size in 32-bit words. */
- qp->s_hdrwords += 10;
- qp->s_hdr.u.l.grh.version_tclass_flow =
- cpu_to_be32((6 << 28) |
- (grh->traffic_class << 20) |
- grh->flow_label);
- qp->s_hdr.u.l.grh.paylen =
- cpu_to_be16(((qp->s_hdrwords - 12) + nwords +
- SIZE_OF_CRC) << 2);
- /* next_hdr is defined by C8-7 in ch. 8.4.1 */
- qp->s_hdr.u.l.grh.next_hdr = 0x1B;
- qp->s_hdr.u.l.grh.hop_limit = grh->hop_limit;
- /* The SGID is 32-bit aligned. */
- qp->s_hdr.u.l.grh.sgid.global.subnet_prefix = dev->gid_prefix;
- qp->s_hdr.u.l.grh.sgid.global.interface_id =
- ipath_layer_get_guid(dev->dd);
- qp->s_hdr.u.l.grh.dgid = grh->dgid;
-}
-
/**
- * ipath_do_rc_send - perform a send on an RC QP
- * @data: contains a pointer to the QP
+ * send_rc_ack - Construct an ACK packet and send it
+ * @qp: a pointer to the QP
*
- * Process entries in the send work queue until credit or queue is
- * exhausted. Only allow one CPU to send a packet per QP (tasklet).
- * Otherwise, after we drop the QP s_lock, two threads could send
- * packets out of order.
+ * This is called from ipath_rc_rcv() and only uses the receive
+ * side QP state.
+ * Note that RDMA reads are handled in the send side QP state and tasklet.
*/
-void ipath_do_rc_send(unsigned long data)
+static void send_rc_ack(struct ipath_qp *qp)
{
- struct ipath_qp *qp = (struct ipath_qp *)data;
struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
- unsigned long flags;
u16 lrh0;
- u32 nwords;
- u32 extra_bytes;
u32 bth0;
- u32 bth2;
- u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+ u32 hwords;
+ struct ipath_ib_header hdr;
struct ipath_other_headers *ohdr;
- if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
- goto bail;
-
- if (unlikely(qp->remote_ah_attr.dlid ==
- ipath_layer_get_lid(dev->dd))) {
- struct ib_wc wc;
-
- /*
- * Pass in an uninitialized ib_wc to be consistent with
- * other places where ipath_ruc_loopback() is called.
- */
- ipath_ruc_loopback(qp, &wc);
- goto clear;
- }
-
- ohdr = &qp->s_hdr.u.oth;
- if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
- ohdr = &qp->s_hdr.u.l.oth;
-
-again:
- /* Check for a constructed packet to be sent. */
- if (qp->s_hdrwords != 0) {
- /*
- * If no PIO bufs are available, return. An interrupt will
- * call ipath_ib_piobufavail() when one is available.
- */
- _VERBS_INFO("h %u %p\n", qp->s_hdrwords, &qp->s_hdr);
- _VERBS_INFO("d %u %p %u %p %u %u %u %u\n", qp->s_cur_size,
- qp->s_cur_sge->sg_list,
- qp->s_cur_sge->num_sge,
- qp->s_cur_sge->sge.vaddr,
- qp->s_cur_sge->sge.sge_length,
- qp->s_cur_sge->sge.length,
- qp->s_cur_sge->sge.m,
- qp->s_cur_sge->sge.n);
- if (ipath_verbs_send(dev->dd, qp->s_hdrwords,
- (u32 *) &qp->s_hdr, qp->s_cur_size,
- qp->s_cur_sge)) {
- ipath_no_bufs_available(qp, dev);
- goto bail;
- }
- dev->n_unicast_xmit++;
- /* Record that we sent the packet and s_hdr is empty. */
- qp->s_hdrwords = 0;
- }
-
- /*
- * The lock is needed to synchronize between setting
- * qp->s_ack_state, resend timer, and post_send().
- */
- spin_lock_irqsave(&qp->s_lock, flags);
-
- /* Sending responses has higher priority over sending requests. */
- if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
- (bth0 = ipath_make_rc_ack(qp, ohdr, pmtu)) != 0)
- bth2 = qp->s_ack_psn++ & IPS_PSN_MASK;
- else if (!ipath_make_rc_req(qp, ohdr, pmtu, &bth0, &bth2))
- goto done;
-
- spin_unlock_irqrestore(&qp->s_lock, flags);
-
/* Construct the header. */
- extra_bytes = (4 - qp->s_cur_size) & 3;
- nwords = (qp->s_cur_size + extra_bytes) >> 2;
- lrh0 = IPS_LRH_BTH;
+ ohdr = &hdr.u.oth;
+ lrh0 = IPATH_LRH_BTH;
+ /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
+ hwords = 6;
if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
- ipath_make_rc_grh(qp, &qp->remote_ah_attr.grh, nwords);
- lrh0 = IPS_LRH_GRH;
+ hwords += ipath_make_grh(dev, &hdr.u.l.grh,
+ &qp->remote_ah_attr.grh,
+ hwords, 0);
+ ohdr = &hdr.u.l.oth;
+ lrh0 = IPATH_LRH_GRH;
}
+	/* read pkey_index w/o lock (it's atomic) */

+ bth0 = ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
+ if (qp->r_nak_state)
+ ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
+ (qp->r_nak_state <<
+ IPATH_AETH_CREDIT_SHIFT));
+ else
+ ohdr->u.aeth = ipath_compute_aeth(qp);
+ if (qp->r_ack_state >= OP(COMPARE_SWAP)) {
+ bth0 |= OP(ATOMIC_ACKNOWLEDGE) << 24;
+ ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->r_atomic_data);
+ hwords += sizeof(ohdr->u.at.atomic_ack_eth) / 4;
+ } else
+ bth0 |= OP(ACKNOWLEDGE) << 24;
lrh0 |= qp->remote_ah_attr.sl << 4;
- qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
- qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
- qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
- SIZE_OF_CRC);
- qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
- bth0 |= ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
- bth0 |= extra_bytes << 20;
+ hdr.lrh[0] = cpu_to_be16(lrh0);
+ hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+ hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
+ hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
ohdr->bth[0] = cpu_to_be32(bth0);
ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
- ohdr->bth[2] = cpu_to_be32(bth2);
+ ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);
- /* Check for more work to do. */
- goto again;
+ /*
+ * If we can send the ACK, clear the ACK state.
+ */
+ if (ipath_verbs_send(dev->dd, hwords, (u32 *) &hdr, 0, NULL) == 0) {
+ qp->r_ack_state = OP(ACKNOWLEDGE);
+ dev->n_unicast_xmit++;
+ } else {
+ /*
+ * We are out of PIO buffers at the moment.
+ * Pass responsibility for sending the ACK to the
+ * send tasklet so that when a PIO buffer becomes
+ * available, the ACK is sent ahead of other outgoing
+ * packets.
+ */
+ dev->n_rc_qacks++;
+ spin_lock_irq(&qp->s_lock);
+ /* Don't coalesce if a RDMA read or atomic is pending. */
+ if (qp->s_ack_state == OP(ACKNOWLEDGE) ||
+ qp->s_ack_state < OP(RDMA_READ_REQUEST)) {
+ qp->s_ack_state = qp->r_ack_state;
+ qp->s_nak_state = qp->r_nak_state;
+ qp->s_ack_psn = qp->r_ack_psn;
+ qp->r_ack_state = OP(ACKNOWLEDGE);
+ }
+ spin_unlock_irq(&qp->s_lock);
-done:
- spin_unlock_irqrestore(&qp->s_lock, flags);
-clear:
- clear_bit(IPATH_S_BUSY, &qp->s_flags);
-bail:
- return;
+ /* Call ipath_do_rc_send() in another thread. */
+ tasklet_hi_schedule(&qp->s_task);
+ }
}
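
The ACK builder above counts headers in 32-bit words. The sizes behind that arithmetic, as a sketch (constants are illustrative, derived from the comments in this hunk):

	enum {
		LRH_WORDS  =  8 / 4,	/* local route header */
		BTH_WORDS  = 12 / 4,	/* base transport header */
		AETH_WORDS =  4 / 4,	/* ACK extended transport header */
		GRH_WORDS  = 40 / 4,	/* optional global route header */
	};
	/* 2 + 3 + 1 = 6 matches "hwords = 6" above; a GRH adds 10 words and
	 * an atomic ACK ETH (one u64) adds 2 more. LRH[2] then carries
	 * hwords + SIZE_OF_CRC as the on-the-wire length. */
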
-static void send_rc_ack(struct ipath_qp *qp)
+/**
+ * reset_psn - reset the QP state to send starting from PSN
+ * @qp: the QP
+ * @psn: the packet sequence number to restart at
+ *
+ * This is called from ipath_rc_rcv() to process an incoming RC ACK
+ * for the given QP.
+ * Called at interrupt level with the QP s_lock held.
+ */
+static void reset_psn(struct ipath_qp *qp, u32 psn)
{
- struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
- u16 lrh0;
- u32 bth0;
- struct ipath_other_headers *ohdr;
+ u32 n = qp->s_last;
+ struct ipath_swqe *wqe = get_swqe_ptr(qp, n);
+ u32 opcode;
- /* Construct the header. */
- ohdr = &qp->s_hdr.u.oth;
- lrh0 = IPS_LRH_BTH;
- /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
- qp->s_hdrwords = 6;
- if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
- ipath_make_rc_grh(qp, &qp->remote_ah_attr.grh, 0);
- ohdr = &qp->s_hdr.u.l.oth;
- lrh0 = IPS_LRH_GRH;
+ qp->s_cur = n;
+
+ /*
+ * If we are starting the request from the beginning,
+ * let the normal send code handle initialization.
+ */
+ if (ipath_cmp24(psn, wqe->psn) <= 0) {
+ qp->s_state = OP(SEND_LAST);
+ goto done;
}
- bth0 = ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
- ohdr->u.aeth = ipath_compute_aeth(qp);
- if (qp->s_ack_state >= OP(COMPARE_SWAP)) {
- bth0 |= IB_OPCODE_ATOMIC_ACKNOWLEDGE << 24;
- ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->s_ack_atomic);
- qp->s_hdrwords += sizeof(ohdr->u.at.atomic_ack_eth) / 4;
+
+ /* Find the work request opcode corresponding to the given PSN. */
+ opcode = wqe->wr.opcode;
+ for (;;) {
+ int diff;
+
+ if (++n == qp->s_size)
+ n = 0;
+ if (n == qp->s_tail)
+ break;
+ wqe = get_swqe_ptr(qp, n);
+ diff = ipath_cmp24(psn, wqe->psn);
+ if (diff < 0)
+ break;
+ qp->s_cur = n;
+ /*
+ * If we are starting the request from the beginning,
+ * let the normal send code handle initialization.
+ */
+ if (diff == 0) {
+ qp->s_state = OP(SEND_LAST);
+ goto done;
+ }
+ opcode = wqe->wr.opcode;
}
- else
- bth0 |= OP(ACKNOWLEDGE) << 24;
- lrh0 |= qp->remote_ah_attr.sl << 4;
- qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
- qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
- qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + SIZE_OF_CRC);
- qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
- ohdr->bth[0] = cpu_to_be32(bth0);
- ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
- ohdr->bth[2] = cpu_to_be32(qp->s_ack_psn & IPS_PSN_MASK);
/*
- * If we can send the ACK, clear the ACK state.
+ * Set the state to restart in the middle of a request.
+ * Don't change the s_sge, s_cur_sge, or s_cur_size.
+ * See ipath_do_rc_send().
*/
- if (ipath_verbs_send(dev->dd, qp->s_hdrwords, (u32 *) &qp->s_hdr,
- 0, NULL) == 0) {
- qp->s_ack_state = OP(ACKNOWLEDGE);
- dev->n_rc_qacks++;
- dev->n_unicast_xmit++;
+ switch (opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
+ break;
+
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
+ break;
+
+ case IB_WR_RDMA_READ:
+ qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
+ break;
+
+ default:
+ /*
+				 * This case shouldn't happen since there's only
+				 * one PSN per request.
+ */
+ qp->s_state = OP(SEND_LAST);
}
+done:
+ qp->s_psn = psn;
}
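
When reset_psn() leaves a request mid-stream, ipath_init_restart() (earlier in this patch) resumes the payload at an offset derived from the PSN distance. A small worked sketch of that arithmetic:

	static unsigned long restart_offset(unsigned int psn, unsigned int wqe_psn,
					    unsigned int pmtu)
	{
		/* 24-bit PSN distance times the path MTU, as in
		 * ipath_init_restart(). */
		return ((psn - wqe_psn) & 0xFFFFFF) * (unsigned long)pmtu;
	}
	/* e.g. restart_offset(103, 100, 2048) == 6144: skip the three
	 * packets already delivered. */
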
/**
* @psn: packet sequence number for the request
* @wc: the work completion request
*
- * The QP s_lock should be held.
+ * The QP s_lock should be held and interrupts disabled.
*/
void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
{
struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
struct ipath_ibdev *dev;
- u32 n;
/*
* If there are no requests pending, we are done.
else
dev->n_rc_resends += (int)qp->s_psn - (int)psn;
- /*
- * If we are starting the request from the beginning, let the normal
- * send code handle initialization.
- */
- qp->s_cur = qp->s_last;
- if (ipath_cmp24(psn, wqe->psn) <= 0) {
- qp->s_state = OP(SEND_LAST);
- qp->s_psn = wqe->psn;
- } else {
- n = qp->s_cur;
- for (;;) {
- if (++n == qp->s_size)
- n = 0;
- if (n == qp->s_tail) {
- if (ipath_cmp24(psn, qp->s_next_psn) >= 0) {
- qp->s_cur = n;
- wqe = get_swqe_ptr(qp, n);
- }
- break;
- }
- wqe = get_swqe_ptr(qp, n);
- if (ipath_cmp24(psn, wqe->psn) < 0)
- break;
- qp->s_cur = n;
- }
- qp->s_psn = psn;
-
- /*
- * Reset the state to restart in the middle of a request.
- * Don't change the s_sge, s_cur_sge, or s_cur_size.
- * See ipath_do_rc_send().
- */
- switch (wqe->wr.opcode) {
- case IB_WR_SEND:
- case IB_WR_SEND_WITH_IMM:
- qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
- break;
-
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
- break;
-
- case IB_WR_RDMA_READ:
- qp->s_state =
- OP(RDMA_READ_RESPONSE_MIDDLE);
- break;
-
- default:
- /*
- * This case shouldn't happen since its only
- * one PSN per req.
- */
- qp->s_state = OP(SEND_LAST);
- }
- }
+ reset_psn(qp, psn);
done:
tasklet_hi_schedule(&qp->s_task);
return;
}
-/**
- * reset_psn - reset the QP state to send starting from PSN
- * @qp: the QP
- * @psn: the packet sequence number to restart at
- *
- * This is called from ipath_rc_rcv() to process an incoming RC ACK
- * for the given QP.
- * Called at interrupt level with the QP s_lock held.
- */
-static void reset_psn(struct ipath_qp *qp, u32 psn)
-{
- struct ipath_swqe *wqe;
- u32 n;
-
- n = qp->s_cur;
- wqe = get_swqe_ptr(qp, n);
- for (;;) {
- if (++n == qp->s_size)
- n = 0;
- if (n == qp->s_tail) {
- if (ipath_cmp24(psn, qp->s_next_psn) >= 0) {
- qp->s_cur = n;
- wqe = get_swqe_ptr(qp, n);
- }
- break;
- }
- wqe = get_swqe_ptr(qp, n);
- if (ipath_cmp24(psn, wqe->psn) < 0)
- break;
- qp->s_cur = n;
- }
- qp->s_psn = psn;
-
- /*
- * Set the state to restart in the middle of a
- * request. Don't change the s_sge, s_cur_sge, or
- * s_cur_size. See ipath_do_rc_send().
- */
- switch (wqe->wr.opcode) {
- case IB_WR_SEND:
- case IB_WR_SEND_WITH_IMM:
- qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
- break;
-
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
- break;
-
- case IB_WR_RDMA_READ:
- qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
- break;
-
- default:
- /*
- * This case shouldn't happen since its only
- * one PSN per req.
- */
- qp->s_state = OP(SEND_LAST);
- }
-}
-
/**
* do_rc_ack - process an incoming RC ACK
* @qp: the QP the ACK came in on
* @psn: the packet sequence number of the ACK
* @opcode: the opcode of the request that resulted in the ACK
*
- * This is called from ipath_rc_rcv() to process an incoming RC ACK
+ * This is called from ipath_rc_rcv_resp() to process an incoming RC ACK
* for the given QP.
- * Called at interrupt level with the QP s_lock held.
+ * Called at interrupt level with the QP s_lock held and interrupts disabled.
* Returns 1 if OK, 0 if current operation should be aborted (NAK).
*/
static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
if (qp->s_last == qp->s_tail)
goto bail;
- /* The last valid PSN seen is the previous request's. */
- qp->s_last_psn = wqe->psn - 1;
+ /* The last valid PSN is the previous PSN. */
+ qp->s_last_psn = psn - 1;
dev->n_rc_resends += (int)qp->s_psn - (int)psn;
- /*
- * If we are starting the request from the beginning, let
- * the normal send code handle initialization.
- */
- qp->s_cur = qp->s_last;
- wqe = get_swqe_ptr(qp, qp->s_cur);
- if (ipath_cmp24(psn, wqe->psn) <= 0) {
- qp->s_state = OP(SEND_LAST);
- qp->s_psn = wqe->psn;
- } else
- reset_psn(qp, psn);
+ reset_psn(qp, psn);
qp->s_rnr_timeout =
- ib_ipath_rnr_table[(aeth >> IPS_AETH_CREDIT_SHIFT) &
- IPS_AETH_CREDIT_MASK];
+ ib_ipath_rnr_table[(aeth >> IPATH_AETH_CREDIT_SHIFT) &
+ IPATH_AETH_CREDIT_MASK];
ipath_insert_rnr_queue(qp);
goto bail;
/* The last valid PSN seen is the previous request's. */
if (qp->s_last != qp->s_tail)
qp->s_last_psn = wqe->psn - 1;
- switch ((aeth >> IPS_AETH_CREDIT_SHIFT) &
- IPS_AETH_CREDIT_MASK) {
+ switch ((aeth >> IPATH_AETH_CREDIT_SHIFT) &
+ IPATH_AETH_CREDIT_MASK) {
case 0: /* PSN sequence error */
dev->n_seq_naks++;
/*
goto ack_done;
}
rdma_read:
- if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST)))
- goto ack_done;
- if (unlikely(tlen != (hdrsize + pmtu + 4)))
- goto ack_done;
- if (unlikely(pmtu >= qp->s_len))
- goto ack_done;
- /* We got a response so update the timeout. */
- if (unlikely(qp->s_last == qp->s_tail ||
- get_swqe_ptr(qp, qp->s_last)->wr.opcode !=
- IB_WR_RDMA_READ))
- goto ack_done;
- spin_lock(&dev->pending_lock);
- if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
- list_move_tail(&qp->timerwait,
- &dev->pending[dev->pending_index]);
- spin_unlock(&dev->pending_lock);
- /*
- * Update the RDMA receive state but do the copy w/o holding the
- * locks and blocking interrupts. XXX Yet another place that
- * affects relaxed RDMA order since we don't want s_sge modified.
- */
- qp->s_len -= pmtu;
- qp->s_last_psn = psn;
- spin_unlock_irqrestore(&qp->s_lock, flags);
- ipath_copy_sge(&qp->s_sge, data, pmtu);
- goto bail;
+ if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST)))
+ goto ack_done;
+ if (unlikely(tlen != (hdrsize + pmtu + 4)))
+ goto ack_done;
+ if (unlikely(pmtu >= qp->s_len))
+ goto ack_done;
+ /* We got a response so update the timeout. */
+ if (unlikely(qp->s_last == qp->s_tail ||
+ get_swqe_ptr(qp, qp->s_last)->wr.opcode !=
+ IB_WR_RDMA_READ))
+ goto ack_done;
+ spin_lock(&dev->pending_lock);
+ if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
+ list_move_tail(&qp->timerwait,
+ &dev->pending[dev->pending_index]);
+ spin_unlock(&dev->pending_lock);
+ /*
+ * Update the RDMA receive state but do the copy w/o
+ * holding the locks and blocking interrupts.
+ * XXX Yet another place that affects relaxed RDMA order
+ * since we don't want s_sge modified.
+ */
+ qp->s_len -= pmtu;
+ qp->s_last_psn = psn;
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ ipath_copy_sge(&qp->s_sge, data, pmtu);
+ goto bail;
case OP(RDMA_READ_RESPONSE_LAST):
/* ACKs READ req. */
* ICRC (4).
*/
if (unlikely(tlen <= (hdrsize + pad + 8))) {
- /*
- * XXX Need to generate an error CQ
- * entry.
- */
+ /* XXX Need to generate an error CQ entry. */
goto ack_done;
}
tlen -= hdrsize + pad + 8;
if (unlikely(tlen != qp->s_len)) {
- /*
- * XXX Need to generate an error CQ
- * entry.
- */
+ /* XXX Need to generate an error CQ entry. */
goto ack_done;
}
if (!header_in_data)
if (do_rc_ack(qp, aeth, psn, OP(RDMA_READ_RESPONSE_LAST))) {
/*
	 * Change the state so we continue
- * processing new requests.
+ * processing new requests and wake up the
+ * tasklet if there are posted sends.
*/
qp->s_state = OP(SEND_LAST);
+ if (qp->s_tail != qp->s_head)
+ tasklet_hi_schedule(&qp->s_task);
}
goto ack_done;
}
* Don't queue the NAK if a RDMA read, atomic, or
* NAK is pending though.
*/
- spin_lock(&qp->s_lock);
- if ((qp->s_ack_state >= OP(RDMA_READ_REQUEST) &&
- qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE) ||
- qp->s_nak_state != 0) {
- spin_unlock(&qp->s_lock);
+ if (qp->s_ack_state != OP(ACKNOWLEDGE) ||
+ qp->r_nak_state != 0)
goto done;
+ if (qp->r_ack_state < OP(COMPARE_SWAP)) {
+ qp->r_ack_state = OP(SEND_ONLY);
+ qp->r_nak_state = IB_NAK_PSN_ERROR;
+ /* Use the expected PSN. */
+ qp->r_ack_psn = qp->r_psn;
}
- qp->s_ack_state = OP(SEND_ONLY);
- qp->s_nak_state = IB_NAK_PSN_ERROR;
- /* Use the expected PSN. */
- qp->s_ack_psn = qp->r_psn;
- goto resched;
+ goto send_ack;
}
/*
* send the earliest so that RDMA reads can be restarted at
* the requester's expected PSN.
*/
- spin_lock(&qp->s_lock);
- if (qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE &&
- ipath_cmp24(psn, qp->s_ack_psn) >= 0) {
- if (qp->s_ack_state < IB_OPCODE_RDMA_READ_REQUEST)
- qp->s_ack_psn = psn;
- spin_unlock(&qp->s_lock);
- goto done;
- }
- switch (opcode) {
- case OP(RDMA_READ_REQUEST):
- /*
- * We have to be careful to not change s_rdma_sge
- * while ipath_do_rc_send() is using it and not
- * holding the s_lock.
- */
- if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
- qp->s_ack_state >= IB_OPCODE_RDMA_READ_REQUEST) {
- spin_unlock(&qp->s_lock);
- dev->n_rdma_dup_busy++;
- goto done;
- }
+ if (opcode == OP(RDMA_READ_REQUEST)) {
/* RETH comes after BTH */
if (!header_in_data)
reth = &ohdr->u.rc.reth;
reth = (struct ib_reth *)data;
data += sizeof(*reth);
}
+ /*
+ * If we receive a duplicate RDMA request, it means the
+ * requester saw a sequence error and needs to restart
+ * from an earlier point. We can abort the current
+ * RDMA read send in that case.
+ */
+ spin_lock_irq(&qp->s_lock);
+ if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
+ (qp->s_hdrwords || ipath_cmp24(psn, qp->s_ack_psn) >= 0)) {
+ /*
+ * We are already sending earlier requested data.
+ * Don't abort it to send later out of sequence data.
+ */
+ spin_unlock_irq(&qp->s_lock);
+ goto done;
+ }
qp->s_rdma_len = be32_to_cpu(reth->length);
if (qp->s_rdma_len != 0) {
u32 rkey = be32_to_cpu(reth->rkey);
ok = ipath_rkey_ok(dev, &qp->s_rdma_sge,
qp->s_rdma_len, vaddr, rkey,
IB_ACCESS_REMOTE_READ);
- if (unlikely(!ok))
+ if (unlikely(!ok)) {
+ spin_unlock_irq(&qp->s_lock);
goto done;
+ }
} else {
qp->s_rdma_sge.sg_list = NULL;
qp->s_rdma_sge.num_sge = 0;
qp->s_rdma_sge.sge.length = 0;
qp->s_rdma_sge.sge.sge_length = 0;
}
- break;
+ qp->s_ack_state = opcode;
+ qp->s_ack_psn = psn;
+ spin_unlock_irq(&qp->s_lock);
+ tasklet_hi_schedule(&qp->s_task);
+ goto send_ack;
+ }
+
+ /*
+ * A pending RDMA read will ACK anything before it so
+ * ignore earlier duplicate requests.
+ */
+ if (qp->s_ack_state != OP(ACKNOWLEDGE))
+ goto done;
+ /*
+ * If an ACK is pending, don't replace the pending ACK
+ * with an earlier one since the later one will ACK the earlier.
+ * Also, if we already have a pending atomic, send it.
+ */
+ if (qp->r_ack_state != OP(ACKNOWLEDGE) &&
+ (ipath_cmp24(psn, qp->r_ack_psn) <= 0 ||
+ qp->r_ack_state >= OP(COMPARE_SWAP)))
+ goto send_ack;
+ switch (opcode) {
case OP(COMPARE_SWAP):
case OP(FETCH_ADD):
/*
- * Check for the PSN of the last atomic operations
+ * Check for the PSN of the last atomic operation
* performed and resend the result if found.
*/
- if ((psn & IPS_PSN_MASK) != qp->r_atomic_psn) {
- spin_unlock(&qp->s_lock);
+ if ((psn & IPATH_PSN_MASK) != qp->r_atomic_psn)
goto done;
- }
- qp->s_ack_atomic = qp->r_atomic_data;
break;
}
- qp->s_ack_state = opcode;
- qp->s_nak_state = 0;
- qp->s_ack_psn = psn;
-resched:
+ qp->r_ack_state = opcode;
+ qp->r_nak_state = 0;
+ qp->r_ack_psn = psn;
+send_ack:
return 0;
done:
u32 hdrsize;
u32 psn;
u32 pad;
- unsigned long flags;
struct ib_wc wc;
u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
int diff;
} else
psn = be32_to_cpu(ohdr->bth[2]);
}
- /*
- * The opcode is in the low byte when its in network order
- * (top byte when in host order).
- */
- opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
/*
* Process responses (ACKs) before anything else. Note that the
* queue rather than the expected receive packet sequence number.
* In other words, this QP is the requester.
*/
+ opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
ipath_rc_rcv_resp(dev, ohdr, data, tlen, qp, opcode, psn,
hdrsize, pmtu, header_in_data);
- goto bail;
+ goto done;
}
- spin_lock_irqsave(&qp->r_rq.lock, flags);
-
/* Compute 24 bits worth of difference. */
diff = ipath_cmp24(psn, qp->r_psn);
if (unlikely(diff)) {
if (ipath_rc_rcv_error(dev, ohdr, data, qp, opcode,
psn, diff, header_in_data))
goto done;
- goto resched;
+ goto send_ack;
}
/* Check for opcode sequence errors. */
opcode == OP(SEND_LAST_WITH_IMMEDIATE))
break;
nack_inv:
- /*
- * A NAK will ACK earlier sends and RDMA writes. Don't queue the
- * NAK if a RDMA read, atomic, or NAK is pending though.
- */
- spin_lock(&qp->s_lock);
- if (qp->s_ack_state >= OP(RDMA_READ_REQUEST) &&
- qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE) {
- spin_unlock(&qp->s_lock);
- goto done;
- }
- /* XXX Flush WQEs */
- qp->state = IB_QPS_ERR;
- qp->s_ack_state = OP(SEND_ONLY);
- qp->s_nak_state = IB_NAK_INVALID_REQUEST;
- qp->s_ack_psn = qp->r_psn;
- goto resched;
+ /*
+ * A NAK will ACK earlier sends and RDMA writes.
+ * Don't queue the NAK if a RDMA read, atomic, or NAK
+ * is pending though.
+ */
+ if (qp->r_ack_state >= OP(COMPARE_SWAP))
+ goto send_ack;
+ /* XXX Flush WQEs */
+ qp->state = IB_QPS_ERR;
+ qp->r_ack_state = OP(SEND_ONLY);
+ qp->r_nak_state = IB_NAK_INVALID_REQUEST;
+ qp->r_ack_psn = qp->r_psn;
+ goto send_ack;
case OP(RDMA_WRITE_FIRST):
case OP(RDMA_WRITE_MIDDLE):
break;
goto nack_inv;
- case OP(RDMA_READ_REQUEST):
- case OP(COMPARE_SWAP):
- case OP(FETCH_ADD):
- /*
- * Drop all new requests until a response has been sent. A
- * new request then ACKs the RDMA response we sent. Relaxed
- * ordering would allow new requests to be processed but we
- * would need to keep a queue of rwqe's for all that are in
- * progress. Note that we can't RNR NAK this request since
- * the RDMA READ or atomic response is already queued to be
- * sent (unless we implement a response send queue).
- */
- goto done;
-
default:
if (opcode == OP(SEND_MIDDLE) ||
opcode == OP(SEND_LAST) ||
opcode == OP(RDMA_WRITE_LAST) ||
opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
goto nack_inv;
+ /*
+ * Note that it is up to the requester to not send a new
+ * RDMA read or atomic operation before receiving an ACK
+ * for the previous operation.
+ */
break;
}
* Don't queue the NAK if a RDMA read or atomic
* is pending though.
*/
- spin_lock(&qp->s_lock);
- if (qp->s_ack_state >=
- OP(RDMA_READ_REQUEST) &&
- qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE) {
- spin_unlock(&qp->s_lock);
- goto done;
- }
- qp->s_ack_state = OP(SEND_ONLY);
- qp->s_nak_state = IB_RNR_NAK | qp->s_min_rnr_timer;
- qp->s_ack_psn = qp->r_psn;
- goto resched;
+ if (qp->r_ack_state >= OP(COMPARE_SWAP))
+ goto send_ack;
+ qp->r_ack_state = OP(SEND_ONLY);
+ qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
+ qp->r_ack_psn = qp->r_psn;
+ goto send_ack;
}
qp->r_rcv_len = 0;
/* FALLTHROUGH */
if (unlikely(wc.byte_len > qp->r_len))
goto nack_inv;
ipath_copy_sge(&qp->r_sge, data, tlen);
- atomic_inc(&qp->msn);
+ qp->r_msn++;
if (opcode == OP(RDMA_WRITE_LAST) ||
opcode == OP(RDMA_WRITE_ONLY))
break;
ok = ipath_rkey_ok(dev, &qp->r_sge,
qp->r_len, vaddr, rkey,
IB_ACCESS_REMOTE_WRITE);
- if (unlikely(!ok)) {
- nack_acc:
- /*
- * A NAK will ACK earlier sends and RDMA
- * writes. Don't queue the NAK if a RDMA
- * read, atomic, or NAK is pending though.
- */
- spin_lock(&qp->s_lock);
- if (qp->s_ack_state >=
- OP(RDMA_READ_REQUEST) &&
- qp->s_ack_state !=
- IB_OPCODE_ACKNOWLEDGE) {
- spin_unlock(&qp->s_lock);
- goto done;
- }
- /* XXX Flush WQEs */
- qp->state = IB_QPS_ERR;
- qp->s_ack_state = OP(RDMA_WRITE_ONLY);
- qp->s_nak_state =
- IB_NAK_REMOTE_ACCESS_ERROR;
- qp->s_ack_psn = qp->r_psn;
- goto resched;
- }
+ if (unlikely(!ok))
+ goto nack_acc;
} else {
qp->r_sge.sg_list = NULL;
qp->r_sge.sge.mr = NULL;
reth = (struct ib_reth *)data;
data += sizeof(*reth);
}
- spin_lock(&qp->s_lock);
- if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
- qp->s_ack_state >= IB_OPCODE_RDMA_READ_REQUEST) {
- spin_unlock(&qp->s_lock);
- goto done;
- }
+ if (unlikely(!(qp->qp_access_flags &
+ IB_ACCESS_REMOTE_READ)))
+ goto nack_acc;
+ spin_lock_irq(&qp->s_lock);
qp->s_rdma_len = be32_to_cpu(reth->length);
if (qp->s_rdma_len != 0) {
u32 rkey = be32_to_cpu(reth->rkey);
qp->s_rdma_len, vaddr, rkey,
IB_ACCESS_REMOTE_READ);
if (unlikely(!ok)) {
- spin_unlock(&qp->s_lock);
+ spin_unlock_irq(&qp->s_lock);
goto nack_acc;
}
/*
qp->s_rdma_sge.sge.length = 0;
qp->s_rdma_sge.sge.sge_length = 0;
}
- if (unlikely(!(qp->qp_access_flags &
- IB_ACCESS_REMOTE_READ)))
- goto nack_acc;
/*
* We need to increment the MSN here instead of when we
* finish sending the result since a duplicate request would
* increment it more than once.
*/
- atomic_inc(&qp->msn);
+ qp->r_msn++;
+
qp->s_ack_state = opcode;
- qp->s_nak_state = 0;
qp->s_ack_psn = psn;
+ spin_unlock_irq(&qp->s_lock);
+
qp->r_psn++;
qp->r_state = opcode;
- goto rdmadone;
+ qp->r_nak_state = 0;
+
+ /* Call ipath_do_rc_send() in another thread. */
+ tasklet_hi_schedule(&qp->s_task);
+
+ goto done;
case OP(COMPARE_SWAP):
case OP(FETCH_ADD): {
goto nack_acc;
/* Perform atomic OP and save result. */
sdata = be64_to_cpu(ateth->swap_data);
- spin_lock(&dev->pending_lock);
+ spin_lock_irq(&dev->pending_lock);
qp->r_atomic_data = *(u64 *) qp->r_sge.sge.vaddr;
if (opcode == OP(FETCH_ADD))
*(u64 *) qp->r_sge.sge.vaddr =
else if (qp->r_atomic_data ==
be64_to_cpu(ateth->compare_data))
*(u64 *) qp->r_sge.sge.vaddr = sdata;
- spin_unlock(&dev->pending_lock);
- atomic_inc(&qp->msn);
- qp->r_atomic_psn = psn & IPS_PSN_MASK;
+ spin_unlock_irq(&dev->pending_lock);
+ qp->r_msn++;
+ qp->r_atomic_psn = psn & IPATH_PSN_MASK;
psn |= 1 << 31;
break;
}
}
qp->r_psn++;
qp->r_state = opcode;
+ qp->r_nak_state = 0;
/* Send an ACK if requested or required. */
if (psn & (1 << 31)) {
/*
* Coalesce ACKs unless there is a RDMA READ or
* ATOMIC pending.
*/
- spin_lock(&qp->s_lock);
- if (qp->s_ack_state == OP(ACKNOWLEDGE) ||
- qp->s_ack_state < IB_OPCODE_RDMA_READ_REQUEST) {
- qp->s_ack_state = opcode;
- qp->s_nak_state = 0;
- qp->s_ack_psn = psn;
- qp->s_ack_atomic = qp->r_atomic_data;
- goto resched;
+ if (qp->r_ack_state < OP(COMPARE_SWAP)) {
+ qp->r_ack_state = opcode;
+ qp->r_ack_psn = psn;
}
- spin_unlock(&qp->s_lock);
+ goto send_ack;
}
-done:
- spin_unlock_irqrestore(&qp->r_rq.lock, flags);
- goto bail;
+ goto done;
-resched:
+nack_acc:
/*
- * Try to send ACK right away but not if ipath_do_rc_send() is
- * active.
+ * A NAK will ACK earlier sends and RDMA writes.
+ * Don't queue the NAK if a RDMA read, atomic, or NAK
+ * is pending though.
*/
- if (qp->s_hdrwords == 0 &&
- (qp->s_ack_state < IB_OPCODE_RDMA_READ_REQUEST ||
- qp->s_ack_state >= IB_OPCODE_COMPARE_SWAP))
+ if (qp->r_ack_state < OP(COMPARE_SWAP)) {
+ /* XXX Flush WQEs */
+ qp->state = IB_QPS_ERR;
+ qp->r_ack_state = OP(RDMA_WRITE_ONLY);
+ qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
+ qp->r_ack_psn = qp->r_psn;
+ }
+send_ack:
+ /* Send ACK right away unless the send tasklet has a pending ACK. */
+ if (qp->s_ack_state == OP(ACKNOWLEDGE))
send_rc_ack(qp);
-rdmadone:
- spin_unlock(&qp->s_lock);
- spin_unlock_irqrestore(&qp->r_rq.lock, flags);
-
- /* Call ipath_do_rc_send() in another thread. */
- tasklet_hi_schedule(&qp->s_task);
-
-bail:
+done:
return;
}
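
The COMPARE_SWAP/FETCH_ADD handling above runs under dev->pending_lock and keeps the original target value for the atomic ACK. A stand-alone sketch of the same semantics, minus the locking (names here are illustrative):

	static unsigned long long rc_atomic_op(unsigned long long *vaddr,
					       int fetch_add,
					       unsigned long long swap_or_add,
					       unsigned long long compare)
	{
		unsigned long long old = *vaddr;

		if (fetch_add)
			*vaddr = old + swap_or_add;	/* FETCH_ADD */
		else if (old == compare)
			*vaddr = swap_or_add;		/* COMPARE_SWAP */
		return old;	/* sent back in atomic_ack_eth, saved as
				 * r_atomic_data for duplicate requests */
	}
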
/*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
/*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
*/
#include "ipath_verbs.h"
+#include "ipath_common.h"
/*
* Convert the AETH RNR timeout code into the number of milliseconds.
*
* Return 0 if no RWQE is available, otherwise return 1.
*
- * Called at interrupt level with the QP r_rq.lock held.
+ * Can be called from interrupt level.
*/
int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
{
+ unsigned long flags;
struct ipath_rq *rq;
struct ipath_srq *srq;
struct ipath_rwqe *wqe;
- int ret;
+ int ret = 1;
if (!qp->ibqp.srq) {
rq = &qp->r_rq;
+ spin_lock_irqsave(&rq->lock, flags);
+
if (unlikely(rq->tail == rq->head)) {
ret = 0;
- goto bail;
+ goto done;
}
wqe = get_rwqe_ptr(rq, rq->tail);
qp->r_wr_id = wqe->wr_id;
}
if (++rq->tail >= rq->size)
rq->tail = 0;
- ret = 1;
- goto bail;
+ goto done;
}
srq = to_isrq(qp->ibqp.srq);
rq = &srq->rq;
- spin_lock(&rq->lock);
+ spin_lock_irqsave(&rq->lock, flags);
+
if (unlikely(rq->tail == rq->head)) {
- spin_unlock(&rq->lock);
ret = 0;
- goto bail;
+ goto done;
}
wqe = get_rwqe_ptr(rq, rq->tail);
qp->r_wr_id = wqe->wr_id;
n = rq->head - rq->tail;
if (n < srq->limit) {
srq->limit = 0;
- spin_unlock(&rq->lock);
+ spin_unlock_irqrestore(&rq->lock, flags);
ev.device = qp->ibqp.device;
ev.element.srq = qp->ibqp.srq;
ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
srq->ibsrq.event_handler(&ev,
srq->ibsrq.srq_context);
- } else
- spin_unlock(&rq->lock);
- } else
- spin_unlock(&rq->lock);
- ret = 1;
+ goto bail;
+ }
+ }
+done:
+ spin_unlock_irqrestore(&rq->lock, flags);
bail:
return ret;
}
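
ipath_get_rwqe() above disarms the limit (srq->limit = 0) when it fires IB_EVENT_SRQ_LIMIT_REACHED, so the consumer must re-arm it after replenishing. A hedged sketch of a consumer-side handler using the standard verbs API (the limit value of 16 is illustrative):

	#include <rdma/ib_verbs.h>

	static void example_srq_event_handler(struct ib_event *ev, void *context)
	{
		if (ev->event == IB_EVENT_SRQ_LIMIT_REACHED) {
			struct ib_srq_attr attr = { .srq_limit = 16 };

			/* ... post more receive WRs to the SRQ here ... */
			ib_modify_srq(ev->element.srq, &attr, IB_SRQ_LIMIT);
		}
	}
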
/**
 * ipath_ruc_loopback - handle UC and RC loopback requests
* @sqp: the loopback QP
- * @wc: the work completion entry
*
* This is called from ipath_do_uc_send() or ipath_do_rc_send() to
* forward a WQE addressed to the same HCA.
* receive interrupts since this is a connected protocol and all packets
* will pass through here.
*/
-void ipath_ruc_loopback(struct ipath_qp *sqp, struct ib_wc *wc)
+static void ipath_ruc_loopback(struct ipath_qp *sqp)
{
struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
struct ipath_qp *qp;
struct ipath_swqe *wqe;
struct ipath_sge *sge;
unsigned long flags;
+ struct ib_wc wc;
u64 sdata;
qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);
wqe = get_swqe_ptr(sqp, sqp->s_last);
spin_unlock_irqrestore(&sqp->s_lock, flags);
- wc->wc_flags = 0;
- wc->imm_data = 0;
+ wc.wc_flags = 0;
+ wc.imm_data = 0;
sqp->s_sge.sge = wqe->sg_list[0];
sqp->s_sge.sg_list = wqe->sg_list + 1;
sqp->s_len = wqe->length;
switch (wqe->wr.opcode) {
case IB_WR_SEND_WITH_IMM:
- wc->wc_flags = IB_WC_WITH_IMM;
- wc->imm_data = wqe->wr.imm_data;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ wc.imm_data = wqe->wr.imm_data;
/* FALLTHROUGH */
case IB_WR_SEND:
- spin_lock_irqsave(&qp->r_rq.lock, flags);
if (!ipath_get_rwqe(qp, 0)) {
rnr_nak:
- spin_unlock_irqrestore(&qp->r_rq.lock, flags);
/* Handle RNR NAK */
if (qp->ibqp.qp_type == IB_QPT_UC)
goto send_comp;
if (sqp->s_rnr_retry == 0) {
- wc->status = IB_WC_RNR_RETRY_EXC_ERR;
+ wc.status = IB_WC_RNR_RETRY_EXC_ERR;
goto err;
}
if (sqp->s_rnr_retry_cnt < 7)
sqp->s_rnr_retry--;
dev->n_rnr_naks++;
sqp->s_rnr_timeout =
- ib_ipath_rnr_table[sqp->s_min_rnr_timer];
+ ib_ipath_rnr_table[sqp->r_min_rnr_timer];
ipath_insert_rnr_queue(sqp);
goto done;
}
- spin_unlock_irqrestore(&qp->r_rq.lock, flags);
break;
case IB_WR_RDMA_WRITE_WITH_IMM:
- wc->wc_flags = IB_WC_WITH_IMM;
- wc->imm_data = wqe->wr.imm_data;
- spin_lock_irqsave(&qp->r_rq.lock, flags);
+ wc.wc_flags = IB_WC_WITH_IMM;
+ wc.imm_data = wqe->wr.imm_data;
if (!ipath_get_rwqe(qp, 1))
goto rnr_nak;
- spin_unlock_irqrestore(&qp->r_rq.lock, flags);
/* FALLTHROUGH */
case IB_WR_RDMA_WRITE:
if (wqe->length == 0)
wqe->wr.wr.rdma.rkey,
IB_ACCESS_REMOTE_WRITE))) {
acc_err:
- wc->status = IB_WC_REM_ACCESS_ERR;
+ wc.status = IB_WC_REM_ACCESS_ERR;
err:
- wc->wr_id = wqe->wr.wr_id;
- wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
- wc->vendor_err = 0;
- wc->byte_len = 0;
- wc->qp_num = sqp->ibqp.qp_num;