llseek: automatically add .llseek fop
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index cd2d5cd..6078992 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
 #include <linux/cdev.h>
 #include <linux/swap.h>
 #include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/smp_lock.h>
 #include <asm/pgtable.h>
 
 #include "ipath_kernel.h"
-#include "ips_common.h"
-#include "ipath_layer.h"
+#include "ipath_common.h"
+#include "ipath_user_sdma.h"
 
 static int ipath_open(struct inode *, struct file *);
 static int ipath_close(struct inode *, struct file *);
 static ssize_t ipath_write(struct file *, const char __user *, size_t,
                           loff_t *);
+static ssize_t ipath_writev(struct kiocb *, const struct iovec *,
+                           unsigned long, loff_t);
 static unsigned int ipath_poll(struct file *, struct poll_table_struct *);
 static int ipath_mmap(struct file *, struct vm_area_struct *);
 
-static struct file_operations ipath_file_ops = {
+static const struct file_operations ipath_file_ops = {
        .owner = THIS_MODULE,
        .write = ipath_write,
+       .aio_write = ipath_writev,
        .open = ipath_open,
        .release = ipath_close,
        .poll = ipath_poll,
-       .mmap = ipath_mmap
+       .mmap = ipath_mmap,
+       .llseek = noop_llseek,
 };
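
A note on the one functional change in this hunk: with the old BKL-era
default llseek behavior being removed tree-wide, every file_operations
needs an explicit .llseek. For a device like this, whose read/write paths
never use f_pos, noop_llseek is the right choice; its kernel definition is
essentially the one-liner sketched here for context:

#include <linux/fs.h>

/* seeks "succeed" but the file position never moves */
loff_t noop_llseek(struct file *file, loff_t offset, int whence)
{
	return file->f_pos;
}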
 
-static int ipath_get_base_info(struct ipath_portdata *pd,
+/*
+ * Convert kernel virtual addresses to physical addresses so they don't
+ * potentially conflict with the chip addresses used as mmap offsets.
+ * It doesn't really matter what mmap offset we use as long as we can
+ * interpret it correctly.
+ */
+static u64 cvt_kvaddr(void *p)
+{
+       struct page *page;
+       u64 paddr = 0;
+
+       page = vmalloc_to_page(p);
+       if (page)
+               paddr = page_to_pfn(page) << PAGE_SHIFT;
+
+       return paddr;
+}
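
A minimal userspace-side sketch of this convention (the helper name is
hypothetical, not part of this patch): the u64 cookie stored in fields
such as spi_subport_uregbase below travels back to the driver unchanged as
an mmap offset, where mmap_kvaddr() recomputes cvt_kvaddr() on each shared
region and compares.

#include <stdint.h>
#include <sys/mman.h>

/* hypothetical helper: map a shared-port region by its cookie */
static void *map_by_cookie(int fd, uint64_t cookie, size_t len)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
		       fd, (off_t) cookie);
	return p == MAP_FAILED ? NULL : p;
}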
+
+static int ipath_get_base_info(struct file *fp,
                               void __user *ubase, size_t ubase_size)
 {
+       struct ipath_portdata *pd = port_fp(fp);
        int ret = 0;
        struct ipath_base_info *kinfo = NULL;
        struct ipath_devdata *dd = pd->port_dd;
+       unsigned subport_cnt;
+       int shared, master;
+       size_t sz;
+
+       subport_cnt = pd->port_subport_cnt;
+       if (!subport_cnt) {
+               shared = 0;
+               master = 0;
+               subport_cnt = 1;
+       } else {
+               shared = 1;
+               master = !subport_fp(fp);
+       }
 
-       if (ubase_size < sizeof(*kinfo)) {
+       sz = sizeof(*kinfo);
+       /* If port sharing is not requested, allow the old size structure */
+       if (!shared)
+               sz -= 7 * sizeof(u64);
+       if (ubase_size < sz) {
                ipath_cdbg(PROC,
-                          "Base size %lu, need %lu (version mismatch?)\n",
-                          (unsigned long) ubase_size,
-                          (unsigned long) sizeof(*kinfo));
+                          "Base size %zu, need %zu (version mismatch?)\n",
+                          ubase_size, sz);
                ret = -EINVAL;
                goto bail;
        }
@@ -96,7 +140,9 @@ static int ipath_get_base_info(struct ipath_portdata *pd,
        kinfo->spi_rcv_egrperchunk = pd->port_rcvegrbufs_perchunk;
        kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
                pd->port_rcvegrbuf_chunks;
-       kinfo->spi_tidcnt = dd->ipath_rcvtidcnt;
+       kinfo->spi_tidcnt = dd->ipath_rcvtidcnt / subport_cnt;
+       if (master)
+               kinfo->spi_tidcnt += dd->ipath_rcvtidcnt % subport_cnt;
        /*
         * for this use, may be ipath_cfgports summed over all chips that
         * are configured and present
@@ -119,31 +165,94 @@ static int ipath_get_base_info(struct ipath_portdata *pd,
         * page_address() macro worked, but in 2.6.11, even that returns the
         * full 64 bit address (upper bits all 1's).  So far, using the
         * physical addresses (or chip offsets, for chip mapping) works, but
-        * no doubt some future kernel release will chang that, and we'll be
-        * on to yet another method of dealing with this
+        * no doubt some future kernel release will change that, and we'll be
+        * on to yet another method of dealing with this.
         */
        kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys;
+       kinfo->spi_rcvhdr_tailaddr = (u64) pd->port_rcvhdrqtailaddr_phys;
        kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys;
        kinfo->spi_pioavailaddr = (u64) dd->ipath_pioavailregs_phys;
        kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
                (void *) dd->ipath_statusp -
                (void *) dd->ipath_pioavailregs_dma;
-       kinfo->spi_piobufbase = (u64) pd->port_piobufs;
-       kinfo->__spi_uregbase =
-               dd->ipath_uregbase + dd->ipath_palign * pd->port_port;
+       if (!shared) {
+               kinfo->spi_piocnt = pd->port_piocnt;
+               kinfo->spi_piobufbase = (u64) pd->port_piobufs;
+               kinfo->__spi_uregbase = (u64) dd->ipath_uregbase +
+                       dd->ipath_ureg_align * pd->port_port;
+       } else if (master) {
+               kinfo->spi_piocnt = (pd->port_piocnt / subport_cnt) +
+                                   (pd->port_piocnt % subport_cnt);
+               /* Master's PIO buffers are after all the slaves' */
+               kinfo->spi_piobufbase = (u64) pd->port_piobufs +
+                       dd->ipath_palign *
+                       (pd->port_piocnt - kinfo->spi_piocnt);
+       } else {
+               unsigned slave = subport_fp(fp) - 1;
+
+               kinfo->spi_piocnt = pd->port_piocnt / subport_cnt;
+               kinfo->spi_piobufbase = (u64) pd->port_piobufs +
+                       dd->ipath_palign * kinfo->spi_piocnt * slave;
+       }
+
+       if (shared) {
+               kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase +
+                       dd->ipath_ureg_align * pd->port_port;
+               kinfo->spi_port_rcvegrbuf = kinfo->spi_rcv_egrbufs;
+               kinfo->spi_port_rcvhdr_base = kinfo->spi_rcvhdr_base;
+               kinfo->spi_port_rcvhdr_tailaddr = kinfo->spi_rcvhdr_tailaddr;
+
+               kinfo->__spi_uregbase = cvt_kvaddr(pd->subport_uregbase +
+                       PAGE_SIZE * subport_fp(fp));
+
+               kinfo->spi_rcvhdr_base = cvt_kvaddr(pd->subport_rcvhdr_base +
+                       pd->port_rcvhdrq_size * subport_fp(fp));
+               kinfo->spi_rcvhdr_tailaddr = 0;
+               kinfo->spi_rcv_egrbufs = cvt_kvaddr(pd->subport_rcvegrbuf +
+                       pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size *
+                       subport_fp(fp));
+
+               kinfo->spi_subport_uregbase =
+                       cvt_kvaddr(pd->subport_uregbase);
+               kinfo->spi_subport_rcvegrbuf =
+                       cvt_kvaddr(pd->subport_rcvegrbuf);
+               kinfo->spi_subport_rcvhdr_base =
+                       cvt_kvaddr(pd->subport_rcvhdr_base);
+               ipath_cdbg(PROC, "port %u flags %x %llx %llx %llx\n",
+                       kinfo->spi_port, kinfo->spi_runtime_flags,
+                       (unsigned long long) kinfo->spi_subport_uregbase,
+                       (unsigned long long) kinfo->spi_subport_rcvegrbuf,
+                       (unsigned long long) kinfo->spi_subport_rcvhdr_base);
+       }
 
-       kinfo->spi_pioindex = dd->ipath_pbufsport * (pd->port_port - 1);
-       kinfo->spi_piocnt = dd->ipath_pbufsport;
+       /*
+        * All user buffers are 2KB buffers.  If we ever support
+        * giving 4KB buffers to user processes, this will need some
+        * work.
+        */
+       kinfo->spi_pioindex = (kinfo->spi_piobufbase -
+               (dd->ipath_piobufbase & 0xffffffff)) / dd->ipath_palign;
        kinfo->spi_pioalign = dd->ipath_palign;
 
        kinfo->spi_qpair = IPATH_KD_QP;
-       kinfo->spi_piosize = dd->ipath_ibmaxlen;
+       /*
+        * user mode PIO buffers are always 2KB, even when 4KB can
+        * be received, and sent via the kernel; this is ibmaxlen
+        * for 2K MTU.
+        */
+       kinfo->spi_piosize = dd->ipath_piosize2k - 2 * sizeof(u32);
        kinfo->spi_mtu = dd->ipath_ibmaxlen;    /* maxlen, not ibmtu */
        kinfo->spi_port = pd->port_port;
+       kinfo->spi_subport = subport_fp(fp);
        kinfo->spi_sw_version = IPATH_KERN_SWVERSION;
        kinfo->spi_hw_version = dd->ipath_revision;
 
-       if (copy_to_user(ubase, kinfo, sizeof(*kinfo)))
+       if (master)
+               kinfo->spi_runtime_flags |= IPATH_RUNTIME_MASTER;
+
+       sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
+       if (copy_to_user(ubase, kinfo, sz))
                ret = -EFAULT;
 
 bail:
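
The piocnt carving above (and the matching tidcnt carving in
ipath_tid_update() below) follows one pattern: every subport gets the
integer quotient, the master (subport 0) additionally absorbs the
remainder, and the master's slice is placed after all of the slaves'. A
standalone check of that arithmetic, with made-up counts:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned total = 128;	/* assumed port_piocnt or rcvtidcnt */
	unsigned nsub = 3;	/* assumed port_subport_cnt */
	unsigned slave = total / nsub;		/* 42 per slave */
	unsigned master = slave + total % nsub;	/* 44 for the master */
	unsigned master_off = total - master;	/* 84; slaves sit at 0, 42 */

	assert(master + (nsub - 1) * slave == total);
	printf("slave=%u master=%u master_off=%u\n",
	       slave, master, master_off);
	return 0;
}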
@@ -154,6 +263,7 @@ bail:
 /**
  * ipath_tid_update - update a port TID
  * @pd: the port
+ * @fp: the ipath device file
  * @ti: the TID information
  *
  * The new implementation as of Oct 2004 is that the driver assigns
@@ -176,11 +286,11 @@ bail:
  * virtually contiguous pages, that should change to improve
  * performance.
  */
-static int ipath_tid_update(struct ipath_portdata *pd,
+static int ipath_tid_update(struct ipath_portdata *pd, struct file *fp,
                            const struct ipath_tid_info *ti)
 {
        int ret = 0, ntids;
-       u32 tid, porttid, cnt, i, tidcnt;
+       u32 tid, porttid, cnt, i, tidcnt, tidoff;
        u16 *tidlist;
        struct ipath_devdata *dd = pd->port_dd;
        u64 physaddr;
@@ -188,6 +298,7 @@ static int ipath_tid_update(struct ipath_portdata *pd,
        u64 __iomem *tidbase;
        unsigned long tidmap[8];
        struct page **pagep = NULL;
+       unsigned subport = subport_fp(fp);
 
        if (!dd->ipath_pageshadow) {
                ret = -ENOMEM;
@@ -204,20 +315,34 @@ static int ipath_tid_update(struct ipath_portdata *pd,
                ret = -EFAULT;
                goto done;
        }
-       tidcnt = dd->ipath_rcvtidcnt;
-       if (cnt >= tidcnt) {
+       porttid = pd->port_port * dd->ipath_rcvtidcnt;
+       if (!pd->port_subport_cnt) {
+               tidcnt = dd->ipath_rcvtidcnt;
+               tid = pd->port_tidcursor;
+               tidoff = 0;
+       } else if (!subport) {
+               tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) +
+                        (dd->ipath_rcvtidcnt % pd->port_subport_cnt);
+               tidoff = dd->ipath_rcvtidcnt - tidcnt;
+               porttid += tidoff;
+               tid = tidcursor_fp(fp);
+       } else {
+               tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt;
+               tidoff = tidcnt * (subport - 1);
+               porttid += tidoff;
+               tid = tidcursor_fp(fp);
+       }
+       if (cnt > tidcnt) {
                /* make sure it all fits in port_tid_pg_list */
                dev_info(&dd->pcidev->dev, "Process tried to allocate %u "
                         "TIDs, only trying max (%u)\n", cnt, tidcnt);
                cnt = tidcnt;
        }
-       pagep = (struct page **)pd->port_tid_pg_list;
-       tidlist = (u16 *) (&pagep[cnt]);
+       pagep = &((struct page **) pd->port_tid_pg_list)[tidoff];
+       tidlist = &((u16 *) &pagep[dd->ipath_rcvtidcnt])[tidoff];
 
        memset(tidmap, 0, sizeof(tidmap));
-       tid = pd->port_tidcursor;
        /* before decrement; chip actual # */
-       porttid = pd->port_port * tidcnt;
        ntids = tidcnt;
        tidbase = (u64 __iomem *) (((char __iomem *) dd->ipath_kregbase) +
                                   dd->ipath_rcvtidbase +
@@ -274,22 +399,26 @@ static int ipath_tid_update(struct ipath_portdata *pd,
                        ret = -ENOMEM;
                        break;
                }
-               tidlist[i] = tid;
+               tidlist[i] = tid + tidoff;
                ipath_cdbg(VERBOSE, "Updating idx %u to TID %u, "
-                          "vaddr %lx\n", i, tid, vaddr);
+                          "vaddr %lx\n", i, tid + tidoff, vaddr);
                /* we "know" system pages and TID pages are same size */
                dd->ipath_pageshadow[porttid + tid] = pagep[i];
+               dd->ipath_physshadow[porttid + tid] = ipath_map_page(
+                       dd->pcidev, pagep[i], 0, PAGE_SIZE,
+                       PCI_DMA_FROMDEVICE);
                /*
                 * don't need atomic or it's overhead
                 */
                __set_bit(tid, tidmap);
-               physaddr = page_to_phys(pagep[i]);
+               physaddr = dd->ipath_physshadow[porttid + tid];
                ipath_stats.sps_pagelocks++;
                ipath_cdbg(VERBOSE,
                           "TID %u, vaddr %lx, physaddr %llx pgp %p\n",
                           tid, vaddr, (unsigned long long) physaddr,
                           pagep[i]);
-               dd->ipath_f_put_tid(dd, &tidbase[tid], 1, physaddr);
+               dd->ipath_f_put_tid(dd, &tidbase[tid], RCVHQ_RCV_TYPE_EXPECTED,
+                                   physaddr);
                /*
                 * don't check this tid in ipath_portshadow, since we
                 * just filled it in; start with the next one.
@@ -315,8 +444,12 @@ static int ipath_tid_update(struct ipath_portdata *pd,
                        if (dd->ipath_pageshadow[porttid + tid]) {
                                ipath_cdbg(VERBOSE, "Freeing TID %u\n",
                                           tid);
-                               dd->ipath_f_put_tid(dd, &tidbase[tid], 1,
+                               dd->ipath_f_put_tid(dd, &tidbase[tid],
+                                                   RCVHQ_RCV_TYPE_EXPECTED,
                                                    dd->ipath_tidinvalid);
+                               pci_unmap_page(dd->pcidev,
+                                       dd->ipath_physshadow[porttid + tid],
+                                       PAGE_SIZE, PCI_DMA_FROMDEVICE);
                                dd->ipath_pageshadow[porttid + tid] = NULL;
                                ipath_stats.sps_pageunlocks++;
                        }
@@ -341,7 +474,10 @@ static int ipath_tid_update(struct ipath_portdata *pd,
                }
                if (tid == tidcnt)
                        tid = 0;
-               pd->port_tidcursor = tid;
+               if (!pd->port_subport_cnt)
+                       pd->port_tidcursor = tid;
+               else
+                       tidcursor_fp(fp) = tid;
        }
 
 done:
@@ -354,6 +490,7 @@ done:
 /**
  * ipath_tid_free - free a port TID
  * @pd: the port
+ * @subport: the subport
  * @ti: the TID info
  *
  * right now we are unlocking one page at a time, but since
@@ -367,7 +504,7 @@ done:
  * they pass in to us.
  */
 
-static int ipath_tid_free(struct ipath_portdata *pd,
+static int ipath_tid_free(struct ipath_portdata *pd, unsigned subport,
                          const struct ipath_tid_info *ti)
 {
        int ret = 0;
@@ -388,11 +525,20 @@ static int ipath_tid_free(struct ipath_portdata *pd,
        }
 
        porttid = pd->port_port * dd->ipath_rcvtidcnt;
+       if (!pd->port_subport_cnt)
+               tidcnt = dd->ipath_rcvtidcnt;
+       else if (!subport) {
+               tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) +
+                        (dd->ipath_rcvtidcnt % pd->port_subport_cnt);
+               porttid += dd->ipath_rcvtidcnt - tidcnt;
+       } else {
+               tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt;
+               porttid += tidcnt * (subport - 1);
+       }
        tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
                                   dd->ipath_rcvtidbase +
                                   porttid * sizeof(*tidbase));
 
-       tidcnt = dd->ipath_rcvtidcnt;
        limit = sizeof(tidmap) * BITS_PER_BYTE;
        if (limit > tidcnt)
                /* just in case size changes in future */
@@ -413,13 +559,18 @@ static int ipath_tid_free(struct ipath_portdata *pd,
                        continue;
                cnt++;
                if (dd->ipath_pageshadow[porttid + tid]) {
+                       struct page *p;
+                       p = dd->ipath_pageshadow[porttid + tid];
+                       dd->ipath_pageshadow[porttid + tid] = NULL;
                        ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n",
-                                  pd->port_pid, tid);
-                       dd->ipath_f_put_tid(dd, &tidbase[tid], 1,
+                                  pid_nr(pd->port_pid), tid);
+                       dd->ipath_f_put_tid(dd, &tidbase[tid],
+                                           RCVHQ_RCV_TYPE_EXPECTED,
                                            dd->ipath_tidinvalid);
-                       ipath_release_user_pages(
-                               &dd->ipath_pageshadow[porttid + tid], 1);
-                       dd->ipath_pageshadow[porttid + tid] = NULL;
+                       pci_unmap_page(dd->pcidev,
+                               dd->ipath_physshadow[porttid + tid],
+                               PAGE_SIZE, PCI_DMA_FROMDEVICE);
+                       ipath_release_user_pages(&p, 1);
                        ipath_stats.sps_pageunlocks++;
                } else
                        ipath_dbg("Unused tid %u, ignoring\n", tid);
@@ -457,7 +608,7 @@ static int ipath_set_part_key(struct ipath_portdata *pd, u16 key)
        u16 lkey = key & 0x7FFF;
        int ret;
 
-       if (lkey == (IPS_DEFAULT_P_KEY & 0x7FFF)) {
+       if (lkey == (IPATH_DEFAULT_P_KEY & 0x7FFF)) {
                /* nothing to do; this key always valid */
                ret = 0;
                goto bail;
@@ -581,20 +732,23 @@ bail:
 /**
  * ipath_manage_rcvq - manage a port's receive queue
  * @pd: the port
+ * @subport: the subport
  * @start_stop: action to carry out
  *
  * start_stop == 0 disables receive on the port, for use in queue
  * overflow conditions.  start_stop==1 re-enables, to be used to
  * re-init the software copy of the head register
  */
-static int ipath_manage_rcvq(struct ipath_portdata *pd, int start_stop)
+static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
+                            int start_stop)
 {
        struct ipath_devdata *dd = pd->port_dd;
-       u64 tval;
 
-       ipath_cdbg(PROC, "%sabling rcv for unit %u port %u\n",
+       ipath_cdbg(PROC, "%sabling rcv for unit %u port %u:%u\n",
                   start_stop ? "en" : "dis", dd->ipath_unit,
-                  pd->port_port);
+                  pd->port_port, subport);
+       if (subport)
+               goto bail;
        /* atomically clear receive enable port. */
        if (start_stop) {
                /*
@@ -609,16 +763,17 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, int start_stop)
                 * updated and correct itself, even in the face of software
                 * bugs.
                 */
-               *pd->port_rcvhdrtail_kvaddr = 0;
-               set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
+               if (pd->port_rcvhdrtail_kvaddr)
+                       ipath_clear_rcvhdrtail(pd);
+               set_bit(dd->ipath_r_portenable_shift + pd->port_port,
                        &dd->ipath_rcvctrl);
        } else
-               clear_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
+               clear_bit(dd->ipath_r_portenable_shift + pd->port_port,
                          &dd->ipath_rcvctrl);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
                         dd->ipath_rcvctrl);
        /* now be sure chip saw it before we return */
-       tval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
        if (start_stop) {
                /*
                 * And try to be sure that tail reg update has happened too.
@@ -627,9 +782,10 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, int start_stop)
                 * in memory copy, since we could overwrite an update by the
                 * chip if we did.
                 */
-               tval = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
+               ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
        }
        /* always; new head should be equal to new tail; see above */
+bail:
        return 0;
 }
 
@@ -687,6 +843,36 @@ static void ipath_clean_part_key(struct ipath_portdata *pd,
        }
 }
 
+/*
+ * Initialize the port data with the receive buffer sizes
+ * so this can be done while the master port is locked.
+ * Otherwise, there is a race with a slave opening the port
+ * and seeing these fields uninitialized.
+ */
+static void init_user_egr_sizes(struct ipath_portdata *pd)
+{
+       struct ipath_devdata *dd = pd->port_dd;
+       unsigned egrperchunk, egrcnt, size;
+
+       /*
+        * to avoid wasting a lot of memory, we allocate 32KB chunks of
+        * physically contiguous memory, advance through it until used up
+        * and then allocate more.  Of course, we need memory to store those
+        * extra pointers, now.  Started out with 256KB, but under heavy
+        * memory pressure (creating large files and then copying them over
+        * NFS while doing lots of MPI jobs), we hit some allocation
+        * failures, even though we can sleep...  (2.6.10) Still get
+        * failures at 64K.  32K is the lowest we can go without wasting
+        * additional memory.
+        */
+       size = 0x8000;
+       egrperchunk = size / dd->ipath_rcvegrbufsize;
+       egrcnt = dd->ipath_rcvegrcnt;
+       pd->port_rcvegrbuf_chunks = (egrcnt + egrperchunk - 1) / egrperchunk;
+       pd->port_rcvegrbufs_perchunk = egrperchunk;
+       pd->port_rcvegrbuf_size = size;
+}
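
Worked through with illustrative numbers (the eager buffer size and count
below are assumptions, not values taken from this patch):

#include <stdio.h>

int main(void)
{
	unsigned size = 0x8000;		/* 32KB chunk, as above */
	unsigned egrsize = 2048;	/* assumed ipath_rcvegrbufsize */
	unsigned egrcnt = 2048;		/* assumed ipath_rcvegrcnt */
	unsigned perchunk = size / egrsize;			/* 16 */
	unsigned chunks = (egrcnt + perchunk - 1) / perchunk;	/* 128 */

	printf("%u eager buffers per chunk, %u chunks\n", perchunk, chunks);
	return 0;
}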
+
 /**
  * ipath_create_user_egr - allocate eager TID buffers
  * @pd: the port to allocate TID buffers for
@@ -702,55 +888,43 @@ static void ipath_clean_part_key(struct ipath_portdata *pd,
 static int ipath_create_user_egr(struct ipath_portdata *pd)
 {
        struct ipath_devdata *dd = pd->port_dd;
-       unsigned e, egrcnt, alloced, egrperchunk, chunk, egrsize, egroff;
+       unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
        size_t size;
        int ret;
+       gfp_t gfp_flags;
+
+       /*
+        * GFP_USER, but without GFP_FS, so buffer cache can be
+        * coalesced (we hope); otherwise, even at order 4,
+        * heavy filesystem activity makes these fail, and we can
+        * use compound pages.
+        */
+       gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
 
        egrcnt = dd->ipath_rcvegrcnt;
        /* TID number offset for this port */
-       egroff = pd->port_port * egrcnt;
+       egroff = (pd->port_port - 1) * egrcnt + dd->ipath_p0_rcvegrcnt;
        egrsize = dd->ipath_rcvegrbufsize;
        ipath_cdbg(VERBOSE, "Allocating %d egr buffers, at egrtid "
                   "offset %x, egrsize %u\n", egrcnt, egroff, egrsize);
 
-       /*
-        * to avoid wasting a lot of memory, we allocate 32KB chunks of
-        * physically contiguous memory, advance through it until used up
-        * and then allocate more.  Of course, we need memory to store those
-        * extra pointers, now.  Started out with 256KB, but under heavy
-        * memory pressure (creating large files and then copying them over
-        * NFS while doing lots of MPI jobs), we hit some allocation
-        * failures, even though we can sleep...  (2.6.10) Still get
-        * failures at 64K.  32K is the lowest we can go without waiting
-        * more memory again.  It seems likely that the coalescing in
-        * free_pages, etc. still has issues (as it has had previously
-        * during 2.6.x development).
-        */
-       size = 0x8000;
-       alloced = ALIGN(egrsize * egrcnt, size);
-       egrperchunk = size / egrsize;
-       chunk = (egrcnt + egrperchunk - 1) / egrperchunk;
-       pd->port_rcvegrbuf_chunks = chunk;
-       pd->port_rcvegrbufs_perchunk = egrperchunk;
-       pd->port_rcvegrbuf_size = size;
-       pd->port_rcvegrbuf = vmalloc(chunk * sizeof(pd->port_rcvegrbuf[0]));
+       chunk = pd->port_rcvegrbuf_chunks;
+       egrperchunk = pd->port_rcvegrbufs_perchunk;
+       size = pd->port_rcvegrbuf_size;
+       pd->port_rcvegrbuf = kmalloc(chunk * sizeof(pd->port_rcvegrbuf[0]),
+                                    GFP_KERNEL);
        if (!pd->port_rcvegrbuf) {
                ret = -ENOMEM;
                goto bail;
        }
        pd->port_rcvegrbuf_phys =
-               vmalloc(chunk * sizeof(pd->port_rcvegrbuf_phys[0]));
+               kmalloc(chunk * sizeof(pd->port_rcvegrbuf_phys[0]),
+                       GFP_KERNEL);
        if (!pd->port_rcvegrbuf_phys) {
                ret = -ENOMEM;
                goto bail_rcvegrbuf;
        }
        for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
-               /*
-                * GFP_USER, but without GFP_FS, so buffer cache can be
-                * coalesced (we hope); otherwise, even at order 4,
-                * heavy filesystem activity makes these fail
-                */
-               gfp_t gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
 
                pd->port_rcvegrbuf[e] = dma_alloc_coherent(
                        &dd->pcidev->dev, size, &pd->port_rcvegrbuf_phys[e],
@@ -773,7 +947,8 @@ static int ipath_create_user_egr(struct ipath_portdata *pd)
                                            (u64 __iomem *)
                                            ((char __iomem *)
                                             dd->ipath_kregbase +
-                                            dd->ipath_rcvegrbase), 0, pa);
+                                            dd->ipath_rcvegrbase),
+                                           RCVHQ_RCV_TYPE_EAGER, pa);
                        pa += egrsize;
                }
                cond_resched(); /* don't hog the cpu */
@@ -784,154 +959,63 @@ static int ipath_create_user_egr(struct ipath_portdata *pd)
 
 bail_rcvegrbuf_phys:
        for (e = 0; e < pd->port_rcvegrbuf_chunks &&
-                    pd->port_rcvegrbuf[e]; e++)
+               pd->port_rcvegrbuf[e]; e++) {
                dma_free_coherent(&dd->pcidev->dev, size,
                                  pd->port_rcvegrbuf[e],
                                  pd->port_rcvegrbuf_phys[e]);
 
-       vfree(pd->port_rcvegrbuf_phys);
+       }
+       kfree(pd->port_rcvegrbuf_phys);
        pd->port_rcvegrbuf_phys = NULL;
 bail_rcvegrbuf:
-       vfree(pd->port_rcvegrbuf);
+       kfree(pd->port_rcvegrbuf);
        pd->port_rcvegrbuf = NULL;
 bail:
        return ret;
 }
 
-static int ipath_do_user_init(struct ipath_portdata *pd,
-                             const struct ipath_user_info *uinfo)
+
+/* common code for the mappings on dma_alloc_coherent mem */
+static int ipath_mmap_mem(struct vm_area_struct *vma,
+       struct ipath_portdata *pd, unsigned len, int write_ok,
+       void *kvaddr, char *what)
 {
-       int ret = 0;
        struct ipath_devdata *dd = pd->port_dd;
-       u64 physaddr, uaddr, off, atmp;
-       struct page *pagep;
-       u32 head32;
-       u64 head;
+       unsigned long pfn;
+       int ret;
 
-       /* for now, if major version is different, bail */
-       if ((uinfo->spu_userversion >> 16) != IPATH_USER_SWMAJOR) {
+       if ((vma->vm_end - vma->vm_start) > len) {
                dev_info(&dd->pcidev->dev,
-                        "User major version %d not same as driver "
-                        "major %d\n", uinfo->spu_userversion >> 16,
-                        IPATH_USER_SWMAJOR);
-               ret = -ENODEV;
-               goto done;
-       }
-
-       if ((uinfo->spu_userversion & 0xffff) != IPATH_USER_SWMINOR)
-               ipath_dbg("User minor version %d not same as driver "
-                         "minor %d\n", uinfo->spu_userversion & 0xffff,
-                         IPATH_USER_SWMINOR);
-
-       if (uinfo->spu_rcvhdrsize) {
-               ret = ipath_setrcvhdrsize(dd, uinfo->spu_rcvhdrsize);
-               if (ret)
-                       goto done;
+                        "FAIL on %s: len %lx > %x\n", what,
+                        vma->vm_end - vma->vm_start, len);
+               ret = -EFAULT;
+               goto bail;
        }
 
-       /* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */
-
-       /* set up for the rcvhdr Q tail register writeback to user memory */
-       if (!uinfo->spu_rcvhdraddr ||
-           !access_ok(VERIFY_WRITE, (u64 __user *) (unsigned long)
-                      uinfo->spu_rcvhdraddr, sizeof(u64))) {
-               ipath_dbg("Port %d rcvhdrtail addr %llx not valid\n",
-                         pd->port_port,
-                         (unsigned long long) uinfo->spu_rcvhdraddr);
-               ret = -EINVAL;
-               goto done;
-       }
+       if (!write_ok) {
+               if (vma->vm_flags & VM_WRITE) {
+                       dev_info(&dd->pcidev->dev,
+                                "%s must be mapped readonly\n", what);
+                       ret = -EPERM;
+                       goto bail;
+               }
 
-       off = offset_in_page(uinfo->spu_rcvhdraddr);
-       uaddr = PAGE_MASK & (unsigned long) uinfo->spu_rcvhdraddr;
-       ret = ipath_get_user_pages_nocopy(uaddr, &pagep);
-       if (ret) {
-               dev_info(&dd->pcidev->dev, "Failed to lookup and lock "
-                        "address %llx for rcvhdrtail: errno %d\n",
-                        (unsigned long long) uinfo->spu_rcvhdraddr, -ret);
-               goto done;
+               /* don't allow them to later change with mprotect */
+               vma->vm_flags &= ~VM_MAYWRITE;
        }
-       ipath_stats.sps_pagelocks++;
-       pd->port_rcvhdrtail_uaddr = uaddr;
-       pd->port_rcvhdrtail_pagep = pagep;
-       pd->port_rcvhdrtail_kvaddr =
-               page_address(pagep);
-       pd->port_rcvhdrtail_kvaddr += off;
-       physaddr = page_to_phys(pagep) + off;
-       ipath_cdbg(VERBOSE, "port %d user addr %llx hdrtailaddr, %llx "
-                  "physical (off=%llx)\n",
-                  pd->port_port,
-                  (unsigned long long) uinfo->spu_rcvhdraddr,
-                  (unsigned long long) physaddr, (unsigned long long) off);
-       ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
-                             pd->port_port, physaddr);
-       atmp = ipath_read_kreg64_port(dd,
-                                     dd->ipath_kregs->kr_rcvhdrtailaddr,
-                                     pd->port_port);
-       if (physaddr != atmp) {
-               ipath_dev_err(dd,
-                             "Catastrophic software error, "
-                             "RcvHdrTailAddr%u written as %llx, "
-                             "read back as %llx\n", pd->port_port,
-                             (unsigned long long) physaddr,
-                             (unsigned long long) atmp);
-               ret = -EINVAL;
-               goto done;
-       }
-
-       /* for right now, kernel piobufs are at end, so port 1 is at 0 */
-       pd->port_piobufs = dd->ipath_piobufbase +
-               dd->ipath_pbufsport * (pd->port_port -
-                                      1) * dd->ipath_palign;
-       ipath_cdbg(VERBOSE, "Set base of piobufs for port %u to 0x%x\n",
-                  pd->port_port, pd->port_piobufs);
 
-       /*
-        * Now allocate the rcvhdr Q and eager TIDs; skip the TID
-        * array for time being.  If pd->port_port > chip-supported,
-        * we need to do extra stuff here to handle by handling overflow
-        * through port 0, someday
-        */
-       ret = ipath_create_rcvhdrq(dd, pd);
-       if (!ret)
-               ret = ipath_create_user_egr(pd);
+       pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
+       ret = remap_pfn_range(vma, vma->vm_start, pfn,
+                             len, vma->vm_page_prot);
        if (ret)
-               goto done;
-       /* enable receives now */
-       /* atomically set enable bit for this port */
-       set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
-               &dd->ipath_rcvctrl);
-
-       /*
-        * set the head registers for this port to the current values
-        * of the tail pointers, since we don't know if they were
-        * updated on last use of the port.
-        */
-       head32 = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
-       head = (u64) head32;
-       ipath_write_ureg(dd, ur_rcvhdrhead, head, pd->port_port);
-       head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port);
-       ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port);
-       dd->ipath_lastegrheads[pd->port_port] = -1;
-       dd->ipath_lastrcvhdrqtails[pd->port_port] = -1;
-       ipath_cdbg(VERBOSE, "Wrote port%d head %llx, egrhead %x from "
-                  "tail regs\n", pd->port_port,
-                  (unsigned long long) head, head32);
-       pd->port_tidcursor = 0; /* start at beginning after open */
-       /*
-        * now enable the port; the tail registers will be written to memory
-        * by the chip as soon as it sees the write to
-        * dd->ipath_kregs->kr_rcvctrl.  The update only happens on
-        * transition from 0 to 1, so clear it first, then set it as part of
-        * enabling the port.  This will (very briefly) affect any other
-        * open ports, but it shouldn't be long enough to be an issue.
-        */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-                        dd->ipath_rcvctrl & ~INFINIPATH_R_TAILUPD);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-                        dd->ipath_rcvctrl);
-
-done:
+               dev_info(&dd->pcidev->dev, "%s port%u mmap of %lx, %x "
+                        "bytes r%c failed: %d\n", what, pd->port_port,
+                        pfn, len, write_ok?'w':'o', ret);
+       else
+               ipath_cdbg(VERBOSE, "%s port%u mmaped %lx, %x bytes "
+                          "r%c\n", what, pd->port_port, pfn, len,
+                          write_ok?'w':'o');
+bail:
        return ret;
 }
 
@@ -941,8 +1025,11 @@ static int mmap_ureg(struct vm_area_struct *vma, struct ipath_devdata *dd,
        unsigned long phys;
        int ret;
 
-       /* it's the real hardware, so io_remap works */
-
+       /*
+        * This is real hardware, so use io_remap.  This is the mechanism
+        * for the user process to update the head registers for their port
+        * in the chip.
+        */
        if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
                dev_info(&dd->pcidev->dev, "FAIL mmap userreg: reqlen "
                         "%lx > PAGE\n", vma->vm_end - vma->vm_start);
@@ -962,44 +1049,40 @@ static int mmap_ureg(struct vm_area_struct *vma, struct ipath_devdata *dd,
 
 static int mmap_piobufs(struct vm_area_struct *vma,
                        struct ipath_devdata *dd,
-                       struct ipath_portdata *pd)
+                       struct ipath_portdata *pd,
+                       unsigned piobufs, unsigned piocnt)
 {
        unsigned long phys;
        int ret;
 
        /*
-        * When we map the PIO buffers, we want to map them as writeonly, no
-        * read possible.
+        * When we map the PIO buffers in the chip, we want to map them as
+        * writeonly, no read possible.   This prevents access to previous
+        * process data, and catches users who might try to read the i/o
+        * space due to a bug.
         */
-
-       if ((vma->vm_end - vma->vm_start) >
-           (dd->ipath_pbufsport * dd->ipath_palign)) {
+       if ((vma->vm_end - vma->vm_start) > (piocnt * dd->ipath_palign)) {
                dev_info(&dd->pcidev->dev, "FAIL mmap piobufs: "
                         "reqlen %lx > PAGE\n",
                         vma->vm_end - vma->vm_start);
-               ret = -EFAULT;
+               ret = -EINVAL;
                goto bail;
        }
 
-       phys = dd->ipath_physaddr + pd->port_piobufs;
-       /*
-        * Do *NOT* mark this as non-cached (PWT bit), or we don't get the
-        * write combining behavior we want on the PIO buffers!
-        * vma->vm_page_prot =
-        *        pgprot_noncached(vma->vm_page_prot);
-        */
-
-       if (vma->vm_flags & VM_READ) {
-               dev_info(&dd->pcidev->dev,
-                        "Can't map piobufs as readable (flags=%lx)\n",
-                        vma->vm_flags);
-               ret = -EPERM;
-               goto bail;
-       }
+       phys = dd->ipath_physaddr + piobufs;
 
-       /* don't allow them to later change to readable with mprotect */
+#if defined(__powerpc__)
+       /* There isn't a generic way to specify writethrough mappings */
+       pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+       pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
+       pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
+#endif
 
-       vma->vm_flags &= ~VM_MAYWRITE;
+       /*
+        * don't allow them to later change to readable with mprotect (for when
+        * not initially mapped readable, as is normally the case)
+        */
+       vma->vm_flags &= ~VM_MAYREAD;
        vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
 
        ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
@@ -1015,14 +1098,9 @@ static int mmap_rcvegrbufs(struct vm_area_struct *vma,
        struct ipath_devdata *dd = pd->port_dd;
        unsigned long start, size;
        size_t total_size, i;
-       dma_addr_t *phys;
+       unsigned long pfn;
        int ret;
 
-       if (!pd->port_rcvegrbuf) {
-               ret = -EFAULT;
-               goto bail;
-       }
-
        size = pd->port_rcvegrbuf_size;
        total_size = pd->port_rcvegrbuf_chunks * size;
        if ((vma->vm_end - vma->vm_start) > total_size) {
@@ -1030,7 +1108,7 @@ static int mmap_rcvegrbufs(struct vm_area_struct *vma,
                         "reqlen %lx > actual %lx\n",
                         vma->vm_end - vma->vm_start,
                         (unsigned long) total_size);
-               ret = -EFAULT;
+               ret = -EINVAL;
                goto bail;
        }
 
@@ -1040,16 +1118,15 @@ static int mmap_rcvegrbufs(struct vm_area_struct *vma,
                ret = -EPERM;
                goto bail;
        }
-
-       start = vma->vm_start;
-       phys = pd->port_rcvegrbuf_phys;
-
        /* don't allow them to later change to writeable with mprotect */
        vma->vm_flags &= ~VM_MAYWRITE;
 
+       start = vma->vm_start;
+
        for (i = 0; i < pd->port_rcvegrbuf_chunks; i++, start += size) {
-               ret = remap_pfn_range(vma, start, phys[i] >> PAGE_SHIFT,
-                                     size, vma->vm_page_prot);
+               pfn = virt_to_phys(pd->port_rcvegrbuf[i]) >> PAGE_SHIFT;
+               ret = remap_pfn_range(vma, start, pfn, size,
+                                     vma->vm_page_prot);
                if (ret < 0)
                        goto bail;
        }
@@ -1059,74 +1136,97 @@ bail:
        return ret;
 }
 
-static int mmap_rcvhdrq(struct vm_area_struct *vma,
-                       struct ipath_portdata *pd)
+/*
+ * ipath_file_vma_fault - handle a VMA page fault.
+ */
+static int ipath_file_vma_fault(struct vm_area_struct *vma,
+                                       struct vm_fault *vmf)
 {
-       struct ipath_devdata *dd = pd->port_dd;
-       size_t total_size;
-       int ret;
+       struct page *page;
 
-       /*
-        * kmalloc'ed memory, physically contiguous; this is from
-        * spi_rcvhdr_base; we allow user to map read-write so they can
-        * write hdrq entries to allow protocol code to directly poll
-        * whether a hdrq entry has been written.
-        */
-       total_size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
-                          sizeof(u32), PAGE_SIZE);
-       if ((vma->vm_end - vma->vm_start) > total_size) {
-               dev_info(&dd->pcidev->dev,
-                        "FAIL on rcvhdrq: reqlen %lx > actual %lx\n",
-                        vma->vm_end - vma->vm_start,
-                        (unsigned long) total_size);
-               ret = -EFAULT;
-               goto bail;
-       }
+       page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
+       if (!page)
+               return VM_FAULT_SIGBUS;
+       get_page(page);
+       vmf->page = page;
 
-       ret = remap_pfn_range(vma, vma->vm_start,
-                             pd->port_rcvhdrq_phys >> PAGE_SHIFT,
-                             vma->vm_end - vma->vm_start,
-                             vma->vm_page_prot);
-bail:
-       return ret;
+       return 0;
 }
 
-static int mmap_pioavailregs(struct vm_area_struct *vma,
-                            struct ipath_portdata *pd)
+static const struct vm_operations_struct ipath_file_vm_ops = {
+       .fault = ipath_file_vma_fault,
+};
+
+static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
+                      struct ipath_portdata *pd, unsigned subport)
 {
-       struct ipath_devdata *dd = pd->port_dd;
-       int ret;
+       unsigned long len;
+       struct ipath_devdata *dd;
+       void *addr;
+       size_t size;
+       int ret = 0;
+
+       /* If the port is not shared, all addresses should be physical */
+       if (!pd->port_subport_cnt)
+               goto bail;
+
+       dd = pd->port_dd;
+       size = pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;
 
        /*
-        * when we map the PIO bufferavail registers, we want to map them as
-        * readonly, no write possible.
-        *
-        * kmalloc'ed memory, physically contiguous, one page only, readonly
+        * Each process has all the subport uregbase, rcvhdrq, and
+        * rcvegrbufs mmapped - as an array for all the processes,
+        * and also separately for this process.
         */
-
-       if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
-               dev_info(&dd->pcidev->dev, "FAIL on pioavailregs_dma: "
-                        "reqlen %lx > actual %lx\n",
-                        vma->vm_end - vma->vm_start,
-                        (unsigned long) PAGE_SIZE);
-               ret = -EFAULT;
+       if (pgaddr == cvt_kvaddr(pd->subport_uregbase)) {
+               addr = pd->subport_uregbase;
+               size = PAGE_SIZE * pd->port_subport_cnt;
+       } else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base)) {
+               addr = pd->subport_rcvhdr_base;
+               size = pd->port_rcvhdrq_size * pd->port_subport_cnt;
+       } else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf)) {
+               addr = pd->subport_rcvegrbuf;
+               size *= pd->port_subport_cnt;
+       } else if (pgaddr == cvt_kvaddr(pd->subport_uregbase +
+                                       PAGE_SIZE * subport)) {
+               addr = pd->subport_uregbase + PAGE_SIZE * subport;
+               size = PAGE_SIZE;
+       } else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base +
+                                       pd->port_rcvhdrq_size * subport)) {
+               addr = pd->subport_rcvhdr_base +
+                       pd->port_rcvhdrq_size * subport;
+               size = pd->port_rcvhdrq_size;
+       } else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf +
+                                       size * subport)) {
+               addr = pd->subport_rcvegrbuf + size * subport;
+               /* rcvegrbufs are read-only on the slave */
+               if (vma->vm_flags & VM_WRITE) {
+                       dev_info(&dd->pcidev->dev,
+                                "Can't map eager buffers as "
+                                "writable (flags=%lx)\n", vma->vm_flags);
+                       ret = -EPERM;
+                       goto bail;
+               }
+               /*
+                * Don't allow permission to later change to writeable
+                * with mprotect.
+                */
+               vma->vm_flags &= ~VM_MAYWRITE;
+       } else {
                goto bail;
        }
-
-       if (vma->vm_flags & VM_WRITE) {
-               dev_info(&dd->pcidev->dev,
-                        "Can't map pioavailregs as writable (flags=%lx)\n",
-                        vma->vm_flags);
-               ret = -EPERM;
+       len = vma->vm_end - vma->vm_start;
+       if (len > size) {
+               ipath_cdbg(MM, "FAIL: reqlen %lx > %zx\n", len, size);
+               ret = -EINVAL;
                goto bail;
        }
 
-       /* don't allow them to later change with mprotect */
-       vma->vm_flags &= ~VM_MAYWRITE;
+       vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
+       vma->vm_ops = &ipath_file_vm_ops;
+       vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
+       ret = 1;
 
-       ret = remap_pfn_range(vma, vma->vm_start,
-                             dd->ipath_pioavailregs_phys >> PAGE_SHIFT,
-                             PAGE_SIZE, vma->vm_page_prot);
 bail:
        return ret;
 }
@@ -1146,41 +1246,96 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
        struct ipath_portdata *pd;
        struct ipath_devdata *dd;
        u64 pgaddr, ureg;
+       unsigned piobufs, piocnt;
        int ret;
 
        pd = port_fp(fp);
+       if (!pd) {
+               ret = -EINVAL;
+               goto bail;
+       }
        dd = pd->port_dd;
+
        /*
         * This is the ipath_do_user_init() code, mapping the shared buffers
         * into the user process. The address referred to by vm_pgoff is the
-        * virtual, not physical, address; we only do one mmap for each
-        * space mapped.
+        * file offset passed via mmap().  For shared ports, this is the
+        * kernel vmalloc() address of the pages to share with the master.
+        * For non-shared or master ports, this is a physical address.
+        * We only do one mmap for each space mapped.
         */
        pgaddr = vma->vm_pgoff << PAGE_SHIFT;
 
        /*
-        * note that ureg does *NOT* have the kregvirt as part of it, to be
-        * sure that for 32 bit programs, we don't end up trying to map a >
-        * 44 address.  Has to match ipath_get_base_info() code that sets
-        * __spi_uregbase
+        * Check for 0 in case one of the allocations failed, but user
+        * called mmap anyway.
         */
+       if (!pgaddr) {
+               ret = -EINVAL;
+               goto bail;
+       }
 
-       ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port;
-
-       ipath_cdbg(MM, "ushare: pgaddr %llx vm_start=%lx, vmlen %lx\n",
+       ipath_cdbg(MM, "pgaddr %llx vm_start=%lx len %lx port %u:%u:%u\n",
                   (unsigned long long) pgaddr, vma->vm_start,
-                  vma->vm_end - vma->vm_start);
+                  vma->vm_end - vma->vm_start, dd->ipath_unit,
+                  pd->port_port, subport_fp(fp));
+
+       /*
+        * Physical addresses must fit in 40 bits for our hardware.
+        * Check for kernel virtual addresses first, anything else must
+        * match a HW or memory address.
+        */
+       ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp));
+       if (ret) {
+               if (ret > 0)
+                       ret = 0;
+               goto bail;
+       }
+
+       ureg = dd->ipath_uregbase + dd->ipath_ureg_align * pd->port_port;
+       if (!pd->port_subport_cnt) {
+               /* port is not shared */
+               piocnt = pd->port_piocnt;
+               piobufs = pd->port_piobufs;
+       } else if (!subport_fp(fp)) {
+               /* caller is the master */
+               piocnt = (pd->port_piocnt / pd->port_subport_cnt) +
+                        (pd->port_piocnt % pd->port_subport_cnt);
+               piobufs = pd->port_piobufs +
+                       dd->ipath_palign * (pd->port_piocnt - piocnt);
+       } else {
+               unsigned slave = subport_fp(fp) - 1;
+
+               /* caller is a slave */
+               piocnt = pd->port_piocnt / pd->port_subport_cnt;
+               piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave;
+       }
 
        if (pgaddr == ureg)
                ret = mmap_ureg(vma, dd, ureg);
-       else if (pgaddr == pd->port_piobufs)
-               ret = mmap_piobufs(vma, dd, pd);
-       else if (pgaddr == (u64) pd->port_rcvegr_phys)
+       else if (pgaddr == piobufs)
+               ret = mmap_piobufs(vma, dd, pd, piobufs, piocnt);
+       else if (pgaddr == dd->ipath_pioavailregs_phys)
+               /* in-memory copy of pioavail registers */
+               ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
+                                    (void *) dd->ipath_pioavailregs_dma,
+                                    "pioavail registers");
+       else if (pgaddr == pd->port_rcvegr_phys)
                ret = mmap_rcvegrbufs(vma, pd);
        else if (pgaddr == (u64) pd->port_rcvhdrq_phys)
-               ret = mmap_rcvhdrq(vma, pd);
-       else if (pgaddr == dd->ipath_pioavailregs_phys)
-               ret = mmap_pioavailregs(vma, pd);
+               /*
+                * The rcvhdrq itself; readonly except on HT (so have
+                * to allow writable mapping), multiple pages, contiguous
+                * from an i/o perspective.
+                */
+               ret = ipath_mmap_mem(vma, pd, pd->port_rcvhdrq_size, 1,
+                                    pd->port_rcvhdrq,
+                                    "rcvhdrq");
+       else if (pgaddr == (u64) pd->port_rcvhdrqtailaddr_phys)
+               /* in-memory copy of rcvhdrq tail register */
+               ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
+                                    pd->port_rcvhdrtail_kvaddr,
+                                    "rcvhdrq tail");
        else
                ret = -EINVAL;
 
@@ -1188,76 +1343,247 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
 
        if (ret < 0)
                dev_info(&dd->pcidev->dev,
-                        "Failure %d on addr %lx, off %lx\n",
-                        -ret, vma->vm_start, vma->vm_pgoff);
-
+                        "Failure %d on off %llx len %lx\n",
+                        -ret, (unsigned long long)pgaddr,
+                        vma->vm_end - vma->vm_start);
+bail:
        return ret;
 }
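
A hedged userspace counterpart of the dispatch above (not from this patch;
struct ipath_base_info and its spi_* fields come from ipath_common.h, and
how hdrq_size is derived is assumed): each region is mapped exactly once,
using the driver-supplied address as the mmap offset, and regions
registered with write_ok == 0, such as the pioavail copy, must be mapped
without PROT_WRITE or the driver returns -EPERM.

#include <stdint.h>
#include <sys/mman.h>
#include "ipath_common.h"	/* struct ipath_base_info */

static int map_port_regions(int fd, const struct ipath_base_info *bi,
			    size_t hdrq_size, void **hdrq, void **avail)
{
	/* rcvhdrq: registered with write_ok == 1 above */
	*hdrq = mmap(NULL, hdrq_size, PROT_READ | PROT_WRITE,
		     MAP_SHARED, fd, (off_t) bi->spi_rcvhdr_base);
	/* in-memory pioavail copy: read-only; one 4K page assumed */
	*avail = mmap(NULL, 4096, PROT_READ, MAP_SHARED,
		      fd, (off_t) bi->spi_pioavailaddr);
	return (*hdrq == MAP_FAILED || *avail == MAP_FAILED) ? -1 : 0;
}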
 
-static unsigned int ipath_poll(struct file *fp,
-                              struct poll_table_struct *pt)
+static unsigned ipath_poll_hdrqfull(struct ipath_portdata *pd)
 {
-       struct ipath_portdata *pd;
-       u32 head, tail;
-       int bit;
+       unsigned pollflag = 0;
+
+       if ((pd->poll_type & IPATH_POLL_TYPE_OVERFLOW) &&
+           pd->port_hdrqfull != pd->port_hdrqfull_poll) {
+               pollflag |= POLLIN | POLLRDNORM;
+               pd->port_hdrqfull_poll = pd->port_hdrqfull;
+       }
+
+       return pollflag;
+}
+
+static unsigned int ipath_poll_urgent(struct ipath_portdata *pd,
+                                     struct file *fp,
+                                     struct poll_table_struct *pt)
+{
+       unsigned pollflag = 0;
        struct ipath_devdata *dd;
 
-       pd = port_fp(fp);
        dd = pd->port_dd;
 
-       bit = pd->port_port + INFINIPATH_R_INTRAVAIL_SHIFT;
-       set_bit(bit, &dd->ipath_rcvctrl);
+       /* variable access in ipath_poll_hdrqfull() needs this */
+       rmb();
+       pollflag = ipath_poll_hdrqfull(pd);
 
-       /*
-        * Before blocking, make sure that head is still == tail,
-        * reading from the chip, so we can be sure the interrupt
-        * enable has made it to the chip.  If not equal, disable
-        * interrupt again and return immediately.  This avoids races,
-        * and the overhead of the chip read doesn't matter much at
-        * this point, since we are waiting for something anyway.
-        */
+       if (pd->port_urgent != pd->port_urgent_poll) {
+               pollflag |= POLLIN | POLLRDNORM;
+               pd->port_urgent_poll = pd->port_urgent;
+       }
 
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-                        dd->ipath_rcvctrl);
+       if (!pollflag) {
+               /* this saves a spin_lock/unlock in interrupt handler... */
+               set_bit(IPATH_PORT_WAITING_URG, &pd->port_flag);
+               /* flush waiting flag so don't miss an event... */
+               wmb();
+               poll_wait(fp, &pd->port_wait, pt);
+       }
+
+       return pollflag;
+}
+
+static unsigned int ipath_poll_next(struct ipath_portdata *pd,
+                                   struct file *fp,
+                                   struct poll_table_struct *pt)
+{
+       u32 head;
+       u32 tail;
+       unsigned pollflag = 0;
+       struct ipath_devdata *dd;
+
+       dd = pd->port_dd;
+
+       /* variable access in ipath_poll_hdrqfull() needs this */
+       rmb();
+       pollflag = ipath_poll_hdrqfull(pd);
 
        head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port);
-       tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
+       if (pd->port_rcvhdrtail_kvaddr)
+               tail = ipath_get_rcvhdrtail(pd);
+       else
+               tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
 
-       if (tail == head) {
+       if (head != tail)
+               pollflag |= POLLIN | POLLRDNORM;
+       else {
+               /* this saves a spin_lock/unlock in interrupt handler */
                set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
-               if(dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */
-                       (void)ipath_write_ureg(dd, ur_rcvhdrhead,
-                                              dd->ipath_rhdrhead_intr_off
-                                              | head, pd->port_port);
+               /* flush waiting flag so we don't miss an event */
+               wmb();
+
+               set_bit(pd->port_port + dd->ipath_r_intravail_shift,
+                       &dd->ipath_rcvctrl);
+
+               ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
+                                dd->ipath_rcvctrl);
+
+               if (dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */
+                       ipath_write_ureg(dd, ur_rcvhdrhead,
+                                        dd->ipath_rhdrhead_intr_off | head,
+                                        pd->port_port);
+
                poll_wait(fp, &pd->port_wait, pt);
+       }
+
+       return pollflag;
+}
+
+static unsigned int ipath_poll(struct file *fp,
+                              struct poll_table_struct *pt)
+{
+       struct ipath_portdata *pd;
+       unsigned pollflag;
+
+       pd = port_fp(fp);
+       if (!pd)
+               pollflag = 0;
+       else if (pd->poll_type & IPATH_POLL_TYPE_URGENT)
+               pollflag = ipath_poll_urgent(pd, fp, pt);
+       else
+               pollflag = ipath_poll_next(pd, fp, pt);
+
+       return pollflag;
+}
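
From userspace this is ordinary poll(2); whether POLLIN means "next packet
arrived" or "urgent/overflow event" depends on the port's poll_type. A
minimal sketch against an already-initialized device fd:

#include <poll.h>

static int wait_for_event(int fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	int n = poll(&pfd, 1, timeout_ms);

	if (n > 0 && (pfd.revents & (POLLIN | POLLRDNORM)))
		return 1;	/* head != tail, or a latched event */
	return n;		/* 0 on timeout, -1 on error */
}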
+
+static int ipath_supports_subports(int user_swmajor, int user_swminor)
+{
+       /* no subport implementation prior to software version 1.3 */
+       return (user_swmajor > 1) || (user_swminor >= 3);
+}
 
-               if (test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) {
-                       /* timed out, no packets received */
-                       clear_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
-                       pd->port_rcvwait_to++;
+static int ipath_compatible_subports(int user_swmajor, int user_swminor)
+{
+       /* this code is written long-hand for clarity */
+       if (IPATH_USER_SWMAJOR != user_swmajor) {
+               /* no promise of compatibility if major mismatch */
+               return 0;
+       }
+       if (IPATH_USER_SWMAJOR == 1) {
+               switch (IPATH_USER_SWMINOR) {
+               case 0:
+               case 1:
+               case 2:
+                       /* no subport implementation so cannot be compatible */
+                       return 0;
+               case 3:
+                       /* 3 is only compatible with itself */
+                       return user_swminor == 3;
+               default:
+                       /* >= 4 are compatible (or are expected to be) */
+                       return user_swminor >= 4;
                }
        }
-       else {
-               /* it's already happened; don't do wait_event overhead */
-               pd->port_rcvnowait++;
+       /* make no promises yet for future major versions */
+       return 0;
+}
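
Userspace advertises its version through spu_userversion, with the major number in the top 16 bits and the minor in the bottom 16, which is exactly what the two helpers above decode. A hedged sketch of the packing side; the macro values are assumptions for the example, not the kernel's own definitions:

```c
#include <stdint.h>

/* Assumed values, mirroring IPATH_USER_SWMAJOR/SWMINOR at the point
 * subports were introduced (version 1.3). */
#define EXAMPLE_SWMAJOR 1
#define EXAMPLE_SWMINOR 3

static inline uint32_t pack_userversion(void)
{
	/* top half: major; bottom half: minor */
	return ((uint32_t)EXAMPLE_SWMAJOR << 16) | EXAMPLE_SWMINOR;
}
```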
+
+static int init_subports(struct ipath_devdata *dd,
+                        struct ipath_portdata *pd,
+                        const struct ipath_user_info *uinfo)
+{
+       int ret = 0;
+       unsigned num_subports;
+       size_t size;
+
+       /*
+        * If the user is requesting zero subports,
+        * skip the subport allocation.
+        */
+       if (uinfo->spu_subport_cnt <= 0)
+               goto bail;
+
+       /* Self-consistency check for ipath_compatible_subports() */
+       if (ipath_supports_subports(IPATH_USER_SWMAJOR, IPATH_USER_SWMINOR) &&
+           !ipath_compatible_subports(IPATH_USER_SWMAJOR,
+                                      IPATH_USER_SWMINOR)) {
+               dev_info(&dd->pcidev->dev,
+                        "Inconsistent ipath_compatible_subports()\n");
+               goto bail;
        }
 
-       clear_bit(bit, &dd->ipath_rcvctrl);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-                        dd->ipath_rcvctrl);
+       /* Check for subport compatibility */
+       if (!ipath_compatible_subports(uinfo->spu_userversion >> 16,
+                                      uinfo->spu_userversion & 0xffff)) {
+               dev_info(&dd->pcidev->dev,
+                        "Mismatched user version (%d.%d) and driver "
+                        "version (%d.%d) while port sharing. Ensure "
+                        "that driver and library are from the same "
+                        "release.\n",
+                        (int) (uinfo->spu_userversion >> 16),
+                        (int) (uinfo->spu_userversion & 0xffff),
+                        IPATH_USER_SWMAJOR,
+                        IPATH_USER_SWMINOR);
+               goto bail;
+       }
+       if (uinfo->spu_subport_cnt > INFINIPATH_MAX_SUBPORT) {
+               ret = -EINVAL;
+               goto bail;
+       }
 
-       return 0;
+       num_subports = uinfo->spu_subport_cnt;
+       pd->subport_uregbase = vmalloc(PAGE_SIZE * num_subports);
+       if (!pd->subport_uregbase) {
+               ret = -ENOMEM;
+               goto bail;
+       }
+       /* Note: pd->port_rcvhdrq_size isn't initialized yet. */
+       size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
+                    sizeof(u32), PAGE_SIZE) * num_subports;
+       pd->subport_rcvhdr_base = vmalloc(size);
+       if (!pd->subport_rcvhdr_base) {
+               ret = -ENOMEM;
+               goto bail_ureg;
+       }
+
+       pd->subport_rcvegrbuf = vmalloc(pd->port_rcvegrbuf_chunks *
+                                       pd->port_rcvegrbuf_size *
+                                       num_subports);
+       if (!pd->subport_rcvegrbuf) {
+               ret = -ENOMEM;
+               goto bail_rhdr;
+       }
+
+       pd->port_subport_cnt = uinfo->spu_subport_cnt;
+       pd->port_subport_id = uinfo->spu_subport_id;
+       pd->active_slaves = 1;
+       set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
+       memset(pd->subport_uregbase, 0, PAGE_SIZE * num_subports);
+       memset(pd->subport_rcvhdr_base, 0, size);
+       memset(pd->subport_rcvegrbuf, 0, pd->port_rcvegrbuf_chunks *
+                                        pd->port_rcvegrbuf_size *
+                                        num_subports);
+       goto bail;
+
+bail_rhdr:
+       vfree(pd->subport_rcvhdr_base);
+bail_ureg:
+       vfree(pd->subport_uregbase);
+       pd->subport_uregbase = NULL;
+bail:
+       return ret;
 }
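
The master's allocations above follow a simple pattern: one page of user registers per subport, plus a page-aligned copy of the receive header queue per subport. A self-contained sketch of that sizing arithmetic, with placeholder counts rather than real chip values:

```c
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	size_t rcvhdrcnt = 64, rcvhdrentsize = 16;	/* placeholders */
	size_t num_subports = 4, page = 4096;

	size_t per_subport = EXAMPLE_ALIGN(rcvhdrcnt * rcvhdrentsize *
					   sizeof(uint32_t), page);
	printf("uregbase: %zu bytes, rcvhdr copies: %zu bytes\n",
	       page * num_subports, per_subport * num_subports);
	return 0;
}
```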
 
 static int try_alloc_port(struct ipath_devdata *dd, int port,
-                         struct file *fp)
+                         struct file *fp,
+                         const struct ipath_user_info *uinfo)
 {
+       struct ipath_portdata *pd;
        int ret;
 
-       if (!dd->ipath_pd[port]) {
-               void *p, *ptmp;
+       if (!(pd = dd->ipath_pd[port])) {
+               void *ptmp;
 
-               p = kzalloc(sizeof(struct ipath_portdata), GFP_KERNEL);
+               pd = kzalloc(sizeof(struct ipath_portdata), GFP_KERNEL);
 
                /*
                 * Allocate memory for use in ipath_tid_update() just once
@@ -1267,34 +1593,36 @@ static int try_alloc_port(struct ipath_devdata *dd, int port,
                ptmp = kmalloc(dd->ipath_rcvtidcnt * sizeof(u16) +
                               dd->ipath_rcvtidcnt * sizeof(struct page **),
                               GFP_KERNEL);
-               if (!p || !ptmp) {
+               if (!pd || !ptmp) {
                        ipath_dev_err(dd, "Unable to allocate portdata "
                                      "memory, failing open\n");
                        ret = -ENOMEM;
-                       kfree(p);
+                       kfree(pd);
                        kfree(ptmp);
                        goto bail;
                }
-               dd->ipath_pd[port] = p;
+               dd->ipath_pd[port] = pd;
                dd->ipath_pd[port]->port_port = port;
                dd->ipath_pd[port]->port_dd = dd;
                dd->ipath_pd[port]->port_tid_pg_list = ptmp;
                init_waitqueue_head(&dd->ipath_pd[port]->port_wait);
        }
-       if (!dd->ipath_pd[port]->port_cnt) {
-               dd->ipath_pd[port]->port_cnt = 1;
-               fp->private_data = (void *) dd->ipath_pd[port];
+       if (!pd->port_cnt) {
+               pd->userversion = uinfo->spu_userversion;
+               init_user_egr_sizes(pd);
+               if ((ret = init_subports(dd, pd, uinfo)) != 0)
+                       goto bail;
                ipath_cdbg(PROC, "%s[%u] opened unit:port %u:%u\n",
                           current->comm, current->pid, dd->ipath_unit,
                           port);
-               dd->ipath_pd[port]->port_pid = current->pid;
-               strncpy(dd->ipath_pd[port]->port_comm, current->comm,
-                       sizeof(dd->ipath_pd[port]->port_comm));
+               pd->port_cnt = 1;
+               port_fp(fp) = pd;
+               pd->port_pid = get_pid(task_pid(current));
+               strlcpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
                ipath_stats.sps_ports++;
                ret = 0;
-               goto bail;
-       }
-       ret = -EBUSY;
+       } else
+               ret = -EBUSY;
 
 bail:
        return ret;
@@ -1310,7 +1638,8 @@ static inline int usable(struct ipath_devdata *dd)
                                     | IPATH_LINKUNK));
 }
 
-static int find_free_port(int unit, struct file *fp)
+static int find_free_port(int unit, struct file *fp,
+                         const struct ipath_user_info *uinfo)
 {
        struct ipath_devdata *dd = ipath_lookup(unit);
        int ret, i;
@@ -1325,8 +1654,8 @@ static int find_free_port(int unit, struct file *fp)
                goto bail;
        }
 
-       for (i = 0; i < dd->ipath_cfgports; i++) {
-               ret = try_alloc_port(dd, i, fp);
+       for (i = 1; i < dd->ipath_cfgports; i++) {
+               ret = try_alloc_port(dd, i, fp, uinfo);
                if (ret != -EBUSY)
                        goto bail;
        }
@@ -1336,44 +1665,46 @@ bail:
        return ret;
 }
 
-static int find_best_unit(struct file *fp)
+static int find_best_unit(struct file *fp,
+                         const struct ipath_user_info *uinfo)
 {
        int ret = 0, i, prefunit = -1, devmax;
        int maxofallports, npresent, nup;
        int ndev;
 
-       (void) ipath_count_units(&npresent, &nup, &maxofallports);
+       devmax = ipath_count_units(&npresent, &nup, &maxofallports);
 
        /*
         * This code is present to allow a knowledgeable person to
         * specify the layout of processes to processors before opening
         * this driver, and then we'll assign the process to the "closest"
-        * HT-400 to that processor (we assume reasonable connectivity,
+        * InfiniPath chip to that processor (we assume reasonable connectivity,
         * for now).  This code assumes that if affinity has been set
         * before this point, that at most one cpu is set; for now this
-        * is reasonable.  I check for both cpus_empty() and cpus_full(),
+        * is reasonable.  I check for both cpumask_empty() and cpumask_full(),
         * in case some kernel variant sets none of the bits when no
         * affinity is set.  2.6.11 and 12 kernels have all present
         * cpus set.  Some day we'll have to fix it up further to handle
-        * a cpu subset.  This algorithm fails for two HT-400's connected
+        * a cpu subset.  This algorithm fails for two HT chips connected
         * in tunnel fashion.  Eventually this needs real topology
         * information.  There may be some issues with dual core numbering
         * as well.  This needs more work prior to release.
         */
-       if (!cpus_empty(current->cpus_allowed) &&
-           !cpus_full(current->cpus_allowed)) {
-               int ncpus = num_online_cpus(), curcpu = -1;
+       if (!cpumask_empty(&current->cpus_allowed) &&
+           !cpumask_full(&current->cpus_allowed)) {
+               int ncpus = num_online_cpus(), curcpu = -1, nset = 0;
                for (i = 0; i < ncpus; i++)
-                       if (cpu_isset(i, current->cpus_allowed)) {
+                       if (cpumask_test_cpu(i, &current->cpus_allowed)) {
                                ipath_cdbg(PROC, "%s[%u] affinity set for "
-                                          "cpu %d\n", current->comm,
-                                          current->pid, i);
+                                          "cpu %d/%d\n", current->comm,
+                                          current->pid, i, ncpus);
                                curcpu = i;
+                               nset++;
                        }
-               if (curcpu != -1) {
+               if (curcpu != -1 && nset != ncpus) {
                        if (npresent) {
                                prefunit = curcpu / (ncpus / npresent);
-                               ipath_dbg("%s[%u] %d chips, %d cpus, "
+                               ipath_cdbg(PROC, "%s[%u] %d chips, %d cpus, "
                                          "%d cpus/chip, select unit %d\n",
                                          current->comm, current->pid,
                                          npresent, ncpus, ncpus / npresent,
@@ -1389,8 +1720,6 @@ static int find_best_unit(struct file *fp)
 
        if (prefunit != -1)
                devmax = prefunit + 1;
-       else
-               devmax = ipath_count_units(NULL, NULL, NULL);
 recheck:
        for (i = 1; i < maxofallports; i++) {
                for (ndev = prefunit != -1 ? prefunit : 0; ndev < devmax;
@@ -1405,7 +1734,7 @@ recheck:
                                 * next.
                                 */
                                continue;
-                       ret = try_alloc_port(dd, i, fp);
+                       ret = try_alloc_port(dd, i, fp, uinfo);
                        if (!ret)
                                goto done;
                }
@@ -1441,22 +1770,235 @@ done:
        return ret;
 }
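
To make the prefunit arithmetic above concrete, a tiny standalone example: with 8 online cpus and 2 chips present, curcpu / (ncpus / npresent) sends cpus 0-3 to unit 0 and cpus 4-7 to unit 1. The counts are illustrative; real topologies are messier, as the comment itself admits:

```c
#include <stdio.h>

int main(void)
{
	int ncpus = 8, npresent = 2;	/* illustrative counts */

	for (int curcpu = 0; curcpu < ncpus; curcpu++)
		printf("cpu %d -> preferred unit %d\n",
		       curcpu, curcpu / (ncpus / npresent));
	return 0;
}
```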
 
+static int find_shared_port(struct file *fp,
+                           const struct ipath_user_info *uinfo)
+{
+       int devmax, ndev, i;
+       int ret = 0;
+
+       devmax = ipath_count_units(NULL, NULL, NULL);
+
+       for (ndev = 0; ndev < devmax; ndev++) {
+               struct ipath_devdata *dd = ipath_lookup(ndev);
+
+               if (!usable(dd))
+                       continue;
+               for (i = 1; i < dd->ipath_cfgports; i++) {
+                       struct ipath_portdata *pd = dd->ipath_pd[i];
+
+                       /* Skip ports which are not yet open */
+                       if (!pd || !pd->port_cnt)
+                               continue;
+                       /* Skip port if it doesn't match the requested one */
+                       if (pd->port_subport_id != uinfo->spu_subport_id)
+                               continue;
+                       /* Verify the sharing process matches the master */
+                       if (pd->port_subport_cnt != uinfo->spu_subport_cnt ||
+                           pd->userversion != uinfo->spu_userversion ||
+                           pd->port_cnt >= pd->port_subport_cnt) {
+                               ret = -EINVAL;
+                               goto done;
+                       }
+                       port_fp(fp) = pd;
+                       subport_fp(fp) = pd->port_cnt++;
+                       pd->port_subpid[subport_fp(fp)] =
+                               get_pid(task_pid(current));
+                       tidcursor_fp(fp) = 0;
+                       pd->active_slaves |= 1 << subport_fp(fp);
+                       ipath_cdbg(PROC,
+                                  "%s[%u] %u sharing %s[%u] unit:port %u:%u\n",
+                                  current->comm, current->pid,
+                                  subport_fp(fp),
+                                  pd->port_comm, pid_nr(pd->port_pid),
+                                  dd->ipath_unit, pd->port_port);
+                       ret = 1;
+                       goto done;
+               }
+       }
+
+done:
+       return ret;
+}
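
A sharing group is keyed purely on the parameters checked above: every slave must present the same spu_subport_id, spu_subport_cnt, and spu_userversion as the master, or the open fails with -EINVAL. A hypothetical slave-side fill, using a cut-down stand-in for the real ipath_user_info layout:

```c
/* Illustrative subset of the spu_* fields the driver compares;
 * the real struct lives in ipath_common.h. */
struct share_req {
	unsigned spu_userversion;
	unsigned spu_subport_cnt;
	unsigned spu_subport_id;
};

static void fill_share_req(struct share_req *r, unsigned group_key)
{
	r->spu_userversion = (1u << 16) | 3;	/* assumed version 1.3 */
	r->spu_subport_cnt = 4;			/* 4 processes per port */
	r->spu_subport_id  = group_key;		/* same key in all ranks */
}
```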
+
 static int ipath_open(struct inode *in, struct file *fp)
 {
-       int ret, user_minor;
+       /* The real work is performed later in ipath_assign_port() */
+       fp->private_data = kzalloc(sizeof(struct ipath_filedata), GFP_KERNEL);
+       return fp->private_data ? 0 : -ENOMEM;
+}
+
+/* Get port early, so we can set affinity prior to memory allocation */
+static int ipath_assign_port(struct file *fp,
+                             const struct ipath_user_info *uinfo)
+{
+       int ret;
+       int i_minor;
+       unsigned swmajor, swminor;
+
+       /* Check to be sure we haven't already initialized this file */
+       if (port_fp(fp)) {
+               ret = -EINVAL;
+               goto done;
+       }
+
+       /* for now, if major version is different, bail */
+       swmajor = uinfo->spu_userversion >> 16;
+       if (swmajor != IPATH_USER_SWMAJOR) {
+               ipath_dbg("User major version %d not same as driver "
+                         "major %d\n", uinfo->spu_userversion >> 16,
+                         IPATH_USER_SWMAJOR);
+               ret = -ENODEV;
+               goto done;
+       }
+
+       swminor = uinfo->spu_userversion & 0xffff;
+       if (swminor != IPATH_USER_SWMINOR)
+               ipath_dbg("User minor version %d not same as driver "
+                         "minor %d\n", swminor, IPATH_USER_SWMINOR);
 
        mutex_lock(&ipath_mutex);
 
-       user_minor = iminor(in) - IPATH_USER_MINOR_BASE;
+       if (ipath_compatible_subports(swmajor, swminor) &&
+           uinfo->spu_subport_cnt &&
+           (ret = find_shared_port(fp, uinfo))) {
+               if (ret > 0)
+                       ret = 0;
+               goto done_chk_sdma;
+       }
+
+       i_minor = iminor(fp->f_path.dentry->d_inode) - IPATH_USER_MINOR_BASE;
        ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n",
-                  (long)in->i_rdev, user_minor);
+                  (long)fp->f_path.dentry->d_inode->i_rdev, i_minor);
 
-       if (user_minor)
-               ret = find_free_port(user_minor - 1, fp);
+       if (i_minor)
+               ret = find_free_port(i_minor - 1, fp, uinfo);
        else
-               ret = find_best_unit(fp);
+               ret = find_best_unit(fp, uinfo);
+
+done_chk_sdma:
+       if (!ret) {
+               struct ipath_filedata *fd = fp->private_data;
+               const struct ipath_portdata *pd = fd->pd;
+               const struct ipath_devdata *dd = pd->port_dd;
+
+               fd->pq = ipath_user_sdma_queue_create(&dd->pcidev->dev,
+                                                     dd->ipath_unit,
+                                                     pd->port_port,
+                                                     fd->subport);
+
+               if (!fd->pq)
+                       ret = -ENOMEM;
+       }
 
        mutex_unlock(&ipath_mutex);
+
+done:
+       return ret;
+}
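
The net effect is a two-step open: open() allocates only the per-fd ipath_filedata, and a later write() of IPATH_CMD_ASSIGN_PORT (or the compatibility __IPATH_CMD_USER_INIT) performs the binding above. A hedged userspace sketch; the structs are simplified stand-ins for the real ABI in ipath_common.h, and the command's numeric value is deliberately taken as a parameter rather than guessed:

```c
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

struct fake_user_info {		/* illustrative subset of ipath_user_info */
	uint32_t spu_userversion;
	uint32_t spu_subport_cnt;
	uint32_t spu_subport_id;
};

struct fake_cmd {		/* illustrative stand-in for ipath_cmd */
	uint32_t type;
	struct fake_user_info user_info;
};

static int open_and_assign(uint32_t cmd_assign_port)
{
	struct fake_cmd c = {
		.type = cmd_assign_port,
		.user_info = { .spu_userversion = (1u << 16) | 3 },
	};
	int fd = open("/dev/ipath", O_RDWR);

	if (fd < 0)
		return -1;
	if (write(fd, &c, sizeof(c)) < 0) {	/* binds fd to a port */
		close(fd);
		return -1;
	}
	return fd;
}
```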
+
+
+static int ipath_do_user_init(struct file *fp,
+                             const struct ipath_user_info *uinfo)
+{
+       int ret;
+       struct ipath_portdata *pd = port_fp(fp);
+       struct ipath_devdata *dd;
+       u32 head32;
+
+       /* Subports don't need to initialize anything since the master did it. */
+       if (subport_fp(fp)) {
+               ret = wait_event_interruptible(pd->port_wait,
+                       !test_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag));
+               goto done;
+       }
+
+       dd = pd->port_dd;
+
+       if (uinfo->spu_rcvhdrsize) {
+               ret = ipath_setrcvhdrsize(dd, uinfo->spu_rcvhdrsize);
+               if (ret)
+                       goto done;
+       }
+
+       /* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */
+
+       /* some ports may get extra buffers, calculate that here */
+       if (pd->port_port <= dd->ipath_ports_extrabuf)
+               pd->port_piocnt = dd->ipath_pbufsport + 1;
+       else
+               pd->port_piocnt = dd->ipath_pbufsport;
+
+       /* for right now, kernel piobufs are at the end, so port 1 is at 0 */
+       if (pd->port_port <= dd->ipath_ports_extrabuf)
+               pd->port_pio_base = (dd->ipath_pbufsport + 1)
+                       * (pd->port_port - 1);
+       else
+               pd->port_pio_base = dd->ipath_ports_extrabuf +
+                       dd->ipath_pbufsport * (pd->port_port - 1);
+       pd->port_piobufs = dd->ipath_piobufbase +
+               pd->port_pio_base * dd->ipath_palign;
+       ipath_cdbg(VERBOSE, "piobuf base for port %u is 0x%x, piocnt %u,"
+               " first pio %u\n", pd->port_port, pd->port_piobufs,
+               pd->port_piocnt, pd->port_pio_base);
+       ipath_chg_pioavailkernel(dd, pd->port_pio_base, pd->port_piocnt, 0);
+
+       /*
+        * Now allocate the rcvhdr Q and eager TIDs; skip the TID
+        * array for the time being.  If pd->port_port exceeds what the
+        * chip supports, we will someday need extra logic here to
+        * handle the overflow through port 0.
+        */
+       ret = ipath_create_rcvhdrq(dd, pd);
+       if (!ret)
+               ret = ipath_create_user_egr(pd);
+       if (ret)
+               goto done;
+
+       /*
+        * set the eager head register for this port to the current values
+        * of the tail pointers, since we don't know if they were
+        * updated on last use of the port.
+        */
+       head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port);
+       ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port);
+       pd->port_lastrcvhdrqtail = -1;
+       ipath_cdbg(VERBOSE, "Wrote port%d egrhead %x from tail regs\n",
+               pd->port_port, head32);
+       pd->port_tidcursor = 0; /* start at beginning after open */
+
+       /* initialize poll variables... */
+       pd->port_urgent = 0;
+       pd->port_urgent_poll = 0;
+       pd->port_hdrqfull_poll = pd->port_hdrqfull;
+
+       /*
+        * Now enable the port for receive.
+        * Some chips DMA the tail register to memory whenever it
+        * changes (and when the update bit transitions from 0 to 1),
+        * so for those chips we turn tail updates off and then back on.
+        * This will (very briefly) affect any other open ports, but the
+        * duration is very short, and therefore isn't an issue.  We
+        * explicitly set the in-memory tail copy to 0 beforehand, so we
+        * don't have to wait to be sure the DMA update has happened
+        * (the chip resets head/tail to 0 on the transition to enable).
+        */
+       set_bit(dd->ipath_r_portenable_shift + pd->port_port,
+               &dd->ipath_rcvctrl);
+       if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
+               if (pd->port_rcvhdrtail_kvaddr)
+                       ipath_clear_rcvhdrtail(pd);
+               ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
+                       dd->ipath_rcvctrl &
+                       ~(1ULL << dd->ipath_r_tailupd_shift));
+       }
+       ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
+                        dd->ipath_rcvctrl);
+       /* Notify any waiting slaves */
+       if (pd->port_subport_cnt) {
+               clear_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
+               wake_up(&pd->port_wait);
+       }
+done:
        return ret;
 }
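
The PIO buffer carve-up in ipath_do_user_init() is worth seeing in isolation: the first ipath_ports_extrabuf ports each receive pbufsport + 1 buffers, later ports receive exactly pbufsport, and the bases pack contiguously from buffer 0. A standalone sketch with assumed counts:

```c
#include <stdio.h>

int main(void)
{
	unsigned pbufsport = 32, extrabuf = 2;	/* assumed chip config */

	for (unsigned port = 1; port <= 4; port++) {
		unsigned cnt, base;

		if (port <= extrabuf) {
			cnt  = pbufsport + 1;
			base = (pbufsport + 1) * (port - 1);
		} else {
			cnt  = pbufsport;
			base = extrabuf + pbufsport * (port - 1);
		}
		printf("port %u: pio base %u, count %u\n", port, base, cnt);
	}
	return 0;
}
```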
 
@@ -1476,12 +2018,15 @@ static void unlock_expected_tids(struct ipath_portdata *pd)
        ipath_cdbg(VERBOSE, "Port %u unlocking any locked expTID pages\n",
                   pd->port_port);
        for (i = port_tidbase; i < maxtid; i++) {
-               if (!dd->ipath_pageshadow[i])
+               struct page *ps = dd->ipath_pageshadow[i];
+
+               if (!ps)
                        continue;
 
-               ipath_release_user_pages_on_close(&dd->ipath_pageshadow[i],
-                                                 1);
                dd->ipath_pageshadow[i] = NULL;
+               pci_unmap_page(dd->pcidev, dd->ipath_physshadow[i],
+                       PAGE_SIZE, PCI_DMA_FROMDEVICE);
+               ipath_release_user_pages_on_close(&ps, 1);
                cnt++;
                ipath_stats.sps_pageunlocks++;
        }
@@ -1499,26 +2044,51 @@ static void unlock_expected_tids(struct ipath_portdata *pd)
 static int ipath_close(struct inode *in, struct file *fp)
 {
        int ret = 0;
+       struct ipath_filedata *fd;
        struct ipath_portdata *pd;
        struct ipath_devdata *dd;
+       unsigned long flags;
        unsigned port;
+       struct pid *pid;
 
        ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n",
                   (long)in->i_rdev, fp->private_data);
 
        mutex_lock(&ipath_mutex);
 
-       pd = port_fp(fp);
-       port = pd->port_port;
+       fd = fp->private_data;
        fp->private_data = NULL;
+       pd = fd->pd;
+       if (!pd) {
+               mutex_unlock(&ipath_mutex);
+               goto bail;
+       }
+
        dd = pd->port_dd;
 
-       if (pd->port_hdrqfull) {
-               ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors "
-                          "during run\n", pd->port_comm, pd->port_pid,
-                          pd->port_hdrqfull);
-               pd->port_hdrqfull = 0;
+       /* drain user sdma queue */
+       ipath_user_sdma_queue_drain(dd, fd->pq);
+       ipath_user_sdma_queue_destroy(fd->pq);
+
+       if (--pd->port_cnt) {
+               /*
+                * XXX If the master closes the port before the slave(s),
+                * revoke the mmap for the eager receive queue so
+                * the slave(s) don't wait for receive data forever.
+                */
+               pd->active_slaves &= ~(1 << fd->subport);
+               put_pid(pd->port_subpid[fd->subport]);
+               pd->port_subpid[fd->subport] = NULL;
+               mutex_unlock(&ipath_mutex);
+               goto bail;
        }
+       /* early; no interrupt users after this */
+       spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
+       port = pd->port_port;
+       dd->ipath_pd[port] = NULL;
+       pid = pd->port_pid;
+       pd->port_pid = NULL;
+       spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
 
        if (pd->port_rcvwait_to || pd->port_piowait_to
            || pd->port_rcvnowait || pd->port_pionowait) {
@@ -1531,76 +2101,87 @@ static int ipath_close(struct inode *in, struct file *fp)
                        pd->port_rcvnowait = pd->port_pionowait = 0;
        }
        if (pd->port_flag) {
-               ipath_dbg("port %u port_flag still set to 0x%lx\n",
+               ipath_cdbg(PROC, "port %u port_flag set: 0x%lx\n",
                          pd->port_port, pd->port_flag);
                pd->port_flag = 0;
        }
 
        if (dd->ipath_kregbase) {
-               if (pd->port_rcvhdrtail_uaddr) {
-                       pd->port_rcvhdrtail_uaddr = 0;
-                       pd->port_rcvhdrtail_kvaddr = NULL;
-                       ipath_release_user_pages_on_close(
-                               &pd->port_rcvhdrtail_pagep, 1);
-                       pd->port_rcvhdrtail_pagep = NULL;
-                       ipath_stats.sps_pageunlocks++;
-               }
-               ipath_write_kreg_port(
-                       dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
-                       port, 0ULL);
-               ipath_write_kreg_port(
-                       dd, dd->ipath_kregs->kr_rcvhdraddr,
-                       pd->port_port, 0);
+               /* atomically clear this port's receive-enable and intr-avail bits */
+               clear_bit(dd->ipath_r_portenable_shift + port,
+                         &dd->ipath_rcvctrl);
+               clear_bit(pd->port_port + dd->ipath_r_intravail_shift,
+                         &dd->ipath_rcvctrl);
+               ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
+                       dd->ipath_rcvctrl);
+               /* and read back from chip to be sure that nothing
+                * else is in flight when we do the rest */
+               (void)ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
 
                /* clean up the pkeys for this port user */
                ipath_clean_part_key(pd, dd);
+               /*
+                * be paranoid, and never write 0's to these, just use an
+                * unused part of the port 0 tail page.  Of course,
+                * rcvhdraddr points to a large chunk of memory, so this
+                * could still trash things, but at least it won't trash
+                * page 0, and by disabling the port, it should stop "soon",
+                * even if a packet or two is already in flight after we
+                * disabled the port.
+                */
+               ipath_write_kreg_port(dd,
+                       dd->ipath_kregs->kr_rcvhdrtailaddr, port,
+                       dd->ipath_dummy_hdrq_phys);
+               ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
+                       pd->port_port, dd->ipath_dummy_hdrq_phys);
 
-               if (port < dd->ipath_cfgports) {
-                       int i = dd->ipath_pbufsport * (port - 1);
-                       ipath_disarm_piobufs(dd, i, dd->ipath_pbufsport);
-
-                       /* atomically clear receive enable port. */
-                       clear_bit(INFINIPATH_R_PORTENABLE_SHIFT + port,
-                                 &dd->ipath_rcvctrl);
-                       ipath_write_kreg(
-                               dd,
-                               dd->ipath_kregs->kr_rcvctrl,
-                               dd->ipath_rcvctrl);
-
-                       if (dd->ipath_pageshadow)
-                               unlock_expected_tids(pd);
-                       ipath_stats.sps_ports--;
-                       ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
-                                  pd->port_comm, pd->port_pid,
-                                  dd->ipath_unit, port);
-               }
-       }
-
-       pd->port_cnt = 0;
-       pd->port_pid = 0;
+               ipath_disarm_piobufs(dd, pd->port_pio_base, pd->port_piocnt);
+               ipath_chg_pioavailkernel(dd, pd->port_pio_base,
+                       pd->port_piocnt, 1);
 
-       dd->ipath_f_clear_tids(dd, pd->port_port);
+               dd->ipath_f_clear_tids(dd, pd->port_port);
 
-       ipath_free_pddata(dd, pd->port_port, 0);
+               if (dd->ipath_pageshadow)
+                       unlock_expected_tids(pd);
+               ipath_stats.sps_ports--;
+               ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
+                          pd->port_comm, pid_nr(pid),
+                          dd->ipath_unit, port);
+       }
 
+       put_pid(pid);
        mutex_unlock(&ipath_mutex);
+       ipath_free_pddata(dd, pd); /* after releasing the mutex */
 
+bail:
+       kfree(fd);
        return ret;
 }
 
-static int ipath_port_info(struct ipath_portdata *pd,
+static int ipath_port_info(struct ipath_portdata *pd, u16 subport,
                           struct ipath_port_info __user *uinfo)
 {
        struct ipath_port_info info;
        int nup;
        int ret;
+       size_t sz;
 
        (void) ipath_count_units(NULL, &nup, NULL);
        info.num_active = nup;
        info.unit = pd->port_dd->ipath_unit;
        info.port = pd->port_port;
+       info.subport = subport;
+       /* Don't return new fields if an old library opened the port. */
+       if (ipath_supports_subports(pd->userversion >> 16,
+                                   pd->userversion & 0xffff)) {
+               /* Number of user ports available for this device. */
+               info.num_ports = pd->port_dd->ipath_cfgports - 1;
+               info.num_subports = pd->port_subport_cnt;
+               sz = sizeof(info);
+       } else
+               sz = sizeof(info) - 2 * sizeof(u16);
 
-       if (copy_to_user(uinfo, &info, sizeof(info))) {
+       if (copy_to_user(uinfo, &info, sz)) {
                ret = -EFAULT;
                goto bail;
        }
@@ -1610,6 +2191,45 @@ bail:
        return ret;
 }
 
+static int ipath_get_slave_info(struct ipath_portdata *pd,
+                               void __user *slave_mask_addr)
+{
+       int ret = 0;
+
+       if (copy_to_user(slave_mask_addr, &pd->active_slaves, sizeof(u32)))
+               ret = -EFAULT;
+       return ret;
+}
+
+static int ipath_sdma_get_inflight(struct ipath_user_sdma_queue *pq,
+                                  u32 __user *inflightp)
+{
+       const u32 val = ipath_user_sdma_inflight_counter(pq);
+
+       if (put_user(val, inflightp))
+               return -EFAULT;
+
+       return 0;
+}
+
+static int ipath_sdma_get_complete(struct ipath_devdata *dd,
+                                  struct ipath_user_sdma_queue *pq,
+                                  u32 __user *completep)
+{
+       u32 val;
+       int err;
+
+       err = ipath_user_sdma_make_progress(dd, pq);
+       if (err < 0)
+               return err;
+
+       val = ipath_user_sdma_complete_counter(pq);
+       if (put_user(val, completep))
+               return -EFAULT;
+
+       return 0;
+}
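
Together these two helpers let a userspace sender tell when its queued SDMA work has finished: sample the inflight counter after queueing, then poll the complete counter until it catches up. A hedged sketch; fetch_counter() stands in for issuing the IPATH_CMD_SDMA_INFLIGHT / IPATH_CMD_SDMA_COMPLETE commands dispatched in ipath_write() below:

```c
/* which == 0: inflight counter, which == 1: complete counter. */
static int sdma_drain(int fd,
		      int (*fetch_counter)(int fd, int which,
					   unsigned *val))
{
	unsigned inflight, complete;

	if (fetch_counter(fd, 0, &inflight))
		return -1;
	do {
		if (fetch_counter(fd, 1, &complete))
			return -1;
	} while ((int)(inflight - complete) > 0);	/* wrap-safe */
	return 0;
}
```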
+
 static ssize_t ipath_write(struct file *fp, const char __user *data,
                           size_t count, loff_t *off)
 {
@@ -1636,6 +2256,8 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
        consumed = sizeof(cmd.type);
 
        switch (cmd.type) {
+       case IPATH_CMD_ASSIGN_PORT:
+       case __IPATH_CMD_USER_INIT:
        case IPATH_CMD_USER_INIT:
                copy = sizeof(cmd.cmd.user_info);
                dest = &cmd.cmd.user_info;
@@ -1662,51 +2284,128 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
                dest = &cmd.cmd.part_key;
                src = &ucmd->cmd.part_key;
                break;
+       case __IPATH_CMD_SLAVE_INFO:
+               copy = sizeof(cmd.cmd.slave_mask_addr);
+               dest = &cmd.cmd.slave_mask_addr;
+               src = &ucmd->cmd.slave_mask_addr;
+               break;
+       case IPATH_CMD_PIOAVAILUPD:     /* force an update of PIOAvail reg */
+               copy = 0;
+               src = NULL;
+               dest = NULL;
+               break;
+       case IPATH_CMD_POLL_TYPE:
+               copy = sizeof(cmd.cmd.poll_type);
+               dest = &cmd.cmd.poll_type;
+               src = &ucmd->cmd.poll_type;
+               break;
+       case IPATH_CMD_ARMLAUNCH_CTRL:
+               copy = sizeof(cmd.cmd.armlaunch_ctrl);
+               dest = &cmd.cmd.armlaunch_ctrl;
+               src = &ucmd->cmd.armlaunch_ctrl;
+               break;
+       case IPATH_CMD_SDMA_INFLIGHT:
+               copy = sizeof(cmd.cmd.sdma_inflight);
+               dest = &cmd.cmd.sdma_inflight;
+               src = &ucmd->cmd.sdma_inflight;
+               break;
+       case IPATH_CMD_SDMA_COMPLETE:
+               copy = sizeof(cmd.cmd.sdma_complete);
+               dest = &cmd.cmd.sdma_complete;
+               src = &ucmd->cmd.sdma_complete;
+               break;
        default:
                ret = -EINVAL;
                goto bail;
        }
 
-       if ((count - consumed) < copy) {
-               ret = -EINVAL;
-               goto bail;
-       }
+       if (copy) {
+               if ((count - consumed) < copy) {
+                       ret = -EINVAL;
+                       goto bail;
+               }
 
-       if (copy_from_user(dest, src, copy)) {
-               ret = -EFAULT;
-               goto bail;
+               if (copy_from_user(dest, src, copy)) {
+                       ret = -EFAULT;
+                       goto bail;
+               }
+
+               consumed += copy;
        }
 
-       consumed += copy;
        pd = port_fp(fp);
+       if (!pd && cmd.type != __IPATH_CMD_USER_INIT &&
+               cmd.type != IPATH_CMD_ASSIGN_PORT) {
+               ret = -EINVAL;
+               goto bail;
+       }
 
        switch (cmd.type) {
+       case IPATH_CMD_ASSIGN_PORT:
+               ret = ipath_assign_port(fp, &cmd.cmd.user_info);
+               if (ret)
+                       goto bail;
+               break;
+       case __IPATH_CMD_USER_INIT:
+               /* backwards compatibility, get port first */
+               ret = ipath_assign_port(fp, &cmd.cmd.user_info);
+               if (ret)
+                       goto bail;
+               /* and fall through to current version. */
        case IPATH_CMD_USER_INIT:
-               ret = ipath_do_user_init(pd, &cmd.cmd.user_info);
-               if (ret < 0)
+               ret = ipath_do_user_init(fp, &cmd.cmd.user_info);
+               if (ret)
                        goto bail;
                ret = ipath_get_base_info(
-                       pd, (void __user *) (unsigned long)
+                       fp, (void __user *) (unsigned long)
                        cmd.cmd.user_info.spu_base_info,
                        cmd.cmd.user_info.spu_base_info_size);
                break;
        case IPATH_CMD_RECV_CTRL:
-               ret = ipath_manage_rcvq(pd, cmd.cmd.recv_ctrl);
+               ret = ipath_manage_rcvq(pd, subport_fp(fp), cmd.cmd.recv_ctrl);
                break;
        case IPATH_CMD_PORT_INFO:
-               ret = ipath_port_info(pd,
+               ret = ipath_port_info(pd, subport_fp(fp),
                                      (struct ipath_port_info __user *)
                                      (unsigned long) cmd.cmd.port_info);
                break;
        case IPATH_CMD_TID_UPDATE:
-               ret = ipath_tid_update(pd, &cmd.cmd.tid_info);
+               ret = ipath_tid_update(pd, fp, &cmd.cmd.tid_info);
                break;
        case IPATH_CMD_TID_FREE:
-               ret = ipath_tid_free(pd, &cmd.cmd.tid_info);
+               ret = ipath_tid_free(pd, subport_fp(fp), &cmd.cmd.tid_info);
                break;
        case IPATH_CMD_SET_PART_KEY:
                ret = ipath_set_part_key(pd, cmd.cmd.part_key);
                break;
+       case __IPATH_CMD_SLAVE_INFO:
+               ret = ipath_get_slave_info(pd,
+                                          (void __user *) (unsigned long)
+                                          cmd.cmd.slave_mask_addr);
+               break;
+       case IPATH_CMD_PIOAVAILUPD:
+               ipath_force_pio_avail_update(pd->port_dd);
+               break;
+       case IPATH_CMD_POLL_TYPE:
+               pd->poll_type = cmd.cmd.poll_type;
+               break;
+       case IPATH_CMD_ARMLAUNCH_CTRL:
+               if (cmd.cmd.armlaunch_ctrl)
+                       ipath_enable_armlaunch(pd->port_dd);
+               else
+                       ipath_disable_armlaunch(pd->port_dd);
+               break;
+       case IPATH_CMD_SDMA_INFLIGHT:
+               ret = ipath_sdma_get_inflight(user_sdma_queue_fp(fp),
+                                             (u32 __user *) (unsigned long)
+                                             cmd.cmd.sdma_inflight);
+               break;
+       case IPATH_CMD_SDMA_COMPLETE:
+               ret = ipath_sdma_get_complete(pd->port_dd,
+                                             user_sdma_queue_fp(fp),
+                                             (u32 __user *) (unsigned long)
+                                             cmd.cmd.sdma_complete);
+               break;
        }
 
        if (ret >= 0)
@@ -1716,14 +2415,28 @@ bail:
        return ret;
 }
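
The framing ipath_write() expects is a command type word followed by a type-specific payload, with short writes rejected as -EINVAL. A hedged userspace sketch using IPATH_CMD_POLL_TYPE, the simplest payload; the struct is a simplified stand-in for struct ipath_cmd, and the command value is passed in rather than guessed:

```c
#include <stdint.h>
#include <unistd.h>

struct poll_type_cmd {		/* illustrative stand-in for ipath_cmd */
	uint32_t type;		/* e.g. IPATH_CMD_POLL_TYPE */
	uint16_t poll_type;	/* e.g. IPATH_POLL_TYPE_URGENT */
};

static int set_poll_type(int fd, uint32_t cmd_poll_type, uint16_t val)
{
	struct poll_type_cmd c = { .type = cmd_poll_type,
				   .poll_type = val };

	return write(fd, &c, sizeof(c)) < 0 ? -1 : 0;
}
```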
 
+static ssize_t ipath_writev(struct kiocb *iocb, const struct iovec *iov,
+                           unsigned long dim, loff_t off)
+{
+       struct file *filp = iocb->ki_filp;
+       struct ipath_filedata *fp = filp->private_data;
+       struct ipath_portdata *pd = port_fp(filp);
+       struct ipath_user_sdma_queue *pq = fp->pq;
+
+       if (!dim)
+               return -EINVAL;
+
+       return ipath_user_sdma_writev(pd->port_dd, pq, iov, dim);
+}
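
Userspace reaches this hook with an ordinary writev() on the fd, which kernels of this era route to .aio_write through the synchronous aio path. A minimal sketch; the actual iovec contents expected by ipath_user_sdma_writev() (packet descriptors and payload layout) are not spelled out here:

```c
#include <sys/uio.h>

static ssize_t queue_sdma(int fd, void *desc, size_t desclen,
			  void *payload, size_t paylen)
{
	struct iovec iov[2] = {
		{ .iov_base = desc,    .iov_len = desclen },
		{ .iov_base = payload, .iov_len = paylen  },
	};

	/* dim == 2; ipath_writev() rejects dim == 0 with -EINVAL */
	return writev(fd, iov, 2);
}
```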
+
 static struct class *ipath_class;
 
-static int init_cdev(int minor, char *name, struct file_operations *fops,
-                    struct cdev **cdevp, struct class_device **class_devp)
+static int init_cdev(int minor, char *name, const struct file_operations *fops,
+                    struct cdev **cdevp, struct device **devp)
 {
        const dev_t dev = MKDEV(IPATH_MAJOR, minor);
        struct cdev *cdev = NULL;
-       struct class_device *class_dev = NULL;
+       struct device *device = NULL;
        int ret;
 
        cdev = cdev_alloc();
@@ -1747,12 +2460,12 @@ static int init_cdev(int minor, char *name, struct file_operations *fops,
                goto err_cdev;
        }
 
-       class_dev = class_device_create(ipath_class, NULL, dev, NULL, name);
+       device = device_create(ipath_class, NULL, dev, NULL, name);
 
-       if (IS_ERR(class_dev)) {
-               ret = PTR_ERR(class_dev);
+       if (IS_ERR(device)) {
+               ret = PTR_ERR(device);
                printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
-                      "class_dev for minor %d, %s (err %d)\n",
+                      "device for minor %d, %s (err %d)\n",
                       minor, name, -ret);
                goto err_cdev;
        }
@@ -1766,29 +2479,29 @@ err_cdev:
 done:
        if (ret >= 0) {
                *cdevp = cdev;
-               *class_devp = class_dev;
+               *devp = device;
        } else {
                *cdevp = NULL;
-               *class_devp = NULL;
+               *devp = NULL;
        }
 
        return ret;
 }
 
-int ipath_cdev_init(int minor, char *name, struct file_operations *fops,
-                   struct cdev **cdevp, struct class_device **class_devp)
+int ipath_cdev_init(int minor, char *name, const struct file_operations *fops,
+                   struct cdev **cdevp, struct device **devp)
 {
-       return init_cdev(minor, name, fops, cdevp, class_devp);
+       return init_cdev(minor, name, fops, cdevp, devp);
 }
 
 static void cleanup_cdev(struct cdev **cdevp,
-                        struct class_device **class_devp)
+                        struct device **devp)
 {
-       struct class_device *class_dev = *class_devp;
+       struct device *dev = *devp;
 
-       if (class_dev) {
-               class_device_unregister(class_dev);
-               *class_devp = NULL;
+       if (dev) {
+               device_unregister(dev);
+               *devp = NULL;
        }
 
        if (*cdevp) {
@@ -1798,13 +2511,13 @@ static void cleanup_cdev(struct cdev **cdevp,
 }
 
 void ipath_cdev_cleanup(struct cdev **cdevp,
-                       struct class_device **class_devp)
+                       struct device **devp)
 {
-       cleanup_cdev(cdevp, class_devp);
+       cleanup_cdev(cdevp, devp);
 }
 
 static struct cdev *wildcard_cdev;
-static struct class_device *wildcard_class_dev;
+static struct device *wildcard_dev;
 
 static const dev_t dev = MKDEV(IPATH_MAJOR, 0);
 
@@ -1861,11 +2574,11 @@ int ipath_user_add(struct ipath_devdata *dd)
                        goto bail;
                }
                ret = init_cdev(0, "ipath", &ipath_file_ops, &wildcard_cdev,
-                               &wildcard_class_dev);
+                               &wildcard_dev);
                if (ret < 0) {
                        ipath_dev_err(dd, "Could not create wildcard "
                                      "minor: error %d\n", -ret);
-                       goto bail_sma;
+                       goto bail_user;
                }
 
                atomic_set(&user_setup, 1);
@@ -1874,14 +2587,14 @@ int ipath_user_add(struct ipath_devdata *dd)
        snprintf(name, sizeof(name), "ipath%d", dd->ipath_unit);
 
        ret = init_cdev(dd->ipath_unit + 1, name, &ipath_file_ops,
-                       &dd->user_cdev, &dd->user_class_dev);
+                       &dd->user_cdev, &dd->user_dev);
        if (ret < 0)
                ipath_dev_err(dd, "Could not create user minor %d, %s\n",
                              dd->ipath_unit + 1, name);
 
        goto bail;
 
-bail_sma:
+bail_user:
        user_cleanup();
 bail:
        return ret;
@@ -1889,13 +2602,13 @@ bail:
 
 void ipath_user_remove(struct ipath_devdata *dd)
 {
-       cleanup_cdev(&dd->user_cdev, &dd->user_class_dev);
+       cleanup_cdev(&dd->user_cdev, &dd->user_dev);
 
        if (atomic_dec_return(&user_count) == 0) {
                if (atomic_read(&user_setup) == 0)
                        goto bail;
 
-               cleanup_cdev(&wildcard_cdev, &wildcard_class_dev);
+               cleanup_cdev(&wildcard_cdev, &wildcard_dev);
                user_cleanup();
 
                atomic_set(&user_setup, 0);