Merge commit 'v3.4.5' into android-t114-3.4-rebased
[linux-2.6.git] / drivers / base / dma-buf.c
index 965833a..7cfb405 100644 (file)
@@ -44,8 +44,26 @@ static int dma_buf_release(struct inode *inode, struct file *file)
        return 0;
 }
 
+/*
+ * dma_buf_mmap_internal - file_operations .mmap hook for a dma-buf file.
+ *
+ * Verifies that @file is really a dma-buf file, checks that the requested
+ * mapping (vm_pgoff plus the vma length in pages) fits within the buffer,
+ * and then delegates to the exporter's mmap callback (guaranteed non-NULL,
+ * since dma_buf_export() rejects ops without ->mmap).
+ *
+ * NOTE(review): unlike dma_buf_mmap() below, there is no explicit check
+ * that vm_pgoff + nr_pages does not wrap around before the size
+ * comparison — confirm whether an -EOVERFLOW check is needed here too.
+ */
+static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
+{
+       struct dma_buf *dmabuf;
+
+       if (!is_dma_buf_file(file))
+               return -EINVAL;
+
+       dmabuf = file->private_data;
+
+       /* check for overflowing the buffer's size */
+       if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+           dmabuf->size >> PAGE_SHIFT)
+               return -EINVAL;
+
+       return dmabuf->ops->mmap(dmabuf, vma);
+}
+
 static const struct file_operations dma_buf_fops = {
        .release        = dma_buf_release,
+       /* let userspace mmap the buffer directly through the dma-buf fd */
+       .mmap           = dma_buf_mmap_internal,
 };
 
 /*
@@ -80,7 +98,10 @@ struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
        if (WARN_ON(!priv || !ops
                          || !ops->map_dma_buf
                          || !ops->unmap_dma_buf
-                         || !ops->release)) {
+                         || !ops->release
+                         || !ops->kmap_atomic
+                         || !ops->kmap
+                         || !ops->mmap)) {
                return ERR_PTR(-EINVAL);
        }
 
@@ -107,17 +128,18 @@ EXPORT_SYMBOL_GPL(dma_buf_export);
 /**
  * dma_buf_fd - returns a file descriptor for the given dma_buf
  * @dmabuf:    [in]    pointer to dma_buf for which fd is required.
+ * @flags:      [in]    file creation flags (e.g. O_CLOEXEC) for the new fd.
  *
  * On success, returns an associated 'fd'. Else, returns error.
  */
-int dma_buf_fd(struct dma_buf *dmabuf)
+int dma_buf_fd(struct dma_buf *dmabuf, int flags)
 {
        int error, fd;
 
        if (!dmabuf || !dmabuf->file)
                return -EINVAL;
 
-       error = get_unused_fd();
+       error = get_unused_fd_flags(flags);
        if (error < 0)
                return error;
        fd = error;
@@ -185,17 +207,18 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
        struct dma_buf_attachment *attach;
        int ret;
 
-       if (WARN_ON(!dmabuf || !dev || !dmabuf->ops))
+       if (WARN_ON(!dmabuf || !dev))
                return ERR_PTR(-EINVAL);
 
        attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL);
        if (attach == NULL)
-               goto err_alloc;
-
-       mutex_lock(&dmabuf->lock);
+               return ERR_PTR(-ENOMEM);
 
        attach->dev = dev;
        attach->dmabuf = dmabuf;
+
+       mutex_lock(&dmabuf->lock);
+
        if (dmabuf->ops->attach) {
                ret = dmabuf->ops->attach(dmabuf, dev, attach);
                if (ret)
@@ -206,8 +229,6 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
        mutex_unlock(&dmabuf->lock);
        return attach;
 
-err_alloc:
-       return ERR_PTR(-ENOMEM);
 err_attach:
        kfree(attach);
        mutex_unlock(&dmabuf->lock);
@@ -224,7 +245,7 @@ EXPORT_SYMBOL_GPL(dma_buf_attach);
  */
 void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
 {
-       if (WARN_ON(!dmabuf || !attach || !dmabuf->ops))
+       if (WARN_ON(!dmabuf || !attach))
                return;
 
        mutex_lock(&dmabuf->lock);
@@ -255,13 +276,10 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 
        might_sleep();
 
-       if (WARN_ON(!attach || !attach->dmabuf || !attach->dmabuf->ops))
+       if (WARN_ON(!attach || !attach->dmabuf))
                return ERR_PTR(-EINVAL);
 
-       mutex_lock(&attach->dmabuf->lock);
-       if (attach->dmabuf->ops->map_dma_buf)
-               sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
-       mutex_unlock(&attach->dmabuf->lock);
+       sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
 
        return sg_table;
 }
@@ -273,19 +291,180 @@ EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
  * dma_buf_ops.
  * @attach:    [in]    attachment to unmap buffer from
  * @sg_table:  [in]    scatterlist info of the buffer to unmap
+ * @direction:  [in]    direction of DMA transfer
  *
  */
 void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
-                               struct sg_table *sg_table)
+                               struct sg_table *sg_table,
+                               enum dma_data_direction direction)
 {
-       if (WARN_ON(!attach || !attach->dmabuf || !sg_table
-                           || !attach->dmabuf->ops))
+       if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
                return;
 
-       mutex_lock(&attach->dmabuf->lock);
-       if (attach->dmabuf->ops->unmap_dma_buf)
-               attach->dmabuf->ops->unmap_dma_buf(attach, sg_table);
-       mutex_unlock(&attach->dmabuf->lock);
-
+       /*
+        * ->unmap_dma_buf is mandatory (dma_buf_export() refuses ops
+        * without it), so it can be called unconditionally and without
+        * taking dmabuf->lock.
+        */
+       attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
+                                               direction);
 }
 EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
+
+
+/**
+ * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
+ * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
+ * preparations. Coherency is only guaranteed in the specified range for the
+ * specified access direction.
+ * @dmabuf:    [in]    buffer to prepare cpu access for.
+ * @start:     [in]    start of range for cpu access.
+ * @len:       [in]    length of range for cpu access.
+ * @direction: [in]    direction of DMA transfer for cpu access.
+ *
+ * Can return negative error values, returns 0 on success.
+ */
+int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+                            enum dma_data_direction direction)
+{
+       int ret = 0;
+
+       if (WARN_ON(!dmabuf))
+               return -EINVAL;
+
+       /* ->begin_cpu_access is optional; success is assumed if absent */
+       if (dmabuf->ops->begin_cpu_access)
+               ret = dmabuf->ops->begin_cpu_access(dmabuf, start, len, direction);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
+
+/**
+ * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
+ * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
+ * actions. Coherency is only guaranteed in the specified range for the
+ * specified access direction.
+ * @dmabuf:    [in]    buffer to complete cpu access for.
+ * @start:     [in]    start of range for cpu access.
+ * @len:       [in]    length of range for cpu access.
+ * @direction: [in]    direction of DMA transfer for cpu access.
+ *
+ * This call must always succeed.
+ */
+void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+                           enum dma_data_direction direction)
+{
+       /*
+        * NOTE(review): WARN_ON does not return early here, so a NULL
+        * dmabuf would still be dereferenced just below — confirm whether
+        * an early return is wanted, as in dma_buf_begin_cpu_access().
+        */
+       WARN_ON(!dmabuf);
+
+       /* ->end_cpu_access is optional */
+       if (dmabuf->ops->end_cpu_access)
+               dmabuf->ops->end_cpu_access(dmabuf, start, len, direction);
+}
+EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
+
+/**
+ * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
+ * space. The same restrictions as for kmap_atomic and friends apply.
+ * @dmabuf:    [in]    buffer to map page from.
+ * @page_num:  [in]    page in PAGE_SIZE units to map.
+ *
+ * This call must always succeed, any necessary preparations that might fail
+ * need to be done in begin_cpu_access.
+ */
+void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
+{
+       /* NOTE(review): no early return on NULL — dmabuf is dereferenced
+        * below regardless of the WARN. */
+       WARN_ON(!dmabuf);
+
+       /* ->kmap_atomic is mandatory (checked in dma_buf_export()) */
+       return dmabuf->ops->kmap_atomic(dmabuf, page_num);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);
+
+/**
+ * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
+ * @dmabuf:    [in]    buffer to unmap page from.
+ * @page_num:  [in]    page in PAGE_SIZE units to unmap.
+ * @vaddr:     [in]    kernel space pointer obtained from dma_buf_kmap_atomic.
+ *
+ * This call must always succeed.
+ */
+void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
+                          void *vaddr)
+{
+       WARN_ON(!dmabuf);
+
+       /* ->kunmap_atomic is optional, unlike the mandatory ->kmap_atomic */
+       if (dmabuf->ops->kunmap_atomic)
+               dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);
+
+/**
+ * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
+ * same restrictions as for kmap and friends apply.
+ * @dmabuf:    [in]    buffer to map page from.
+ * @page_num:  [in]    page in PAGE_SIZE units to map.
+ *
+ * This call must always succeed, any necessary preparations that might fail
+ * need to be done in begin_cpu_access.
+ */
+void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
+{
+       /* NOTE(review): no early return on NULL — dmabuf is dereferenced
+        * below regardless of the WARN. */
+       WARN_ON(!dmabuf);
+
+       /* ->kmap is mandatory (checked in dma_buf_export()) */
+       return dmabuf->ops->kmap(dmabuf, page_num);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kmap);
+
+/**
+ * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
+ * @dmabuf:    [in]    buffer to unmap page from.
+ * @page_num:  [in]    page in PAGE_SIZE units to unmap.
+ * @vaddr:     [in]    kernel space pointer obtained from dma_buf_kmap.
+ *
+ * This call must always succeed.
+ */
+void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
+                   void *vaddr)
+{
+       WARN_ON(!dmabuf);
+
+       /* ->kunmap is optional, unlike the mandatory ->kmap */
+       if (dmabuf->ops->kunmap)
+               dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kunmap);
+
+
+/**
+ * dma_buf_mmap - Set up a userspace mmap with the given vma
+ * @dmabuf:    [in]    buffer that should back the vma
+ * @vma:       [in]    vma for the mmap
+ * @pgoff:     [in]    offset in pages where this mmap should start within the
+ *                     dma-buf buffer.
+ *
+ * This function adjusts the passed in vma so that it points at the file of the
+ * dma_buf operation. It also adjusts the starting pgoff and does bounds
+ * checking on the size of the vma. Then it calls the exporter's mmap function
+ * to set up the mapping.
+ *
+ * Can return negative error values, returns 0 on success.
+ */
+int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
+                unsigned long pgoff)
+{
+       if (WARN_ON(!dmabuf || !vma))
+               return -EINVAL;
+
+       /* check for offset overflow */
+       if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
+               return -EOVERFLOW;
+
+       /* check for overflowing the buffer's size */
+       if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+           dmabuf->size >> PAGE_SHIFT)
+               return -EINVAL;
+
+       /* readjust the vma: drop the caller's file reference and pin the
+        * dma-buf file instead, so the mapping keeps the buffer alive */
+       if (vma->vm_file)
+               fput(vma->vm_file);
+
+       vma->vm_file = dmabuf->file;
+       get_file(vma->vm_file);
+
+       vma->vm_pgoff = pgoff;
+
+       /* ->mmap is mandatory (checked in dma_buf_export()) */
+       return dmabuf->ops->mmap(dmabuf, vma);
+}
+EXPORT_SYMBOL_GPL(dma_buf_mmap);