/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/dma-buf.h>
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"

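/*
 * Export: hand out the object's scatter/gather table.  The caller is
 * expected to have pinned the object already, so the sgt must exist.
 */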
struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        BUG_ON(!etnaviv_obj->sgt); /* should have already pinned! */

        return etnaviv_obj->sgt;
}

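/* Export: return a kernel virtual address for the object's pages. */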
32void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj)
33{
Lucas Stachce3088f2016-01-26 18:10:32 +010034 return etnaviv_gem_vmap(obj);
The etnaviv authorsa8c21a52015-12-03 18:21:29 +010035}
36
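/* Export: currently a no-op, the kernel mapping stays cached in the object. */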
void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
        /* TODO msm_gem_vunmap() */
}

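/*
 * Export: set up a userspace mapping.  The generic GEM mmap setup runs
 * first, then the per-object mmap handler maps the actual pages.
 */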
int etnaviv_gem_prime_mmap(struct drm_gem_object *obj,
                           struct vm_area_struct *vma)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        int ret;

        ret = drm_gem_mmap_obj(obj, obj->size, vma);
        if (ret < 0)
                return ret;

        return etnaviv_obj->ops->mmap(etnaviv_obj, vma);
}

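/*
 * Pin the backing pages when the object is exported.  Imported buffers
 * are backed by their exporter, so only natively allocated objects need
 * their page array populated here.
 */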
int etnaviv_gem_prime_pin(struct drm_gem_object *obj)
{
        if (!obj->import_attach) {
                struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

                mutex_lock(&etnaviv_obj->lock);
                etnaviv_gem_get_pages(etnaviv_obj);
                mutex_unlock(&etnaviv_obj->lock);
        }
        return 0;
}

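/* Drop the page reference taken in etnaviv_gem_prime_pin(). */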
void etnaviv_gem_prime_unpin(struct drm_gem_object *obj)
{
        if (!obj->import_attach) {
                struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

                mutex_lock(&etnaviv_obj->lock);
                etnaviv_gem_put_pages(to_etnaviv_bo(obj));
                mutex_unlock(&etnaviv_obj->lock);
        }
}

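/*
 * Release an imported buffer: drop any vmap obtained from the exporter,
 * free our page array (the pages themselves belong to the exporter) and
 * detach from the dma-buf.
 */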
static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
{
        if (etnaviv_obj->vaddr)
                dma_buf_vunmap(etnaviv_obj->base.import_attach->dmabuf,
                               etnaviv_obj->vaddr);

        /* Don't drop the pages for imported dmabuf, as they are not
         * ours, just free the array we allocated:
         */
        if (etnaviv_obj->pages)
                drm_free_large(etnaviv_obj->pages);

        drm_prime_gem_destroy(&etnaviv_obj->base, etnaviv_obj->sgt);
}

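/* Kernel mapping of an imported buffer is provided by the exporter. */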
static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
{
        lockdep_assert_held(&etnaviv_obj->lock);

        return dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf);
}

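/* Userspace mapping of an imported buffer is forwarded to the exporter, too. */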
static int etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
                                      struct vm_area_struct *vma)
{
        return dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0);
}

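/* GEM object ops used for buffers imported from another driver. */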
static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = {
        /* .get_pages should never be called */
        .release = etnaviv_gem_prime_release,
        .vmap = etnaviv_gem_prime_vmap_impl,
        .mmap = etnaviv_gem_prime_mmap_obj,
};

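/*
 * Import: wrap a foreign dma-buf in an etnaviv GEM object.  The page
 * array built from the sg_table lets the rest of the driver treat the
 * import like a normal, page-backed object.
 */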
struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
        struct dma_buf_attachment *attach, struct sg_table *sgt)
{
        struct etnaviv_gem_object *etnaviv_obj;
        size_t size = PAGE_ALIGN(attach->dmabuf->size);
        int ret, npages;

        ret = etnaviv_gem_new_private(dev, size, ETNA_BO_WC,
                                      attach->dmabuf->resv,
                                      &etnaviv_gem_prime_ops, &etnaviv_obj);
        if (ret < 0)
                return ERR_PTR(ret);

        npages = size / PAGE_SIZE;

        etnaviv_obj->sgt = sgt;
        etnaviv_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
        if (!etnaviv_obj->pages) {
                ret = -ENOMEM;
                goto fail;
        }

        ret = drm_prime_sg_to_page_addr_arrays(sgt, etnaviv_obj->pages,
                                               NULL, npages);
        if (ret)
                goto fail;

        ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
        if (ret)
                goto fail;

        return &etnaviv_obj->base;

fail:
        drm_gem_object_unreference_unlocked(&etnaviv_obj->base);

        return ERR_PTR(ret);
}