/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"

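/*
 * Allocate the backing storage of a GEM object through the DMA API,
 * build a page array from the returned scatter-gather table, and record
 * the DMA attributes derived from the buffer flags.
 */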
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;
	unsigned long attr;
	unsigned int nr_pages;
	struct sg_table sgt;
	int ret = -ENOMEM;

	if (exynos_gem->dma_addr) {
		DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "already allocated.\n");
		return 0;
	}

	exynos_gem->dma_attrs = 0;

	/*
	 * If EXYNOS_BO_CONTIG is requested, allocate a fully physically
	 * contiguous memory region; otherwise allocate memory that is as
	 * physically contiguous as possible.
	 */
	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
		exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;

	/*
	 * If EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE is requested, use a
	 * write-combined mapping; otherwise use a cacheable mapping.
	 */
	if (exynos_gem->flags & EXYNOS_BO_WC ||
			!(exynos_gem->flags & EXYNOS_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	exynos_gem->dma_attrs |= attr;
	exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	nr_pages = exynos_gem->size >> PAGE_SHIFT;

	exynos_gem->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
			GFP_KERNEL | __GFP_ZERO);
	if (!exynos_gem->pages) {
		DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate pages.\n");
		return -ENOMEM;
	}

	exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
					     &exynos_gem->dma_addr, GFP_KERNEL,
					     exynos_gem->dma_attrs);
	if (!exynos_gem->cookie) {
		DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate buffer.\n");
		goto err_free;
	}

	ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
				    exynos_gem->dma_addr, exynos_gem->size,
				    exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_DEV_ERROR(to_dma_dev(dev), "failed to get sgtable.\n");
		goto err_dma_free;
	}

	if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
					     nr_pages)) {
		DRM_DEV_ERROR(to_dma_dev(dev), "invalid sgtable.\n");
		ret = -EINVAL;
		goto err_sgt_free;
	}

	sg_free_table(&sgt);

	DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	return 0;

err_sgt_free:
	sg_free_table(&sgt);
err_dma_free:
	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
		       exynos_gem->dma_addr, exynos_gem->dma_attrs);
err_free:
	kvfree(exynos_gem->pages);

	return ret;
}

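/*
 * Release the DMA buffer and the page array that were set up by
 * exynos_drm_alloc_buf().
 */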
static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;

	if (!exynos_gem->dma_addr) {
		DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr is invalid.\n");
		return;
	}

	DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
			(dma_addr_t)exynos_gem->dma_addr,
			exynos_gem->dma_attrs);

	kvfree(exynos_gem->pages);
}

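/*
 * Create a userspace handle for the given GEM object and drop the
 * reference taken at allocation time; the handle now keeps the object
 * alive.
 */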
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * Allocate an id in the idr table where the object is registered;
	 * the returned handle is the id that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put_unlocked(obj);

	return 0;
}

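/*
 * Tear down a GEM object: imported buffers are handed back to their
 * exporter, locally allocated buffers are freed, and the object itself
 * is released.
 */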
void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
	struct drm_gem_object *obj = &exynos_gem->base;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "handle count = %d\n",
			  obj->handle_count);

	/*
	 * Do not release the memory region of an imported buffer here;
	 * the exporter releases it once the dma-buf's refcount drops to 0.
	 */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, exynos_gem->sgt);
	else
		exynos_drm_free_buf(exynos_gem);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem);
}

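/*
 * Allocate and initialize an exynos_drm_gem object of the given size and
 * create its mmap offset.
 */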
static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
						  unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
	if (!exynos_gem)
		return ERR_PTR(-ENOMEM);

	exynos_gem->size = size;
	obj = &exynos_gem->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_DEV_ERROR(dev->dev, "failed to initialize gem object\n");
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret < 0) {
		drm_gem_object_release(obj);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	DRM_DEV_DEBUG_KMS(dev->dev, "created file object = %pK\n", obj->filp);

	return exynos_gem;
}

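/*
 * Create a GEM buffer of the requested size with the requested memory
 * type and cache attributes, falling back to a contiguous allocation
 * when no IOMMU is available.
 */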
struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
					     unsigned int flags,
					     unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_DEV_ERROR(dev->dev,
			      "invalid GEM buffer flags: %u\n", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_DEV_ERROR(dev->dev, "invalid GEM buffer size: %lu\n", size);
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem))
		return exynos_gem;

	if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
		/*
		 * When no IOMMU is available, all allocated buffers are
		 * contiguous anyway, so drop the EXYNOS_BO_NONCONTIG flag.
		 */
		flags &= ~EXYNOS_BO_NONCONTIG;
		DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
	}

	/* set memory type and cache attribute from user side. */
	exynos_gem->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem->base);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	return exynos_gem;
}

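/*
 * ioctl handler that allocates a new GEM buffer from userspace-supplied
 * flags and size and returns a handle to it.
 */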
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem *exynos_gem;
	int ret;

	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem))
		return PTR_ERR(exynos_gem);

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

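/*
 * ioctl handler that reports the fake mmap offset of a GEM buffer so
 * that userspace can map it.
 */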
int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_exynos_gem_map *args = data;

	return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
				       &args->offset);
}

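/*
 * Look up the exynos GEM object behind a handle; returns NULL when the
 * handle is invalid. The reference taken by the lookup is kept for the
 * caller.
 */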
struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
					  unsigned int gem_handle)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filp, gem_handle);
	if (!obj)
		return NULL;
	return to_exynos_gem(obj);
}

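/*
 * Map the DMA buffer backing a GEM object into the given VMA using
 * dma_mmap_attrs().
 */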
static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem->base.dev;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > exynos_gem->size)
		return -EINVAL;

	ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
			     exynos_gem->dma_addr, exynos_gem->size,
			     exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

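/*
 * ioctl handler that reports the flags and size of an existing GEM
 * buffer back to userspace.
 */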
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		DRM_DEV_ERROR(dev->dev, "failed to lookup gem object.\n");
		return -EINVAL;
	}

	exynos_gem = to_exynos_gem(obj);

	args->flags = exynos_gem->flags;
	args->size = exynos_gem->size;

	drm_gem_object_put_unlocked(obj);

	return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem(obj));
}

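/*
 * Dumb-buffer hook: compute pitch and size for the requested mode,
 * allocate a write-combined buffer and return a handle for it.
 */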
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem *exynos_gem;
	unsigned int flags;
	int ret;

	/*
	 * Allocate memory to be used for a framebuffer.
	 * This callback is invoked by userspace through the
	 * DRM_IOCTL_MODE_CREATE_DUMB command.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	if (is_drm_iommu_supported(dev))
		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
	else
		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

	exynos_gem = exynos_drm_gem_create(dev, flags, args->size);
	if (IS_ERR(exynos_gem)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

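/*
 * Page-fault handler for mapped GEM objects: insert the page backing the
 * faulting offset into the VMA.
 */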
vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	unsigned long pfn;
	pgoff_t page_offset;

	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
		DRM_ERROR("invalid page offset\n");
		return VM_FAULT_SIGBUS;
	}

	pfn = page_to_pfn(exynos_gem->pages[page_offset]);
	return vmf_insert_mixed(vma, vmf->address,
			__pfn_to_pfn_t(pfn, PFN_DEV));
}

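/*
 * Apply the cache attributes of a GEM object to a VMA and map its
 * buffer into it.
 */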
static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
				   struct vm_area_struct *vma)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int ret;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
			  exynos_gem->flags);

	/* non-cacheable by default. */
	if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);

	return ret;
}

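/*
 * mmap entry point for GEM objects; imported buffers are forwarded to
 * dma_buf_mmap(), everything else goes through exynos_drm_gem_mmap_obj().
 */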
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;

	if (obj->import_attach)
		return dma_buf_mmap(obj->dma_buf, vma, 0);

	return exynos_drm_gem_mmap_obj(obj, vma);
}

/* low-level interface prime helpers */
struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev));
}

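/*
 * Build a scatter-gather table from the page array of a GEM object for
 * PRIME export.
 */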
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int npages;

	npages = exynos_gem->size >> PAGE_SHIFT;

	return drm_prime_pages_to_sg(exynos_gem->pages, npages);
}

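/*
 * Wrap an imported scatter-gather table in a new exynos GEM object and
 * rebuild the page array from it.
 */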
struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem *exynos_gem;
	int npages;
	int ret;

	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem)) {
		ret = PTR_ERR(exynos_gem);
		return ERR_PTR(ret);
	}

	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);

	npages = exynos_gem->size >> PAGE_SHIFT;
	exynos_gem->pages = kvmalloc_array(npages, sizeof(struct page *),
					   GFP_KERNEL);
	if (!exynos_gem->pages) {
		ret = -ENOMEM;
		goto err;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL,
					       npages);
	if (ret < 0)
		goto err_free_large;

	exynos_gem->sgt = sgt;

	if (sgt->nents == 1) {
		/* The buffer is always physically contiguous if sgt->nents is 1. */
		exynos_gem->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * This case could be either CONTIG or NONCONTIG, but
		 * NONCONTIG is assumed for now.
		 * TODO: find a way for the exporter to notify the importer
		 * of the type of its buffer.
		 */
		exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
	}

	return &exynos_gem->base;

err_free_large:
	kvfree(exynos_gem->pages);
err:
	drm_gem_object_release(&exynos_gem->base);
	kfree(exynos_gem);
	return ERR_PTR(ret);
}

void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	return NULL;
}

void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}

int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj,
			      struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret < 0)
		return ret;

	return exynos_drm_gem_mmap_obj(obj, vma);
}