/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"

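/*
 * Allocate the backing storage for a GEM object through the DMA-mapping
 * API according to its EXYNOS_BO_* flags, and build the pages[] array
 * from the allocation's scatter-gather table.
 */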
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
{
        struct drm_device *dev = exynos_gem->base.dev;
        unsigned long attr;
        unsigned int nr_pages;
        struct sg_table sgt;
        int ret = -ENOMEM;

        if (exynos_gem->dma_addr) {
                DRM_DEBUG_KMS("already allocated.\n");
                return 0;
        }

        exynos_gem->dma_attrs = 0;

        /*
         * If EXYNOS_BO_CONTIG is requested, allocate a fully physically
         * contiguous memory region; otherwise make the allocation as
         * physically contiguous as possible.
         */
        if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
                exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;

        /*
         * If EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE is requested, use a
         * write-combined mapping; otherwise use a cacheable mapping.
         */
        if (exynos_gem->flags & EXYNOS_BO_WC ||
                        !(exynos_gem->flags & EXYNOS_BO_CACHABLE))
                attr = DMA_ATTR_WRITE_COMBINE;
        else
                attr = DMA_ATTR_NON_CONSISTENT;

        exynos_gem->dma_attrs |= attr;
        exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

        nr_pages = exynos_gem->size >> PAGE_SHIFT;

        exynos_gem->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
                        GFP_KERNEL | __GFP_ZERO);
        if (!exynos_gem->pages) {
                DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate pages.\n");
                return -ENOMEM;
        }

        exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
                                             &exynos_gem->dma_addr, GFP_KERNEL,
                                             exynos_gem->dma_attrs);
        if (!exynos_gem->cookie) {
                DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate buffer.\n");
                goto err_free;
        }

        ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
                                    exynos_gem->dma_addr, exynos_gem->size,
                                    exynos_gem->dma_attrs);
        if (ret < 0) {
                DRM_DEV_ERROR(to_dma_dev(dev), "failed to get sgtable.\n");
                goto err_dma_free;
        }

        if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
                                             nr_pages)) {
                DRM_DEV_ERROR(to_dma_dev(dev), "invalid sgtable.\n");
                ret = -EINVAL;
                goto err_sgt_free;
        }

        sg_free_table(&sgt);

        DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
                        (unsigned long)exynos_gem->dma_addr, exynos_gem->size);

        return 0;

err_sgt_free:
        sg_free_table(&sgt);
err_dma_free:
        dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
                       exynos_gem->dma_addr, exynos_gem->dma_attrs);
err_free:
        kvfree(exynos_gem->pages);

        return ret;
}

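/*
 * Release the DMA buffer and the pages[] array set up by
 * exynos_drm_alloc_buf().
 */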
static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
{
        struct drm_device *dev = exynos_gem->base.dev;

        if (!exynos_gem->dma_addr) {
                DRM_DEBUG_KMS("dma_addr is invalid.\n");
                return;
        }

        DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
                        (unsigned long)exynos_gem->dma_addr, exynos_gem->size);

        dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
                        (dma_addr_t)exynos_gem->dma_addr,
                        exynos_gem->dma_attrs);

        kvfree(exynos_gem->pages);
}

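/*
 * Create a userspace handle for @obj and drop the reference taken at
 * allocation time; the handle owns the object from then on.
 */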
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
                                        struct drm_file *file_priv,
                                        unsigned int *handle)
{
        int ret;

        /*
         * Allocate an id in the idr table where the object is registered;
         * the handle holds the id that userspace sees.
         */
        ret = drm_gem_handle_create(file_priv, obj, handle);
        if (ret)
                return ret;

        DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_put_unlocked(obj);

        return 0;
}

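/*
 * Common destruction path for exynos GEM objects, used by the
 * free_object callback and by error paths in this file.
 */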
void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
        struct drm_gem_object *obj = &exynos_gem->base;

        DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);

        /*
         * Do not release the memory region of an imported buffer;
         * the exporter releases it once the dma-buf's refcount
         * drops to zero.
         */
        if (obj->import_attach)
                drm_prime_gem_destroy(obj, exynos_gem->sgt);
        else
                exynos_drm_free_buf(exynos_gem);

        /* release file pointer to gem object. */
        drm_gem_object_release(obj);

        kfree(exynos_gem);
}

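/*
 * Allocate and initialize the exynos_drm_gem wrapper for a @size byte
 * object and create its mmap offset; the backing storage is allocated
 * separately.
 */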
static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
                                                  unsigned long size)
{
        struct exynos_drm_gem *exynos_gem;
        struct drm_gem_object *obj;
        int ret;

        exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
        if (!exynos_gem)
                return ERR_PTR(-ENOMEM);

        exynos_gem->size = size;
        obj = &exynos_gem->base;

        ret = drm_gem_object_init(dev, obj, size);
        if (ret < 0) {
                DRM_DEV_ERROR(dev->dev, "failed to initialize gem object\n");
                kfree(exynos_gem);
                return ERR_PTR(ret);
        }

        ret = drm_gem_create_mmap_offset(obj);
        if (ret < 0) {
                drm_gem_object_release(obj);
                kfree(exynos_gem);
                return ERR_PTR(ret);
        }

        DRM_DEBUG_KMS("created file object = %pK\n", obj->filp);

        return exynos_gem;
}

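/*
 * Allocate a fully backed GEM object: validate the flags and size, round
 * the size up to a page multiple, then allocate the object and its
 * backing storage.
 */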
struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
                                             unsigned int flags,
                                             unsigned long size)
{
        struct exynos_drm_gem *exynos_gem;
        int ret;

        if (flags & ~(EXYNOS_BO_MASK)) {
                DRM_DEV_ERROR(dev->dev,
                              "invalid GEM buffer flags: %u\n", flags);
                return ERR_PTR(-EINVAL);
        }

        if (!size) {
                DRM_DEV_ERROR(dev->dev, "invalid GEM buffer size: %lu\n", size);
                return ERR_PTR(-EINVAL);
        }

        size = roundup(size, PAGE_SIZE);

        exynos_gem = exynos_drm_gem_init(dev, size);
        if (IS_ERR(exynos_gem))
                return exynos_gem;

        if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
                /*
                 * when no IOMMU is available, all allocated buffers are
                 * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag
                 */
                flags &= ~EXYNOS_BO_NONCONTIG;
                DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
        }

        /* set memory type and cache attribute from user side. */
        exynos_gem->flags = flags;

        ret = exynos_drm_alloc_buf(exynos_gem);
        if (ret < 0) {
                drm_gem_object_release(&exynos_gem->base);
                kfree(exynos_gem);
                return ERR_PTR(ret);
        }

        return exynos_gem;
}

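/*
 * ioctl handler that allocates a GEM object with the requested flags and
 * size and returns a handle to it.
 */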
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct drm_exynos_gem_create *args = data;
        struct exynos_drm_gem *exynos_gem;
        int ret;

        exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size);
        if (IS_ERR(exynos_gem))
                return PTR_ERR(exynos_gem);

        ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
                                           &args->handle);
        if (ret) {
                exynos_drm_gem_destroy(exynos_gem);
                return ret;
        }

        return 0;
}

int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct drm_exynos_gem_map *args = data;

        return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
                                       &args->offset);
}

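/*
 * Look up the exynos GEM object associated with @gem_handle, or return
 * NULL if the lookup fails; the reference taken by the lookup must be
 * dropped by the caller.
 */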
struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
                                          unsigned int gem_handle)
{
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(filp, gem_handle);
        if (!obj)
                return NULL;
        return to_exynos_gem(obj);
}

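/*
 * Map the whole DMA buffer into the given VMA with dma_mmap_attrs();
 * the requested mapping must not be larger than the buffer.
 */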
static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
                                      struct vm_area_struct *vma)
{
        struct drm_device *drm_dev = exynos_gem->base.dev;
        unsigned long vm_size;
        int ret;

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_pgoff = 0;

        vm_size = vma->vm_end - vma->vm_start;

        /* check if user-requested size is valid. */
        if (vm_size > exynos_gem->size)
                return -EINVAL;

        ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
                             exynos_gem->dma_addr, exynos_gem->size,
                             exynos_gem->dma_attrs);
        if (ret < 0) {
                DRM_ERROR("failed to mmap.\n");
                return ret;
        }

        return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct exynos_drm_gem *exynos_gem;
        struct drm_exynos_gem_info *args = data;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!obj) {
                DRM_DEV_ERROR(dev->dev, "failed to lookup gem object.\n");
                return -EINVAL;
        }

        exynos_gem = to_exynos_gem(obj);

        args->flags = exynos_gem->flags;
        args->size = exynos_gem->size;

        drm_gem_object_put_unlocked(obj);

        return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
        exynos_drm_gem_destroy(to_exynos_gem(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
                               struct drm_device *dev,
                               struct drm_mode_create_dumb *args)
{
        struct exynos_drm_gem *exynos_gem;
        unsigned int flags;
        int ret;

        /*
         * Allocate memory to be used for a framebuffer.
         * - this callback is invoked by a user application through the
         *   DRM_IOCTL_MODE_CREATE_DUMB command.
         */

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        if (is_drm_iommu_supported(dev))
                flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
        else
                flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

        exynos_gem = exynos_drm_gem_create(dev, flags, args->size);
        if (IS_ERR(exynos_gem)) {
                dev_warn(dev->dev, "FB allocation failed.\n");
                return PTR_ERR(exynos_gem);
        }

        ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
                                           &args->handle);
        if (ret) {
                exynos_drm_gem_destroy(exynos_gem);
                return ret;
        }

        return 0;
}

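/*
 * Page fault handler for mmapped GEM objects: translate the faulting
 * address into a page index and insert the pfn of the corresponding
 * backing page into the VMA.
 */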
vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
        struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
        unsigned long pfn;
        pgoff_t page_offset;

        page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

        if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
                DRM_ERROR("invalid page offset\n");
                return VM_FAULT_SIGBUS;
        }

        pfn = page_to_pfn(exynos_gem->pages[page_offset]);
        return vmf_insert_mixed(vma, vmf->address,
                                __pfn_to_pfn_t(pfn, PFN_DEV));
}

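/*
 * Apply the caching attributes requested at allocation time to the VMA
 * and map the buffer into it.
 */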
static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
                                   struct vm_area_struct *vma)
{
        struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
        int ret;

        DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem->flags);

        /* non-cacheable by default. */
        if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        else if (exynos_gem->flags & EXYNOS_BO_WC)
                vma->vm_page_prot =
                        pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        else
                vma->vm_page_prot =
                        pgprot_noncached(vm_get_page_prot(vma->vm_flags));

        ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
        if (ret)
                goto err_close_vm;

        return ret;

err_close_vm:
        drm_gem_vm_close(vma);

        return ret;
}

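/*
 * mmap file operation: let the DRM core set up the VMA, then hand
 * imported buffers to dma_buf_mmap() and locally allocated buffers to
 * exynos_drm_gem_mmap_obj().
 */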
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_gem_object *obj;
        int ret;

        /* set vm_area_struct. */
        ret = drm_gem_mmap(filp, vma);
        if (ret < 0) {
                DRM_ERROR("failed to mmap.\n");
                return ret;
        }

        obj = vma->vm_private_data;

        if (obj->import_attach)
                return dma_buf_mmap(obj->dma_buf, vma, 0);

        return exynos_drm_gem_mmap_obj(obj, vma);
}

/* low-level interface prime helpers */
struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
                                            struct dma_buf *dma_buf)
{
        return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev));
}

struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
        struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
        int npages;

        npages = exynos_gem->size >> PAGE_SHIFT;

        return drm_prime_pages_to_sg(exynos_gem->pages, npages);
}

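/*
 * Wrap an imported PRIME scatter-gather table in an exynos GEM object;
 * the buffer is treated as contiguous when the table has a single entry.
 */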
struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
                                     struct dma_buf_attachment *attach,
                                     struct sg_table *sgt)
{
        struct exynos_drm_gem *exynos_gem;
        int npages;
        int ret;

        exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
        if (IS_ERR(exynos_gem)) {
                ret = PTR_ERR(exynos_gem);
                return ERR_PTR(ret);
        }

        exynos_gem->dma_addr = sg_dma_address(sgt->sgl);

        npages = exynos_gem->size >> PAGE_SHIFT;
        exynos_gem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (!exynos_gem->pages) {
                ret = -ENOMEM;
                goto err;
        }

        ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL,
                                               npages);
        if (ret < 0)
                goto err_free_large;

        exynos_gem->sgt = sgt;

        if (sgt->nents == 1) {
                /* always physically contiguous memory if sgt->nents is 1. */
                exynos_gem->flags |= EXYNOS_BO_CONTIG;
        } else {
                /*
                 * This case could be either CONTIG or NONCONTIG; assume
                 * NONCONTIG for now.
                 * TODO: find a way for the exporter to notify the importer
                 * of its buffer type.
                 */
                exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
        }

        return &exynos_gem->base;

err_free_large:
        kvfree(exynos_gem->pages);
err:
        drm_gem_object_release(&exynos_gem->base);
        kfree(exynos_gem);
        return ERR_PTR(ret);
}

void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
        return NULL;
}

void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
        /* Nothing to do */
}

int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj,
                              struct vm_area_struct *vma)
{
        int ret;

        ret = drm_gem_mmap_obj(obj, obj->size, vma);
        if (ret < 0)
                return ret;

        return exynos_drm_gem_mmap_obj(obj, vma);
}