blob: d8f1fe9b68d88e918c6903e1aafd64031020c09f [file] [log] [blame]
Thomas Gleixner2874c5f2019-05-27 08:55:01 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Inki Dae1c248b72011-10-04 19:19:01 +09002/* exynos_drm_gem.c
3 *
4 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
5 * Author: Inki Dae <inki.dae@samsung.com>
Inki Dae1c248b72011-10-04 19:19:01 +09006 */
7
David Howells760285e2012-10-02 18:01:07 +01008#include <drm/drmP.h>
David Herrmann0de23972013-07-24 21:07:52 +02009#include <drm/drm_vma_manager.h>
Inki Dae1c248b72011-10-04 19:19:01 +090010
Inki Dae2b358922012-03-16 18:47:05 +090011#include <linux/shmem_fs.h>
Joonyoung Shim01ed50d2015-08-16 14:33:08 +090012#include <linux/dma-buf.h>
Dan Williams01c8f1c2016-01-15 16:56:40 -080013#include <linux/pfn_t.h>
Inki Dae1c248b72011-10-04 19:19:01 +090014#include <drm/exynos_drm.h>
15
16#include "exynos_drm_drv.h"
17#include "exynos_drm_gem.h"
Inki Dae1c248b72011-10-04 19:19:01 +090018
Joonyoung Shim813fd67b2015-10-02 09:33:47 +090019static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
Joonyoung Shim2a8cb482015-08-16 14:38:49 +090020{
Joonyoung Shim813fd67b2015-10-02 09:33:47 +090021 struct drm_device *dev = exynos_gem->base.dev;
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -070022 unsigned long attr;
Joonyoung Shim2a8cb482015-08-16 14:38:49 +090023 unsigned int nr_pages;
Marek Szyprowskidf547bf72015-10-13 13:47:20 +020024 struct sg_table sgt;
25 int ret = -ENOMEM;
Joonyoung Shim2a8cb482015-08-16 14:38:49 +090026
Joonyoung Shim813fd67b2015-10-02 09:33:47 +090027 if (exynos_gem->dma_addr) {
Inki Dae6be90052019-04-15 16:25:12 +090028 DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "already allocated.\n");
Joonyoung Shim2a8cb482015-08-16 14:38:49 +090029 return 0;
30 }
31
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -070032 exynos_gem->dma_attrs = 0;
Joonyoung Shim2a8cb482015-08-16 14:38:49 +090033
34 /*
35 * if EXYNOS_BO_CONTIG, fully physically contiguous memory
36 * region will be allocated else physically contiguous
37 * as possible.
38 */
Joonyoung Shim813fd67b2015-10-02 09:33:47 +090039 if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -070040 exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;
Joonyoung Shim2a8cb482015-08-16 14:38:49 +090041
42 /*
43 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
44 * else cachable mapping.
45 */
Joonyoung Shim813fd67b2015-10-02 09:33:47 +090046 if (exynos_gem->flags & EXYNOS_BO_WC ||
47 !(exynos_gem->flags & EXYNOS_BO_CACHABLE))
Joonyoung Shim2a8cb482015-08-16 14:38:49 +090048 attr = DMA_ATTR_WRITE_COMBINE;
49 else
50 attr = DMA_ATTR_NON_CONSISTENT;
51
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -070052 exynos_gem->dma_attrs |= attr;
53 exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
Joonyoung Shim2a8cb482015-08-16 14:38:49 +090054
Joonyoung Shim813fd67b2015-10-02 09:33:47 +090055 nr_pages = exynos_gem->size >> PAGE_SHIFT;
Joonyoung Shim2a8cb482015-08-16 14:38:49 +090056
Michal Hocko20981052017-05-17 14:23:12 +020057 exynos_gem->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
58 GFP_KERNEL | __GFP_ZERO);
Marek Szyprowskidf547bf72015-10-13 13:47:20 +020059 if (!exynos_gem->pages) {
Inki Dae6f83d202019-04-15 14:24:36 +090060 DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate pages.\n");
Marek Szyprowskidf547bf72015-10-13 13:47:20 +020061 return -ENOMEM;
Joonyoung Shim333e8e52015-09-16 14:29:34 +090062 }
Joonyoung Shim2a8cb482015-08-16 14:38:49 +090063
Marek Szyprowskif43c3592016-02-29 17:50:53 +090064 exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
Joonyoung Shim813fd67b2015-10-02 09:33:47 +090065 &exynos_gem->dma_addr, GFP_KERNEL,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -070066 exynos_gem->dma_attrs);
Joonyoung Shim813fd67b2015-10-02 09:33:47 +090067 if (!exynos_gem->cookie) {
Inki Dae6f83d202019-04-15 14:24:36 +090068 DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate buffer.\n");
Marek Szyprowskidf547bf72015-10-13 13:47:20 +020069 goto err_free;
Joonyoung Shim333e8e52015-09-16 14:29:34 +090070 }
71
Marek Szyprowskif43c3592016-02-29 17:50:53 +090072 ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
Marek Szyprowskidf547bf72015-10-13 13:47:20 +020073 exynos_gem->dma_addr, exynos_gem->size,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -070074 exynos_gem->dma_attrs);
Marek Szyprowskidf547bf72015-10-13 13:47:20 +020075 if (ret < 0) {
Inki Dae6f83d202019-04-15 14:24:36 +090076 DRM_DEV_ERROR(to_dma_dev(dev), "failed to get sgtable.\n");
Marek Szyprowskidf547bf72015-10-13 13:47:20 +020077 goto err_dma_free;
Joonyoung Shim2a8cb482015-08-16 14:38:49 +090078 }
79
Marek Szyprowskidf547bf72015-10-13 13:47:20 +020080 if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
81 nr_pages)) {
Inki Dae6f83d202019-04-15 14:24:36 +090082 DRM_DEV_ERROR(to_dma_dev(dev), "invalid sgtable.\n");
Marek Szyprowskidf547bf72015-10-13 13:47:20 +020083 ret = -EINVAL;
84 goto err_sgt_free;
85 }
86
87 sg_free_table(&sgt);
88
Inki Dae6be90052019-04-15 16:25:12 +090089 DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "dma_addr(0x%lx), size(0x%lx)\n",
Joonyoung Shim813fd67b2015-10-02 09:33:47 +090090 (unsigned long)exynos_gem->dma_addr, exynos_gem->size);
Joonyoung Shim2a8cb482015-08-16 14:38:49 +090091
92 return 0;
Marek Szyprowskidf547bf72015-10-13 13:47:20 +020093
94err_sgt_free:
95 sg_free_table(&sgt);
96err_dma_free:
Marek Szyprowskif43c3592016-02-29 17:50:53 +090097 dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -070098 exynos_gem->dma_addr, exynos_gem->dma_attrs);
Marek Szyprowskidf547bf72015-10-13 13:47:20 +020099err_free:
Michal Hocko20981052017-05-17 14:23:12 +0200100 kvfree(exynos_gem->pages);
Marek Szyprowskidf547bf72015-10-13 13:47:20 +0200101
102 return ret;
Joonyoung Shim2a8cb482015-08-16 14:38:49 +0900103}
104
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900105static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
Joonyoung Shim2a8cb482015-08-16 14:38:49 +0900106{
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900107 struct drm_device *dev = exynos_gem->base.dev;
Joonyoung Shim2a8cb482015-08-16 14:38:49 +0900108
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900109 if (!exynos_gem->dma_addr) {
Inki Dae6be90052019-04-15 16:25:12 +0900110 DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr is invalid.\n");
Joonyoung Shim2a8cb482015-08-16 14:38:49 +0900111 return;
112 }
113
Inki Dae6be90052019-04-15 16:25:12 +0900114 DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr(0x%lx), size(0x%lx)\n",
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900115 (unsigned long)exynos_gem->dma_addr, exynos_gem->size);
Joonyoung Shim2a8cb482015-08-16 14:38:49 +0900116
Marek Szyprowskif43c3592016-02-29 17:50:53 +0900117 dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900118 (dma_addr_t)exynos_gem->dma_addr,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -0700119 exynos_gem->dma_attrs);
Joonyoung Shim333e8e52015-09-16 14:29:34 +0900120
Michal Hocko20981052017-05-17 14:23:12 +0200121 kvfree(exynos_gem->pages);
Joonyoung Shim2a8cb482015-08-16 14:38:49 +0900122}
123
Joonyoung Shim23648392011-12-13 14:39:13 +0900124static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
125 struct drm_file *file_priv,
126 unsigned int *handle)
Inki Dae1c248b72011-10-04 19:19:01 +0900127{
Inki Dae1c248b72011-10-04 19:19:01 +0900128 int ret;
129
Inki Dae1c248b72011-10-04 19:19:01 +0900130 /*
131 * allocate a id of idr table where the obj is registered
132 * and handle has the id what user can see.
133 */
134 ret = drm_gem_handle_create(file_priv, obj, handle);
135 if (ret)
Joonyoung Shim23648392011-12-13 14:39:13 +0900136 return ret;
Inki Dae1c248b72011-10-04 19:19:01 +0900137
Inki Dae6be90052019-04-15 16:25:12 +0900138 DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "gem handle = 0x%x\n", *handle);
Inki Dae1c248b72011-10-04 19:19:01 +0900139
140 /* drop reference from allocate - handle holds it now. */
Thomas Zimmermannaf7d9102018-06-18 15:17:38 +0200141 drm_gem_object_put_unlocked(obj);
Inki Dae1c248b72011-10-04 19:19:01 +0900142
Joonyoung Shim23648392011-12-13 14:39:13 +0900143 return 0;
144}
Inki Dae1c248b72011-10-04 19:19:01 +0900145
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900146void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
Joonyoung Shim23648392011-12-13 14:39:13 +0900147{
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900148 struct drm_gem_object *obj = &exynos_gem->base;
Joonyoung Shim23648392011-12-13 14:39:13 +0900149
Inki Dae6be90052019-04-15 16:25:12 +0900150 DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "handle count = %d\n",
151 obj->handle_count);
Joonyoung Shim23648392011-12-13 14:39:13 +0900152
Inki Daec374e732012-06-12 16:52:54 +0900153 /*
154 * do not release memory region from exporter.
155 *
156 * the region will be released by exporter
157 * once dmabuf's refcount becomes 0.
158 */
159 if (obj->import_attach)
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900160 drm_prime_gem_destroy(obj, exynos_gem->sgt);
Joonyoung Shim7c935372015-09-16 14:14:54 +0900161 else
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900162 exynos_drm_free_buf(exynos_gem);
Inki Daec374e732012-06-12 16:52:54 +0900163
Joonyoung Shim23648392011-12-13 14:39:13 +0900164 /* release file pointer to gem object. */
Inki Dae1c248b72011-10-04 19:19:01 +0900165 drm_gem_object_release(obj);
166
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900167 kfree(exynos_gem);
Joonyoung Shim23648392011-12-13 14:39:13 +0900168}
169
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900170static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
171 unsigned long size)
Joonyoung Shim23648392011-12-13 14:39:13 +0900172{
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900173 struct exynos_drm_gem *exynos_gem;
Joonyoung Shim23648392011-12-13 14:39:13 +0900174 struct drm_gem_object *obj;
175 int ret;
176
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900177 exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
178 if (!exynos_gem)
Joonyoung Shim5f3f4262015-07-28 17:53:22 +0900179 return ERR_PTR(-ENOMEM);
Joonyoung Shim23648392011-12-13 14:39:13 +0900180
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900181 exynos_gem->size = size;
182 obj = &exynos_gem->base;
Joonyoung Shim23648392011-12-13 14:39:13 +0900183
184 ret = drm_gem_object_init(dev, obj, size);
185 if (ret < 0) {
Inki Dae6f83d202019-04-15 14:24:36 +0900186 DRM_DEV_ERROR(dev->dev, "failed to initialize gem object\n");
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900187 kfree(exynos_gem);
Joonyoung Shim5f3f4262015-07-28 17:53:22 +0900188 return ERR_PTR(ret);
Joonyoung Shim23648392011-12-13 14:39:13 +0900189 }
190
Joonyoung Shim48cf53f2015-07-28 17:53:23 +0900191 ret = drm_gem_create_mmap_offset(obj);
192 if (ret < 0) {
193 drm_gem_object_release(obj);
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900194 kfree(exynos_gem);
Joonyoung Shim48cf53f2015-07-28 17:53:23 +0900195 return ERR_PTR(ret);
196 }
197
Inki Dae6be90052019-04-15 16:25:12 +0900198 DRM_DEV_DEBUG_KMS(dev->dev, "created file object = %pK\n", obj->filp);
Joonyoung Shim23648392011-12-13 14:39:13 +0900199
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900200 return exynos_gem;
Inki Dae1c248b72011-10-04 19:19:01 +0900201}
202
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900203struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
204 unsigned int flags,
205 unsigned long size)
Inki Daef088d5a2011-11-12 14:51:23 +0900206{
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900207 struct exynos_drm_gem *exynos_gem;
Inki Dae2b358922012-03-16 18:47:05 +0900208 int ret;
Inki Daef088d5a2011-11-12 14:51:23 +0900209
Joonyoung Shimc4130bc2015-08-16 14:15:06 +0900210 if (flags & ~(EXYNOS_BO_MASK)) {
Inki Dae6f83d202019-04-15 14:24:36 +0900211 DRM_DEV_ERROR(dev->dev,
212 "invalid GEM buffer flags: %u\n", flags);
Joonyoung Shimc4130bc2015-08-16 14:15:06 +0900213 return ERR_PTR(-EINVAL);
214 }
215
Inki Daedcf9af82012-04-03 21:27:58 +0900216 if (!size) {
Inki Dae6f83d202019-04-15 14:24:36 +0900217 DRM_DEV_ERROR(dev->dev, "invalid GEM buffer size: %lu\n", size);
Inki Daedcf9af82012-04-03 21:27:58 +0900218 return ERR_PTR(-EINVAL);
219 }
Inki Daef088d5a2011-11-12 14:51:23 +0900220
Joonyoung Shimeb57da82015-07-28 17:53:27 +0900221 size = roundup(size, PAGE_SIZE);
Inki Daedcf9af82012-04-03 21:27:58 +0900222
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900223 exynos_gem = exynos_drm_gem_init(dev, size);
224 if (IS_ERR(exynos_gem))
225 return exynos_gem;
Inki Dae2b358922012-03-16 18:47:05 +0900226
Marek Szyprowski120a2642017-11-22 14:14:47 +0100227 if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
228 /*
229 * when no IOMMU is available, all allocated buffers are
230 * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag
231 */
232 flags &= ~EXYNOS_BO_NONCONTIG;
233 DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
234 }
235
Inki Dae2b358922012-03-16 18:47:05 +0900236 /* set memory type and cache attribute from user side. */
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900237 exynos_gem->flags = flags;
Inki Dae2b358922012-03-16 18:47:05 +0900238
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900239 ret = exynos_drm_alloc_buf(exynos_gem);
Joonyoung Shim2a8cb482015-08-16 14:38:49 +0900240 if (ret < 0) {
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900241 drm_gem_object_release(&exynos_gem->base);
242 kfree(exynos_gem);
Joonyoung Shim2a8cb482015-08-16 14:38:49 +0900243 return ERR_PTR(ret);
244 }
Inki Daef088d5a2011-11-12 14:51:23 +0900245
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900246 return exynos_gem;
Inki Daef088d5a2011-11-12 14:51:23 +0900247}
248
Inki Dae1c248b72011-10-04 19:19:01 +0900249int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
Joonyoung Shimee5e7702011-12-13 14:20:23 +0900250 struct drm_file *file_priv)
Inki Dae1c248b72011-10-04 19:19:01 +0900251{
252 struct drm_exynos_gem_create *args = data;
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900253 struct exynos_drm_gem *exynos_gem;
Joonyoung Shim23648392011-12-13 14:39:13 +0900254 int ret;
Inki Dae1c248b72011-10-04 19:19:01 +0900255
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900256 exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size);
257 if (IS_ERR(exynos_gem))
258 return PTR_ERR(exynos_gem);
Inki Dae1c248b72011-10-04 19:19:01 +0900259
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900260 ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
261 &args->handle);
Joonyoung Shim23648392011-12-13 14:39:13 +0900262 if (ret) {
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900263 exynos_drm_gem_destroy(exynos_gem);
Joonyoung Shim23648392011-12-13 14:39:13 +0900264 return ret;
265 }
266
Inki Dae1c248b72011-10-04 19:19:01 +0900267 return 0;
268}
269
Joonyoung Shim6564c652016-03-08 14:12:59 +0900270int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
271 struct drm_file *file_priv)
272{
273 struct drm_exynos_gem_map *args = data;
274
Noralf Trønnes4d12c2332017-08-06 17:41:02 +0200275 return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
276 &args->offset);
Joonyoung Shim6564c652016-03-08 14:12:59 +0900277}
278
Marek Szyprowski3aa2a5c2018-07-09 15:44:31 +0200279struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
280 unsigned int gem_handle)
Inki Daef0b1bda2012-03-16 18:47:06 +0900281{
Inki Daef0b1bda2012-03-16 18:47:06 +0900282 struct drm_gem_object *obj;
283
Chris Wilsona8ad0bd2016-05-09 11:04:54 +0100284 obj = drm_gem_object_lookup(filp, gem_handle);
Marek Szyprowski3aa2a5c2018-07-09 15:44:31 +0200285 if (!obj)
286 return NULL;
287 return to_exynos_gem(obj);
Inki Daef0b1bda2012-03-16 18:47:06 +0900288}
289
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900290static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
Joonyoung Shimee5e7702011-12-13 14:20:23 +0900291 struct vm_area_struct *vma)
Inki Dae1c248b72011-10-04 19:19:01 +0900292{
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900293 struct drm_device *drm_dev = exynos_gem->base.dev;
Inki Dae0519f9a2012-10-20 07:53:42 -0700294 unsigned long vm_size;
Inki Dae5b07c662012-11-08 21:52:54 +0900295 int ret;
Inki Dae1c248b72011-10-04 19:19:01 +0900296
Inki Dae832316c2014-09-18 14:19:01 +0900297 vma->vm_flags &= ~VM_PFNMAP;
298 vma->vm_pgoff = 0;
Inki Dae1c248b72011-10-04 19:19:01 +0900299
Inki Dae0519f9a2012-10-20 07:53:42 -0700300 vm_size = vma->vm_end - vma->vm_start;
Inki Dae2b358922012-03-16 18:47:05 +0900301
Inki Dae1c248b72011-10-04 19:19:01 +0900302 /* check if user-requested size is valid. */
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900303 if (vm_size > exynos_gem->size)
Inki Dae1c248b72011-10-04 19:19:01 +0900304 return -EINVAL;
305
Marek Szyprowskif43c3592016-02-29 17:50:53 +0900306 ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900307 exynos_gem->dma_addr, exynos_gem->size,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -0700308 exynos_gem->dma_attrs);
Inki Dae5b07c662012-11-08 21:52:54 +0900309 if (ret < 0) {
310 DRM_ERROR("failed to mmap.\n");
311 return ret;
312 }
313
Inki Dae1c248b72011-10-04 19:19:01 +0900314 return 0;
315}
316
Inki Dae40cd7e02012-05-04 15:51:17 +0900317int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
318 struct drm_file *file_priv)
Joonyoung Shimb4cfd4d2015-09-16 14:29:35 +0900319{
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900320 struct exynos_drm_gem *exynos_gem;
Inki Dae40cd7e02012-05-04 15:51:17 +0900321 struct drm_exynos_gem_info *args = data;
322 struct drm_gem_object *obj;
323
Chris Wilsona8ad0bd2016-05-09 11:04:54 +0100324 obj = drm_gem_object_lookup(file_priv, args->handle);
Inki Dae40cd7e02012-05-04 15:51:17 +0900325 if (!obj) {
Inki Dae6f83d202019-04-15 14:24:36 +0900326 DRM_DEV_ERROR(dev->dev, "failed to lookup gem object.\n");
Inki Dae40cd7e02012-05-04 15:51:17 +0900327 return -EINVAL;
328 }
329
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900330 exynos_gem = to_exynos_gem(obj);
Inki Dae40cd7e02012-05-04 15:51:17 +0900331
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900332 args->flags = exynos_gem->flags;
333 args->size = exynos_gem->size;
Inki Dae40cd7e02012-05-04 15:51:17 +0900334
Thomas Zimmermannaf7d9102018-06-18 15:17:38 +0200335 drm_gem_object_put_unlocked(obj);
Inki Dae40cd7e02012-05-04 15:51:17 +0900336
337 return 0;
338}
339
/* DRM core callback: destroy the object once its last reference drops. */
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem(obj));
}
344
345int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
Joonyoung Shimee5e7702011-12-13 14:20:23 +0900346 struct drm_device *dev,
347 struct drm_mode_create_dumb *args)
Inki Dae1c248b72011-10-04 19:19:01 +0900348{
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900349 struct exynos_drm_gem *exynos_gem;
Joonyoung Shim333e8e52015-09-16 14:29:34 +0900350 unsigned int flags;
Joonyoung Shim23648392011-12-13 14:39:13 +0900351 int ret;
Inki Dae1c248b72011-10-04 19:19:01 +0900352
Inki Dae1c248b72011-10-04 19:19:01 +0900353 /*
Masanari Iidac6b78bc2013-10-24 16:02:57 +0900354 * allocate memory to be used for framebuffer.
Inki Dae1c248b72011-10-04 19:19:01 +0900355 * - this callback would be called by user application
356 * with DRM_IOCTL_MODE_CREATE_DUMB command.
357 */
358
Cooper Yuan3fd6b692012-06-29 11:49:45 +0900359 args->pitch = args->width * ((args->bpp + 7) / 8);
Inki Dae7da59072012-08-17 15:24:03 +0900360 args->size = args->pitch * args->height;
Inki Dae1c248b72011-10-04 19:19:01 +0900361
Joonyoung Shim333e8e52015-09-16 14:29:34 +0900362 if (is_drm_iommu_supported(dev))
363 flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
364 else
365 flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;
Vikas Sajjan3fec4532013-08-23 12:05:06 +0530366
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900367 exynos_gem = exynos_drm_gem_create(dev, flags, args->size);
368 if (IS_ERR(exynos_gem)) {
Rahul Sharma122beea2014-05-07 17:21:29 +0530369 dev_warn(dev->dev, "FB allocation failed.\n");
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900370 return PTR_ERR(exynos_gem);
Rahul Sharma122beea2014-05-07 17:21:29 +0530371 }
Inki Dae1c248b72011-10-04 19:19:01 +0900372
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900373 ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
374 &args->handle);
Joonyoung Shim23648392011-12-13 14:39:13 +0900375 if (ret) {
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900376 exynos_drm_gem_destroy(exynos_gem);
Joonyoung Shim23648392011-12-13 14:39:13 +0900377 return ret;
378 }
379
Inki Dae1c248b72011-10-04 19:19:01 +0900380 return 0;
381}
382
Souptick Joarder8a8d9b22018-04-14 21:34:29 +0530383vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf)
Inki Dae1c248b72011-10-04 19:19:01 +0900384{
Dave Jiang11bac802017-02-24 14:56:41 -0800385 struct vm_area_struct *vma = vmf->vma;
Inki Dae1c248b72011-10-04 19:19:01 +0900386 struct drm_gem_object *obj = vma->vm_private_data;
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900387 struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
Joonyoung Shim0e9a2ee2015-07-28 17:53:19 +0900388 unsigned long pfn;
Inki Dae1c248b72011-10-04 19:19:01 +0900389 pgoff_t page_offset;
Inki Dae1c248b72011-10-04 19:19:01 +0900390
Jan Kara1a29d852016-12-14 15:07:01 -0800391 page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
Inki Dae1c248b72011-10-04 19:19:01 +0900392
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900393 if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
Joonyoung Shim0e9a2ee2015-07-28 17:53:19 +0900394 DRM_ERROR("invalid page offset\n");
Souptick Joarder8a8d9b22018-04-14 21:34:29 +0530395 return VM_FAULT_SIGBUS;
Joonyoung Shim0e9a2ee2015-07-28 17:53:19 +0900396 }
Inki Dae1c248b72011-10-04 19:19:01 +0900397
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900398 pfn = page_to_pfn(exynos_gem->pages[page_offset]);
Souptick Joarder8a8d9b22018-04-14 21:34:29 +0530399 return vmf_insert_mixed(vma, vmf->address,
400 __pfn_to_pfn_t(pfn, PFN_DEV));
Inki Dae1c248b72011-10-04 19:19:01 +0900401}
402
Joonyoung Shim5a0202f2016-04-22 16:30:47 +0900403static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
404 struct vm_area_struct *vma)
Inki Dae1c248b72011-10-04 19:19:01 +0900405{
Joonyoung Shim5a0202f2016-04-22 16:30:47 +0900406 struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
Inki Dae1c248b72011-10-04 19:19:01 +0900407 int ret;
408
Inki Dae6be90052019-04-15 16:25:12 +0900409 DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
410 exynos_gem->flags);
Joonyoung Shim211b8872015-08-16 14:16:49 +0900411
412 /* non-cachable as default. */
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900413 if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
Joonyoung Shim211b8872015-08-16 14:16:49 +0900414 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900415 else if (exynos_gem->flags & EXYNOS_BO_WC)
Joonyoung Shim211b8872015-08-16 14:16:49 +0900416 vma->vm_page_prot =
417 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
418 else
419 vma->vm_page_prot =
420 pgprot_noncached(vm_get_page_prot(vma->vm_flags));
Inki Daec01d73fa2012-04-23 19:26:34 +0900421
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900422 ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
Inki Dae832316c2014-09-18 14:19:01 +0900423 if (ret)
424 goto err_close_vm;
425
426 return ret;
427
428err_close_vm:
429 drm_gem_vm_close(vma);
Inki Dae832316c2014-09-18 14:19:01 +0900430
Inki Dae1c248b72011-10-04 19:19:01 +0900431 return ret;
432}
Joonyoung Shim01ed50d2015-08-16 14:33:08 +0900433
Joonyoung Shim5a0202f2016-04-22 16:30:47 +0900434int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
435{
436 struct drm_gem_object *obj;
437 int ret;
438
439 /* set vm_area_struct. */
440 ret = drm_gem_mmap(filp, vma);
441 if (ret < 0) {
442 DRM_ERROR("failed to mmap.\n");
443 return ret;
444 }
445
446 obj = vma->vm_private_data;
447
Joonyoung Shim55b19fc2016-04-22 16:30:48 +0900448 if (obj->import_attach)
449 return dma_buf_mmap(obj->dma_buf, vma, 0);
450
Joonyoung Shim5a0202f2016-04-22 16:30:47 +0900451 return exynos_drm_gem_mmap_obj(obj, vma);
452}
453
/* low-level interface prime helpers */
/*
 * Import a dma-buf using the DMA device (not the top-level DRM device)
 * as the attachment target, so mapping happens on the right device.
 */
struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
						   struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev));
}
460
Joonyoung Shim01ed50d2015-08-16 14:33:08 +0900461struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
462{
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900463 struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
Joonyoung Shim01ed50d2015-08-16 14:33:08 +0900464 int npages;
465
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900466 npages = exynos_gem->size >> PAGE_SHIFT;
Joonyoung Shim01ed50d2015-08-16 14:33:08 +0900467
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900468 return drm_prime_pages_to_sg(exynos_gem->pages, npages);
Joonyoung Shim01ed50d2015-08-16 14:33:08 +0900469}
470
471struct drm_gem_object *
472exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
473 struct dma_buf_attachment *attach,
474 struct sg_table *sgt)
475{
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900476 struct exynos_drm_gem *exynos_gem;
Joonyoung Shim01ed50d2015-08-16 14:33:08 +0900477 int npages;
478 int ret;
479
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900480 exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
481 if (IS_ERR(exynos_gem)) {
482 ret = PTR_ERR(exynos_gem);
Inki Dae50002d42015-08-31 01:11:53 +0900483 return ERR_PTR(ret);
Joonyoung Shim01ed50d2015-08-16 14:33:08 +0900484 }
485
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900486 exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
Joonyoung Shim2a8cb482015-08-16 14:38:49 +0900487
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900488 npages = exynos_gem->size >> PAGE_SHIFT;
Michal Hocko20981052017-05-17 14:23:12 +0200489 exynos_gem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900490 if (!exynos_gem->pages) {
Joonyoung Shim2a8cb482015-08-16 14:38:49 +0900491 ret = -ENOMEM;
492 goto err;
493 }
494
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900495 ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL,
496 npages);
Joonyoung Shim2a8cb482015-08-16 14:38:49 +0900497 if (ret < 0)
498 goto err_free_large;
499
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900500 exynos_gem->sgt = sgt;
Joonyoung Shim7c935372015-09-16 14:14:54 +0900501
Joonyoung Shim01ed50d2015-08-16 14:33:08 +0900502 if (sgt->nents == 1) {
503 /* always physically continuous memory if sgt->nents is 1. */
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900504 exynos_gem->flags |= EXYNOS_BO_CONTIG;
Joonyoung Shim01ed50d2015-08-16 14:33:08 +0900505 } else {
506 /*
507 * this case could be CONTIG or NONCONTIG type but for now
508 * sets NONCONTIG.
509 * TODO. we have to find a way that exporter can notify
510 * the type of its own buffer to importer.
511 */
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900512 exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
Joonyoung Shim01ed50d2015-08-16 14:33:08 +0900513 }
514
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900515 return &exynos_gem->base;
Joonyoung Shim01ed50d2015-08-16 14:33:08 +0900516
517err_free_large:
Michal Hocko20981052017-05-17 14:23:12 +0200518 kvfree(exynos_gem->pages);
Joonyoung Shim01ed50d2015-08-16 14:33:08 +0900519err:
Joonyoung Shim813fd67b2015-10-02 09:33:47 +0900520 drm_gem_object_release(&exynos_gem->base);
521 kfree(exynos_gem);
Joonyoung Shim01ed50d2015-08-16 14:33:08 +0900522 return ERR_PTR(ret);
523}
524
525void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
526{
527 return NULL;
528}
529
/* vmap never maps anything, so there is nothing to undo here. */
void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}
Joonyoung Shim5a0202f2016-04-22 16:30:47 +0900534
535int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj,
536 struct vm_area_struct *vma)
537{
538 int ret;
539
540 ret = drm_gem_mmap_obj(obj, obj->size, vma);
541 if (ret < 0)
542 return ret;
543
544 return exynos_drm_gem_mmap_obj(obj, vma);
545}