blob: 812d1b1369a5814a6275d0b951a87752578f0a6e [file] [log] [blame]
Rob Clarkc8afe682013-06-26 12:44:06 -04001/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __MSM_GEM_H__
19#define __MSM_GEM_H__
20
Jordan Crouseee546cd2017-03-07 10:02:52 -070021#include <linux/kref.h>
Rob Clark7198e6b2013-07-19 12:59:32 -040022#include <linux/reservation.h>
Rob Clarkc8afe682013-06-26 12:44:06 -040023#include "msm_drv.h"
24
/* Additional internal-use only BO flags (must not collide with the
 * userspace-visible MSM_BO_* flags in the UAPI header):
 */
#define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
27
/* A GPU address space: a drm_mm range allocator layered over an MSM MMU
 * backend.  Refcounted via @kref, so it can be shared by multiple objects.
 */
struct msm_gem_address_space {
	const char *name;
	/* NOTE: mm managed at the page level, size is in # of pages
	 * and position mm_node->start is in # of pages:
	 */
	struct drm_mm mm;
	spinlock_t lock; /* Protects drm_mm node allocation/removal */
	struct msm_mmu *mmu;
	struct kref kref; /* reference count for the address space */
};
38
/* A mapping of a GEM object into one address space. */
struct msm_gem_vma {
	struct drm_mm_node node;  /* range reserved in aspace->mm */
	uint64_t iova;            /* device address of the mapping */
	struct msm_gem_address_space *aspace;
	struct list_head list;    /* node in msm_gem_object::vmas */
	bool mapped;              /* NOTE(review): presumably true once pages are
	                           * mapped into the MMU — confirm in msm_gem_vma.c */
	int inuse;                /* NOTE(review): looks like a pin/use count — confirm */
};
47
/* The MSM driver's GEM buffer object. */
struct msm_gem_object {
	struct drm_gem_object base;

	uint32_t flags;  /* MSM_BO_* flags, including internal MSM_BO_STOLEN */

	/**
	 * Advice: are the backing pages purgeable?
	 * (one of the MSM_MADV_* values)
	 */
	uint8_t madv;

	/**
	 * count of active vmap'ing
	 */
	uint8_t vmap_count;

	/* An object is either:
	 *  inactive - on priv->inactive_list
	 *  active   - on one of the gpu's active_list..  well, at
	 *    least for now we don't have (I don't think) hw sync between
	 *    2d and 3d one devices which have both, meaning we need to
	 *    block on submit if a bo is already on other ring
	 */
	struct list_head mm_list;
	struct msm_gpu *gpu;     /* non-null if active */

	/* Transiently in the process of submit ioctl, objects associated
	 * with the submit are on submit->bo_list.. this only lasts for
	 * the duration of the ioctl, so one bo can never be on multiple
	 * submit lists.
	 */
	struct list_head submit_entry;

	struct page **pages;     /* backing pages, when pinned */
	struct sg_table *sgt;    /* scatter/gather table for the pages */
	void *vaddr;             /* kernel virtual address, when vmap'd */

	struct list_head vmas;   /* list of msm_gem_vma */

	struct llist_node freed; /* node on the deferred-free list */

	/* For physically contiguous buffers.  Used when we don't have
	 * an IOMMU.  Also used for stolen/splashscreen buffer.
	 */
	struct drm_mm_node *vram_node;

	struct mutex lock; /* Protects resources associated with bo */

	char name[32]; /* Identifier to print for the debugfs files */
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
98
Rob Clark7198e6b2013-07-19 12:59:32 -040099static inline bool is_active(struct msm_gem_object *msm_obj)
100{
101 return msm_obj->gpu != NULL;
102}
103
Rob Clark68209392016-05-17 16:19:32 -0400104static inline bool is_purgeable(struct msm_gem_object *msm_obj)
105{
Sushmita Susheelendra0e082702017-06-13 16:52:54 -0600106 WARN_ON(!mutex_is_locked(&msm_obj->base.dev->struct_mutex));
Rob Clark68209392016-05-17 16:19:32 -0400107 return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
108 !msm_obj->base.dma_buf && !msm_obj->base.import_attach;
109}
110
Rob Clarke1e9db22016-05-27 11:16:28 -0400111static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
112{
113 return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
114}
115
/* The shrinker can be triggered while we hold objA->lock, and need
 * to grab objB->lock to purge it.  Lockdep just sees these as a single
 * class of lock, so we use subclasses to teach it the difference.
 *
 * OBJ_LOCK_NORMAL is implicit (ie. normal mutex_lock() call), and
 * OBJ_LOCK_SHRINKER is used by shrinker.
 *
 * It is *essential* that we never go down paths that could trigger the
 * shrinker for a purgable object.  This is ensured by checking that
 * msm_obj->madv == MSM_MADV_WILLNEED.
 */
enum msm_gem_lock {
	OBJ_LOCK_NORMAL,
	OBJ_LOCK_SHRINKER,
};

/* Release a purgeable object's backing pages; @subclass selects the
 * lockdep subclass used when taking the object lock (see above).
 */
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass);
/* Tear down the kernel vmap of @obj, with the same subclass convention. */
void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass);
/* Worker for the deferred-free path (see msm_gem_object::freed). */
void msm_gem_free_work(struct work_struct *work);
Sushmita Susheelendra0e082702017-06-13 16:52:54 -0600135
Rob Clark7198e6b2013-07-19 12:59:32 -0400136/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
137 * associated with the cmdstream submission for synchronization (and
138 * make it easier to unwind when things go wrong, etc). This only
139 * lasts for the duration of the submit-ioctl.
140 */
141struct msm_gem_submit {
142 struct drm_device *dev;
143 struct msm_gpu *gpu;
Jordan Crousef97deca2017-10-20 11:06:57 -0600144 struct list_head node; /* node in ring submit list */
Rob Clark7198e6b2013-07-19 12:59:32 -0400145 struct list_head bo_list;
146 struct ww_acquire_ctx ticket;
Jordan Crousef97deca2017-10-20 11:06:57 -0600147 uint32_t seqno; /* Sequence number of the submit on the ring */
Chris Wilsonf54d1862016-10-25 13:00:45 +0100148 struct dma_fence *fence;
Jordan Crousef7de1542017-10-20 11:06:55 -0600149 struct msm_gpu_submitqueue *queue;
Rob Clark4816b622016-05-03 10:10:15 -0400150 struct pid *pid; /* submitting process */
Rob Clark340faef2016-03-14 13:56:37 -0400151 bool valid; /* true if no cmdstream patching needed */
Rob Clark6a8bd082017-12-13 15:12:57 -0500152 bool in_rb; /* "sudo" mode, copy cmds into RB */
Jordan Crousef97deca2017-10-20 11:06:57 -0600153 struct msm_ringbuffer *ring;
Rob Clark7198e6b2013-07-19 12:59:32 -0400154 unsigned int nr_cmds;
155 unsigned int nr_bos;
Jordan Crouse4241db42018-11-02 09:25:21 -0600156 u32 ident; /* A "identifier" for the submit for logging */
Rob Clark7198e6b2013-07-19 12:59:32 -0400157 struct {
158 uint32_t type;
159 uint32_t size; /* in dwords */
Rob Clark78babc12016-11-11 12:06:46 -0500160 uint64_t iova;
Rob Clarka7d3c952014-05-30 14:47:38 -0400161 uint32_t idx; /* cmdstream buffer idx in bos[] */
Rob Clark6b597ce2016-06-01 14:17:40 -0400162 } *cmd; /* array of size nr_cmds */
Rob Clark7198e6b2013-07-19 12:59:32 -0400163 struct {
164 uint32_t flags;
Kristian H. Kristensenb6734992019-03-20 10:09:10 -0700165 union {
166 struct msm_gem_object *obj;
167 uint32_t handle;
168 };
Rob Clark78babc12016-11-11 12:06:46 -0500169 uint64_t iova;
Rob Clark7198e6b2013-07-19 12:59:32 -0400170 } bos[0];
171};
172
Rob Clarkc8afe682013-06-26 12:44:06 -0400173#endif /* __MSM_GEM_H__ */