/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/sched/signal.h>
#include <linux/dma-fence-array.h>

#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_regs.h"
#include "vc4_trace.h"

static void
vc4_queue_hangcheck(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mod_timer(&vc4->hangcheck.timer,
		  round_jiffies_up(jiffies + msecs_to_jiffies(100)));
}

struct vc4_hang_state {
	struct drm_vc4_get_hang_state user_state;

	u32 bo_count;
	struct drm_gem_object **bo;
};

static void
vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
{
	unsigned int i;

	for (i = 0; i < state->user_state.bo_count; i++)
		drm_gem_object_put_unlocked(state->bo[i]);

	kfree(state);
}

int
vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_get_hang_state *get_state = data;
	struct drm_vc4_get_hang_state_bo *bo_state;
	struct vc4_hang_state *kernel_state;
	struct drm_vc4_get_hang_state *state;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;
	u32 i;
	int ret = 0;

	if (!vc4->v3d) {
		DRM_DEBUG("VC4_GET_HANG_STATE with no VC4 V3D probed\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	kernel_state = vc4->hang_state;
	if (!kernel_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return -ENOENT;
	}
	state = &kernel_state->user_state;

	/* If the user's array isn't big enough, just return the
	 * required array size.
	 */
	if (get_state->bo_count < state->bo_count) {
		get_state->bo_count = state->bo_count;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return 0;
	}

	vc4->hang_state = NULL;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* Save the user's BO pointer, so we don't stomp it with the memcpy. */
	state->bo = get_state->bo;
	memcpy(get_state, state, sizeof(*state));

	bo_state = kcalloc(state->bo_count, sizeof(*bo_state), GFP_KERNEL);
	if (!bo_state) {
		ret = -ENOMEM;
		goto err_free;
	}

	for (i = 0; i < state->bo_count; i++) {
		struct vc4_bo *vc4_bo = to_vc4_bo(kernel_state->bo[i]);
		u32 handle;

		ret = drm_gem_handle_create(file_priv, kernel_state->bo[i],
					    &handle);

		if (ret) {
			state->bo_count = i;
			goto err_delete_handle;
		}
		bo_state[i].handle = handle;
		bo_state[i].paddr = vc4_bo->base.paddr;
		bo_state[i].size = vc4_bo->base.base.size;
	}

	if (copy_to_user(u64_to_user_ptr(get_state->bo),
			 bo_state,
			 state->bo_count * sizeof(*bo_state)))
		ret = -EFAULT;

err_delete_handle:
	if (ret) {
		for (i = 0; i < state->bo_count; i++)
			drm_gem_handle_delete(file_priv, bo_state[i].handle);
	}

err_free:
	vc4_free_hang_state(dev, kernel_state);
	kfree(bo_state);

	return ret;
}
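
/* An illustrative two-pass use of VC4_GET_HANG_STATE from userspace (a
 * sketch only; "fd" is an open DRM node and error handling is elided).
 * The first call, made with bo_count == 0, reports the required array
 * size; the second collects the actual hang state:
 *
 *	struct drm_vc4_get_hang_state get = { 0 };
 *
 *	drmIoctl(fd, DRM_IOCTL_VC4_GET_HANG_STATE, &get);
 *	get.bo = (uintptr_t)calloc(get.bo_count,
 *				   sizeof(struct drm_vc4_get_hang_state_bo));
 *	drmIoctl(fd, DRM_IOCTL_VC4_GET_HANG_STATE, &get);
 *
 * Note the second call can still fail with -ENOENT if another process
 * consumed the hang state in between, so callers should expect that.
 */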

static void
vc4_save_hang_state(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_get_hang_state *state;
	struct vc4_hang_state *kernel_state;
	struct vc4_exec_info *exec[2];
	struct vc4_bo *bo;
	unsigned long irqflags;
	unsigned int i, j, k, unref_list_count;

	kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
	if (!kernel_state)
		return;

	state = &kernel_state->user_state;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	exec[0] = vc4_first_bin_job(vc4);
	exec[1] = vc4_first_render_job(vc4);
	if (!exec[0] && !exec[1]) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	/* Get the bos from both binner and renderer into hang state. */
	state->bo_count = 0;
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		unref_list_count = 0;
		list_for_each_entry(bo, &exec[i]->unref_list, unref_head)
			unref_list_count++;
		state->bo_count += exec[i]->bo_count + unref_list_count;
	}

	kernel_state->bo = kcalloc(state->bo_count,
				   sizeof(*kernel_state->bo), GFP_ATOMIC);

	if (!kernel_state->bo) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	k = 0;
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		for (j = 0; j < exec[i]->bo_count; j++) {
			bo = to_vc4_bo(&exec[i]->bo[j]->base);

			/* Retain BOs just in case they were marked purgeable.
			 * This prevents the BO from being purged before
			 * someone had a chance to dump the hang state.
			 */
			WARN_ON(!refcount_read(&bo->usecnt));
			refcount_inc(&bo->usecnt);
			drm_gem_object_get(&exec[i]->bo[j]->base);
			kernel_state->bo[k++] = &exec[i]->bo[j]->base;
		}

		list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
			/* No need to retain BOs coming from the ->unref_list
			 * because they are naturally unpurgeable.
			 */
			drm_gem_object_get(&bo->base.base);
			kernel_state->bo[k++] = &bo->base.base;
		}
	}

	WARN_ON_ONCE(k != state->bo_count);

	if (exec[0])
		state->start_bin = exec[0]->ct0ca;
	if (exec[1])
		state->start_render = exec[1]->ct1ca;

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	state->ct0ca = V3D_READ(V3D_CTNCA(0));
	state->ct0ea = V3D_READ(V3D_CTNEA(0));

	state->ct1ca = V3D_READ(V3D_CTNCA(1));
	state->ct1ea = V3D_READ(V3D_CTNEA(1));

	state->ct0cs = V3D_READ(V3D_CTNCS(0));
	state->ct1cs = V3D_READ(V3D_CTNCS(1));

	state->ct0ra0 = V3D_READ(V3D_CT00RA0);
	state->ct1ra0 = V3D_READ(V3D_CT01RA0);

	state->bpca = V3D_READ(V3D_BPCA);
	state->bpcs = V3D_READ(V3D_BPCS);
	state->bpoa = V3D_READ(V3D_BPOA);
	state->bpos = V3D_READ(V3D_BPOS);

	state->vpmbase = V3D_READ(V3D_VPMBASE);

	state->dbge = V3D_READ(V3D_DBGE);
	state->fdbgo = V3D_READ(V3D_FDBGO);
	state->fdbgb = V3D_READ(V3D_FDBGB);
	state->fdbgr = V3D_READ(V3D_FDBGR);
	state->fdbgs = V3D_READ(V3D_FDBGS);
	state->errstat = V3D_READ(V3D_ERRSTAT);

	/* We need to turn purgeable BOs into unpurgeable ones so that
	 * userspace has a chance to dump the hang state before the kernel
	 * decides to purge those BOs.
	 * Note that BO consistency at dump time cannot be guaranteed. For
	 * example, if the owner of these BOs decides to re-use them or mark
	 * them purgeable again there's nothing we can do to prevent it.
	 */
	for (i = 0; i < kernel_state->user_state.bo_count; i++) {
		struct vc4_bo *bo = to_vc4_bo(kernel_state->bo[i]);

		if (bo->madv == __VC4_MADV_NOTSUPP)
			continue;

		mutex_lock(&bo->madv_lock);
		if (!WARN_ON(bo->madv == __VC4_MADV_PURGED))
			bo->madv = VC4_MADV_WILLNEED;
		refcount_dec(&bo->usecnt);
		mutex_unlock(&bo->madv_lock);
	}

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (vc4->hang_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_free_hang_state(dev, kernel_state);
	} else {
		vc4->hang_state = kernel_state;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
	}
}

static void
vc4_reset(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	DRM_INFO("Resetting GPU.\n");

	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount) {
		/* Power the device off and back on by dropping the
		 * reference on runtime PM.
		 */
		pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
		pm_runtime_get_sync(&vc4->v3d->pdev->dev);
	}
	mutex_unlock(&vc4->power_lock);

	vc4_irq_reset(dev);

	/* Rearm the hangcheck -- another job might have been waiting
	 * for our hung one to get kicked off, and vc4_irq_reset()
	 * would have started it.
	 */
	vc4_queue_hangcheck(dev);
}

static void
vc4_reset_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, hangcheck.reset_work);

	vc4_save_hang_state(vc4->dev);

	vc4_reset(vc4->dev);
}

static void
vc4_hangcheck_elapsed(struct timer_list *t)
{
	struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
	struct drm_device *dev = vc4->dev;
	uint32_t ct0ca, ct1ca;
	unsigned long irqflags;
	struct vc4_exec_info *bin_exec, *render_exec;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	bin_exec = vc4_first_bin_job(vc4);
	render_exec = vc4_first_render_job(vc4);

	/* If idle, we can stop watching for hangs. */
	if (!bin_exec && !render_exec) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	ct0ca = V3D_READ(V3D_CTNCA(0));
	ct1ca = V3D_READ(V3D_CTNCA(1));

	/* If we've made any progress in execution, rearm the timer
	 * and wait.
	 */
	if ((bin_exec && ct0ca != bin_exec->last_ct0ca) ||
	    (render_exec && ct1ca != render_exec->last_ct1ca)) {
		if (bin_exec)
			bin_exec->last_ct0ca = ct0ca;
		if (render_exec)
			render_exec->last_ct1ca = ct1ca;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_queue_hangcheck(dev);
		return;
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* We've gone too long with no progress, reset. This has to
	 * be done from a work struct, since resetting can sleep and
	 * this timer hook isn't allowed to.
	 */
	schedule_work(&vc4->hangcheck.reset_work);
}

static void
submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Set the current and end address of the control list.
	 * Writing the end register is what starts the job.
	 */
	V3D_WRITE(V3D_CTNCA(thread), start);
	V3D_WRITE(V3D_CTNEA(thread), end);
}

int
vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
		   bool interruptible)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = 0;
	unsigned long timeout_expire;
	DEFINE_WAIT(wait);

	if (vc4->finished_seqno >= seqno)
		return 0;

	if (timeout_ns == 0)
		return -ETIME;

	timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns);

	trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);
	for (;;) {
		prepare_to_wait(&vc4->job_wait_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE :
				TASK_UNINTERRUPTIBLE);

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (vc4->finished_seqno >= seqno)
			break;

		if (timeout_ns != ~0ull) {
			if (time_after_eq(jiffies, timeout_expire)) {
				ret = -ETIME;
				break;
			}
			schedule_timeout(timeout_expire - jiffies);
		} else {
			schedule();
		}
	}

	finish_wait(&vc4->job_wait_queue, &wait);
	trace_vc4_wait_for_seqno_end(dev, seqno);

	return ret;
}
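
/* Typical call patterns for vc4_wait_for_seqno() (a summary of the uses
 * elsewhere in this file, not an exhaustive list): the wait ioctls below
 * pass a user-supplied timeout with interruptible == true, while
 * in-kernel dependency waits pass timeout_ns == ~0ull to block until the
 * seqno retires, e.g. the bin dependency wait in vc4_get_bcl():
 *
 *	ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);
 */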

static void
vc4_flush_caches(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Flush the GPU L2 caches. These caches sit on top of system
	 * L3 (the 128kb or so shared with the CPU), and are
	 * non-allocating in the L3.
	 */
	V3D_WRITE(V3D_L2CACTL,
		  V3D_L2CACTL_L2CCLR);

	V3D_WRITE(V3D_SLCACTL,
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}

static void
vc4_flush_texture_caches(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	V3D_WRITE(V3D_L2CACTL,
		  V3D_L2CACTL_L2CCLR);

	V3D_WRITE(V3D_SLCACTL,
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC));
}

/* Sets the registers for the next job to actually be executed in the
 * hardware.
 *
 * The job_lock should be held during this.
 */
void
vc4_submit_next_bin_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec;

again:
	exec = vc4_first_bin_job(vc4);
	if (!exec)
		return;

	vc4_flush_caches(dev);

	/* Only start the perfmon if it was not already started by a previous
	 * job.
	 */
	if (exec->perfmon && vc4->active_perfmon != exec->perfmon)
		vc4_perfmon_start(vc4, exec->perfmon);

	/* Either put the job in the binner if it uses the binner, or
	 * immediately move it to the to-be-rendered queue.
	 */
	if (exec->ct0ca != exec->ct0ea) {
		submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
	} else {
		struct vc4_exec_info *next;

		vc4_move_job_to_render(dev, exec);
		next = vc4_first_bin_job(vc4);

		/* We can't start the next bin job if the previous job had a
		 * different perfmon instance attached to it. The same goes
		 * if one of them had a perfmon attached to it and the other
		 * one doesn't.
		 */
		if (next && next->perfmon == exec->perfmon)
			goto again;
	}
}

void
vc4_submit_next_render_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec = vc4_first_render_job(vc4);

	if (!exec)
		return;

	/* A previous RCL may have written to one of our textures, and
	 * our full cache flush at bin time may have occurred before
	 * that RCL completed. Flush the texture cache now, but not
	 * the instructions or uniforms (since we don't write those
	 * from an RCL).
	 */
	vc4_flush_texture_caches(dev);

	submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
}

void
vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	bool was_empty = list_empty(&vc4->render_job_list);

	list_move_tail(&exec->head, &vc4->render_job_list);
	if (was_empty)
		vc4_submit_next_render_job(dev);
}

static void
vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
{
	struct vc4_bo *bo;
	unsigned i;

	for (i = 0; i < exec->bo_count; i++) {
		bo = to_vc4_bo(&exec->bo[i]->base);
		bo->seqno = seqno;

		reservation_object_add_shared_fence(bo->base.base.resv, exec->fence);
	}

	list_for_each_entry(bo, &exec->unref_list, unref_head) {
		bo->seqno = seqno;
	}

	for (i = 0; i < exec->rcl_write_bo_count; i++) {
		bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
		bo->write_seqno = seqno;

		reservation_object_add_excl_fence(bo->base.base.resv, exec->fence);
	}
}

static void
vc4_unlock_bo_reservations(struct drm_device *dev,
			   struct vc4_exec_info *exec,
			   struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < exec->bo_count; i++) {
		struct drm_gem_object *bo = &exec->bo[i]->base;

		ww_mutex_unlock(&bo->resv->lock);
	}

	ww_acquire_fini(acquire_ctx);
}

/* Takes the reservation lock on all the BOs being referenced, so that
 * at queue submit time we can update the reservations.
 *
 * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
 * (all of which are on exec->unref_list). They're entirely private
 * to vc4, so we don't attach dma-buf fences to them.
 */
static int
vc4_lock_bo_reservations(struct drm_device *dev,
			 struct vc4_exec_info *exec,
			 struct ww_acquire_ctx *acquire_ctx)
{
	int contended_lock = -1;
	int i, ret;
	struct drm_gem_object *bo;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended_lock != -1) {
		bo = &exec->bo[contended_lock]->base;
		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
						       acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < exec->bo_count; i++) {
		if (i == contended_lock)
			continue;

		bo = &exec->bo[i]->base;

		ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++) {
				bo = &exec->bo[j]->base;
				ww_mutex_unlock(&bo->resv->lock);
			}

			if (contended_lock != -1 && contended_lock >= i) {
				bo = &exec->bo[contended_lock]->base;

				ww_mutex_unlock(&bo->resv->lock);
			}

			if (ret == -EDEADLK) {
				contended_lock = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	/* Reserve space for our shared (read-only) fence references,
	 * before we commit the CL to the hardware.
	 */
	for (i = 0; i < exec->bo_count; i++) {
		bo = &exec->bo[i]->base;

		ret = reservation_object_reserve_shared(bo->resv, 1);
		if (ret) {
			vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
			return ret;
		}
	}

	return 0;
}
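
/* The retry dance above is the standard wound/wait (ww) mutex pattern:
 * a lock attempt returning -EDEADLK means another acquire context won
 * the ordering arbitration, so we drop every lock we hold, sleep on the
 * contended lock with ww_mutex_lock_slow_interruptible() (which cannot
 * itself deadlock, only succeed or be interrupted), and then retry the
 * rest of the set. This guarantees forward progress when two submits
 * reference overlapping BO sets in different orders.
 */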

/* Queues a struct vc4_exec_info for execution. If no job is
 * currently executing, then submits it.
 *
 * Unlike most GPUs, our hardware only handles one command list at a
 * time. To queue multiple jobs at once, we'd need to edit the
 * previous command list to have a jump to the new one at the end, and
 * then bump the end address. That's a change for a later date,
 * though.
 */
static int
vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
		 struct ww_acquire_ctx *acquire_ctx,
		 struct drm_syncobj *out_sync)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *renderjob;
	uint64_t seqno;
	unsigned long irqflags;
	struct vc4_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;
	fence->dev = dev;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	seqno = ++vc4->emit_seqno;
	exec->seqno = seqno;

	dma_fence_init(&fence->base, &vc4_fence_ops, &vc4->job_lock,
		       vc4->dma_fence_context, exec->seqno);
	fence->seqno = exec->seqno;
	exec->fence = &fence->base;

	if (out_sync)
		drm_syncobj_replace_fence(out_sync, exec->fence);

	vc4_update_bo_seqnos(exec, seqno);

	vc4_unlock_bo_reservations(dev, exec, acquire_ctx);

	list_add_tail(&exec->head, &vc4->bin_job_list);

	/* If no bin job was executing and if the render job (if any) has the
	 * same perfmon as our job attached to it (or if both jobs don't have
	 * perfmon activated), then kick ours off. Otherwise, it'll get
	 * started when the previous job's flush/render done interrupt occurs.
	 */
	renderjob = vc4_first_render_job(vc4);
	if (vc4_first_bin_job(vc4) == exec &&
	    (!renderjob || renderjob->perfmon == exec->perfmon)) {
		vc4_submit_next_bin_job(dev);
		vc4_queue_hangcheck(dev);
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return 0;
}

/**
 * vc4_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @exec: V3D job being set up
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list. This does the validation of the job's
 * BO list and reference counting for the lifetime of the job.
 */
static int
vc4_cl_lookup_bos(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	uint32_t *handles;
	int ret = 0;
	int i;

	exec->bo_count = args->bo_handle_count;

	if (!exec->bo_count) {
		/* See comment on bo_index for why we have to check
		 * this.
		 */
		DRM_DEBUG("Rendering requires BOs to validate\n");
		return -EINVAL;
	}

	exec->bo = kvmalloc_array(exec->bo_count,
				  sizeof(struct drm_gem_cma_object *),
				  GFP_KERNEL | __GFP_ZERO);
	if (!exec->bo) {
		DRM_ERROR("Failed to allocate validated BO pointers\n");
		return -ENOMEM;
	}

	handles = kvmalloc_array(exec->bo_count, sizeof(uint32_t), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_ERROR("Failed to allocate incoming GEM handles\n");
		goto fail;
	}

	if (copy_from_user(handles, u64_to_user_ptr(args->bo_handles),
			   exec->bo_count * sizeof(uint32_t))) {
		ret = -EFAULT;
		DRM_ERROR("Failed to copy in GEM handles\n");
		goto fail;
	}

	spin_lock(&file_priv->table_lock);
	for (i = 0; i < exec->bo_count; i++) {
		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
						     handles[i]);
		if (!bo) {
			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
				  i, handles[i]);
			ret = -EINVAL;
			break;
		}

		drm_gem_object_get(bo);
		exec->bo[i] = (struct drm_gem_cma_object *)bo;
	}
	spin_unlock(&file_priv->table_lock);

	if (ret)
		goto fail_put_bo;

	for (i = 0; i < exec->bo_count; i++) {
		ret = vc4_bo_inc_usecnt(to_vc4_bo(&exec->bo[i]->base));
		if (ret)
			goto fail_dec_usecnt;
	}

	kvfree(handles);
	return 0;

fail_dec_usecnt:
	/* Decrease usecnt on acquired objects.
	 * We cannot rely on vc4_complete_exec() to release resources here,
	 * because vc4_complete_exec() has no information about which BO has
	 * had its ->usecnt incremented.
	 * To make things easier we just free everything explicitly and set
	 * exec->bo to NULL so that vc4_complete_exec() skips the 'BO release'
	 * step.
	 */
	for (i-- ; i >= 0; i--)
		vc4_bo_dec_usecnt(to_vc4_bo(&exec->bo[i]->base));

fail_put_bo:
	/* Release any reference to acquired objects. */
	for (i = 0; i < exec->bo_count && exec->bo[i]; i++)
		drm_gem_object_put_unlocked(&exec->bo[i]->base);

fail:
	kvfree(handles);
	kvfree(exec->bo);
	exec->bo = NULL;
	return ret;
}

static int
vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	void *temp = NULL;
	void *bin;
	int ret = 0;
	uint32_t bin_offset = 0;
	uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size,
					     16);
	uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
	uint32_t exec_size = uniforms_offset + args->uniforms_size;
	uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) *
					  args->shader_rec_count);
	struct vc4_bo *bo;

	if (shader_rec_offset < args->bin_cl_size ||
	    uniforms_offset < shader_rec_offset ||
	    exec_size < uniforms_offset ||
	    args->shader_rec_count >= (UINT_MAX /
				       sizeof(struct vc4_shader_state)) ||
	    temp_size < exec_size) {
		DRM_DEBUG("overflow in exec arguments\n");
		ret = -EINVAL;
		goto fail;
	}

	/* Allocate space where we'll store the copied in user command lists
	 * and shader records.
	 *
	 * We don't just copy directly into the BOs because we need to
	 * read the contents back for validation, and I think the
	 * bo->vaddr is uncached access.
	 */
	temp = kvmalloc_array(temp_size, 1, GFP_KERNEL);
	if (!temp) {
		DRM_ERROR("Failed to allocate storage for copying "
			  "in bin/render CLs.\n");
		ret = -ENOMEM;
		goto fail;
	}
	bin = temp + bin_offset;
	exec->shader_rec_u = temp + shader_rec_offset;
	exec->uniforms_u = temp + uniforms_offset;
	exec->shader_state = temp + exec_size;
	exec->shader_state_size = args->shader_rec_count;

	if (copy_from_user(bin,
			   u64_to_user_ptr(args->bin_cl),
			   args->bin_cl_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->shader_rec_u,
			   u64_to_user_ptr(args->shader_rec),
			   args->shader_rec_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->uniforms_u,
			   u64_to_user_ptr(args->uniforms),
			   args->uniforms_size)) {
		ret = -EFAULT;
		goto fail;
	}

	bo = vc4_bo_create(dev, exec_size, true, VC4_BO_TYPE_BCL);
	if (IS_ERR(bo)) {
		DRM_ERROR("Couldn't allocate BO for binning\n");
		ret = PTR_ERR(bo);
		goto fail;
	}
	exec->exec_bo = &bo->base;

	list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
		      &exec->unref_list);

	exec->ct0ca = exec->exec_bo->paddr + bin_offset;

	exec->bin_u = bin;

	exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
	exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
	exec->shader_rec_size = args->shader_rec_size;

	exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
	exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
	exec->uniforms_size = args->uniforms_size;

	ret = vc4_validate_bin_cl(dev,
				  exec->exec_bo->vaddr + bin_offset,
				  bin,
				  exec);
	if (ret)
		goto fail;

	ret = vc4_validate_shader_recs(dev, exec);
	if (ret)
		goto fail;

	/* Block waiting on any previous rendering into the CS's VBO,
	 * IB, or textures, so that pixels are actually written by the
	 * time we try to read them.
	 */
	ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);

fail:
	kvfree(temp);
	return ret;
}

static void
vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;
	unsigned i;

	/* If we got force-completed because of GPU reset rather than
	 * through our IRQ handler, signal the fence now.
	 */
	if (exec->fence) {
		dma_fence_signal(exec->fence);
		dma_fence_put(exec->fence);
	}

	if (exec->bo) {
		for (i = 0; i < exec->bo_count; i++) {
			struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);

			vc4_bo_dec_usecnt(bo);
			drm_gem_object_put_unlocked(&exec->bo[i]->base);
		}
		kvfree(exec->bo);
	}

	while (!list_empty(&exec->unref_list)) {
		struct vc4_bo *bo = list_first_entry(&exec->unref_list,
						     struct vc4_bo, unref_head);
		list_del(&bo->unref_head);
		drm_gem_object_put_unlocked(&bo->base.base);
	}

	/* Free up the allocation of any bin slots we used. */
	spin_lock_irqsave(&vc4->job_lock, irqflags);
	vc4->bin_alloc_used &= ~exec->bin_slots;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* Release the reference we had on the perf monitor. */
	vc4_perfmon_put(exec->perfmon);

	vc4_v3d_pm_put(vc4);

	kfree(exec);
}

void
vc4_job_handle_completed(struct vc4_dev *vc4)
{
	unsigned long irqflags;
	struct vc4_seqno_cb *cb, *cb_temp;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	while (!list_empty(&vc4->job_done_list)) {
		struct vc4_exec_info *exec =
			list_first_entry(&vc4->job_done_list,
					 struct vc4_exec_info, head);
		list_del(&exec->head);

		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_complete_exec(vc4->dev, exec);
		spin_lock_irqsave(&vc4->job_lock, irqflags);
	}

	list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
		if (cb->seqno <= vc4->finished_seqno) {
			list_del_init(&cb->work.entry);
			schedule_work(&cb->work);
		}
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}

static void vc4_seqno_cb_work(struct work_struct *work)
{
	struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);

	cb->func(cb);
}

int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb))
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = 0;
	unsigned long irqflags;

	cb->func = func;
	INIT_WORK(&cb->work, vc4_seqno_cb_work);

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (seqno > vc4->finished_seqno) {
		cb->seqno = seqno;
		list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
	} else {
		schedule_work(&cb->work);
	}
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return ret;
}

/* Scheduled when any job has been completed, this walks the list of
 * jobs that have completed and unrefs their BOs and frees their exec
 * structs.
 */
static void
vc4_job_done_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, job_done_work);

	vc4_job_handle_completed(vc4);
}

static int
vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev,
				uint64_t seqno,
				uint64_t *timeout_ns)
{
	unsigned long start = jiffies;
	int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true);

	if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) {
		uint64_t delta = jiffies_to_nsecs(jiffies - start);

		if (*timeout_ns >= delta)
			*timeout_ns -= delta;
	}

	return ret;
}

int
vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_vc4_wait_seqno *args = data;

	return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
					       &args->timeout_ns);
}

int
vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	int ret;
	struct drm_vc4_wait_bo *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}
	bo = to_vc4_bo(gem_obj);

	ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
					      &args->timeout_ns);

	drm_gem_object_put_unlocked(gem_obj);
	return ret;
}

/**
 * vc4_submit_cl_ioctl() - Submits a job (frame) to the VC4.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * This is the main entrypoint for userspace to submit a 3D frame to
 * the GPU. Userspace provides the binner command list (if
 * applicable), and the kernel sets up the render command list to draw
 * to the framebuffer described in the ioctl, using the command lists
 * that the 3D engine's binner will produce.
 */
int
vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct drm_vc4_submit_cl *args = data;
	struct drm_syncobj *out_sync = NULL;
	struct vc4_exec_info *exec;
	struct ww_acquire_ctx acquire_ctx;
	struct dma_fence *in_fence;
	int ret = 0;

	if (!vc4->v3d) {
		DRM_DEBUG("VC4_SUBMIT_CL with no VC4 V3D probed\n");
		return -ENODEV;
	}

	if ((args->flags & ~(VC4_SUBMIT_CL_USE_CLEAR_COLOR |
			     VC4_SUBMIT_CL_FIXED_RCL_ORDER |
			     VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X |
			     VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y)) != 0) {
		DRM_DEBUG("Unknown flags: 0x%02x\n", args->flags);
		return -EINVAL;
	}

	if (args->pad2 != 0) {
		DRM_DEBUG("Invalid pad: 0x%08x\n", args->pad2);
		return -EINVAL;
	}

	exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
	if (!exec) {
		DRM_ERROR("malloc failure on exec struct\n");
		return -ENOMEM;
	}

	ret = vc4_v3d_pm_get(vc4);
	if (ret) {
		kfree(exec);
		return ret;
	}

	exec->args = args;
	INIT_LIST_HEAD(&exec->unref_list);

	ret = vc4_cl_lookup_bos(dev, file_priv, exec);
	if (ret)
		goto fail;

	if (args->perfmonid) {
		exec->perfmon = vc4_perfmon_find(vc4file,
						 args->perfmonid);
		if (!exec->perfmon) {
			ret = -ENOENT;
			goto fail;
		}
	}

	if (args->in_sync) {
		ret = drm_syncobj_find_fence(file_priv, args->in_sync,
					     0, 0, &in_fence);
		if (ret)
			goto fail;

		/* When the fence (or fence array) is exclusively from our
		 * context we can skip the wait since jobs are executed in
		 * order of their submission through this ioctl and this can
		 * only have fences from a prior job.
		 */
		if (!dma_fence_match_context(in_fence,
					     vc4->dma_fence_context)) {
			ret = dma_fence_wait(in_fence, true);
			if (ret) {
				dma_fence_put(in_fence);
				goto fail;
			}
		}

		dma_fence_put(in_fence);
	}

	if (exec->args->bin_cl_size != 0) {
		ret = vc4_get_bcl(dev, exec);
		if (ret)
			goto fail;
	} else {
		exec->ct0ca = 0;
		exec->ct0ea = 0;
	}

	ret = vc4_get_rcl(dev, exec);
	if (ret)
		goto fail;

	ret = vc4_lock_bo_reservations(dev, exec, &acquire_ctx);
	if (ret)
		goto fail;

	if (args->out_sync) {
		out_sync = drm_syncobj_find(file_priv, args->out_sync);
		if (!out_sync) {
			ret = -EINVAL;
			goto fail;
		}

		/* We replace the fence in out_sync in vc4_queue_submit since
		 * the render job could execute immediately after that call.
		 * If it finishes before our ioctl processing resumes the
		 * render job fence could already have been freed.
		 */
	}

	/* Clear this out of the struct we'll be putting in the queue,
	 * since it's part of our stack.
	 */
	exec->args = NULL;

	ret = vc4_queue_submit(dev, exec, &acquire_ctx, out_sync);

	/* The syncobj isn't part of the exec data and we need to free our
	 * reference even if job submission failed.
	 */
	if (out_sync)
		drm_syncobj_put(out_sync);

	if (ret)
		goto fail;

	/* Return the seqno for our job. */
	args->seqno = vc4->emit_seqno;

	return 0;

fail:
	vc4_complete_exec(vc4->dev, exec);

	return ret;
}
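
/* An illustrative, deliberately minimal userspace call sequence for
 * DRM_IOCTL_VC4_SUBMIT_CL (a sketch only; "fd" is an open DRM node and
 * the CL/shader-rec/uniform buffers are assumed to have been packed by
 * the caller, as Mesa's vc4 driver does; most fields are elided):
 *
 *	struct drm_vc4_submit_cl submit = {
 *		.bin_cl = (uintptr_t)bin_cl,
 *		.bin_cl_size = bin_cl_size,
 *		.bo_handles = (uintptr_t)bo_handles,
 *		.bo_handle_count = bo_handle_count,
 *		...
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, &submit);
 *
 * On success, submit.seqno holds the job's seqno and can be passed to
 * DRM_IOCTL_VC4_WAIT_SEQNO to wait for completion.
 */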

void
vc4_gem_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	vc4->dma_fence_context = dma_fence_context_alloc(1);

	INIT_LIST_HEAD(&vc4->bin_job_list);
	INIT_LIST_HEAD(&vc4->render_job_list);
	INIT_LIST_HEAD(&vc4->job_done_list);
	INIT_LIST_HEAD(&vc4->seqno_cb_list);
	spin_lock_init(&vc4->job_lock);

	INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
	timer_setup(&vc4->hangcheck.timer, vc4_hangcheck_elapsed, 0);

	INIT_WORK(&vc4->job_done_work, vc4_job_done_work);

	mutex_init(&vc4->power_lock);

	INIT_LIST_HEAD(&vc4->purgeable.list);
	mutex_init(&vc4->purgeable.lock);
}

void
vc4_gem_destroy(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Waiting for exec to finish would need to be done before
	 * unregistering V3D.
	 */
	WARN_ON(vc4->emit_seqno != vc4->finished_seqno);

	/* V3D should already have disabled its interrupt and cleared
	 * the overflow allocation registers. Now free the object.
	 */
	if (vc4->bin_bo) {
		drm_gem_object_put_unlocked(&vc4->bin_bo->base.base);
		vc4->bin_bo = NULL;
	}

	if (vc4->hang_state)
		vc4_free_hang_state(dev, vc4->hang_state);
}

int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_vc4_gem_madvise *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	int ret;

	switch (args->madv) {
	case VC4_MADV_DONTNEED:
	case VC4_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}

	bo = to_vc4_bo(gem_obj);

	/* Only BOs exposed to userspace can be purged. */
	if (bo->madv == __VC4_MADV_NOTSUPP) {
		DRM_DEBUG("madvise not supported on this BO\n");
		ret = -EINVAL;
		goto out_put_gem;
	}

	/* Not sure it's safe to purge imported BOs. Let's just assume it's
	 * not until proven otherwise.
	 */
	if (gem_obj->import_attach) {
		DRM_DEBUG("madvise not supported on imported BOs\n");
		ret = -EINVAL;
		goto out_put_gem;
	}

	mutex_lock(&bo->madv_lock);

	if (args->madv == VC4_MADV_DONTNEED && bo->madv == VC4_MADV_WILLNEED &&
	    !refcount_read(&bo->usecnt)) {
		/* If the BO is about to be marked as purgeable, is not used
		 * and is not already purgeable or purged, add it to the
		 * purgeable list.
		 */
		vc4_bo_add_to_purgeable_pool(bo);
	} else if (args->madv == VC4_MADV_WILLNEED &&
		   bo->madv == VC4_MADV_DONTNEED &&
		   !refcount_read(&bo->usecnt)) {
		/* The BO has not been purged yet, just remove it from
		 * the purgeable list.
		 */
		vc4_bo_remove_from_purgeable_pool(bo);
	}

	/* Save the purged state. */
	args->retained = bo->madv != __VC4_MADV_PURGED;

	/* Update internal madv state only if the bo was not purged. */
	if (bo->madv != __VC4_MADV_PURGED)
		bo->madv = args->madv;

	mutex_unlock(&bo->madv_lock);

	ret = 0;

out_put_gem:
	drm_gem_object_put_unlocked(gem_obj);

	return ret;
}
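
/* A userspace usage sketch for DRM_IOCTL_VC4_GEM_MADVISE (illustrative
 * only; "fd" is an open DRM node and "handle" a previously created GEM
 * handle). The typical pattern is to mark idle BOs purgeable and check
 * "retained" when reclaiming them:
 *
 *	struct drm_vc4_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = VC4_MADV_DONTNEED,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &madv);
 *	...
 *	madv.madv = VC4_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &madv);
 *	if (!madv.retained) {
 *		// The kernel purged the contents; reupload before reuse.
 *	}
 */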