/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kref.h>
#include "msm_gpu.h"

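/*
 * kref release callback: frees the queue once the last reference,
 * dropped via msm_submitqueue_put(), goes away.
 */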
void msm_submitqueue_destroy(struct kref *kref)
{
	struct msm_gpu_submitqueue *queue = container_of(kref,
		struct msm_gpu_submitqueue, ref);

	kfree(queue);
}

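/*
 * Look up a submitqueue by id under the read lock and take a reference
 * on it; the caller is responsible for dropping that reference with
 * msm_submitqueue_put().
 */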
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
		u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return NULL;

	read_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			kref_get(&entry->ref);
			read_unlock(&ctx->queuelock);

			return entry;
		}
	}

	read_unlock(&ctx->queuelock);
	return NULL;
}

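/*
 * Called when the file is closed: drop the list's reference on every
 * queue still attached to the context.
 */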
void msm_submitqueue_close(struct msm_file_private *ctx)
{
	struct msm_gpu_submitqueue *entry, *tmp;

	if (!ctx)
		return;

	/*
	 * No lock is needed here: the file is being closed, so no more
	 * user ioctls can race with us
	 */
	list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node)
		msm_submitqueue_put(entry);
}

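/*
 * Allocate a new submitqueue for the context, validate the requested
 * ring priority against the GPU, and link the queue into the context's
 * list, handing the new id back through *id when requested.
 */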
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
		u32 prio, u32 flags, u32 *id)
{
	struct msm_drm_private *priv = drm->dev_private;
	struct msm_gpu_submitqueue *queue;

	if (!ctx)
		return -ENODEV;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);

	if (!queue)
		return -ENOMEM;

	kref_init(&queue->ref);
	queue->flags = flags;

	if (priv->gpu) {
		if (prio >= priv->gpu->nr_rings) {
			/* Don't leak the queue on the error path */
			kfree(queue);
			return -EINVAL;
		}

		queue->prio = prio;
	}

	write_lock(&ctx->queuelock);

	queue->id = ctx->queueid++;

	if (id)
		*id = queue->id;

	list_add_tail(&queue->node, &ctx->submitqueues);

	write_unlock(&ctx->queuelock);

	return 0;
}

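/*
 * Set up the per-file submitqueue list and create the default queue
 * (id 0), which userspace cannot remove.
 */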
int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
{
	struct msm_drm_private *priv = drm->dev_private;
	int default_prio;

	if (!ctx)
		return 0;

	/*
	 * Select priority 2 as the "default priority"; if the GPU has
	 * fewer than three rings, fall back to the lowest priority
	 * (nr_rings - 1)
	 */
	default_prio = priv->gpu ?
		clamp_t(uint32_t, 2, 0, priv->gpu->nr_rings - 1) : 0;

	INIT_LIST_HEAD(&ctx->submitqueues);

	rwlock_init(&ctx->queuelock);

	return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
}

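/*
 * MSM_SUBMITQUEUE_PARAM_FAULTS: report the queue's fault counter back
 * to userspace.
 */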
static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
		struct drm_msm_submitqueue_query *args)
{
	size_t size = min_t(size_t, args->len, sizeof(queue->faults));
	int ret;

	/* If a zero length was passed in, return the data size we expect */
	if (!args->len) {
		args->len = sizeof(queue->faults);
		return 0;
	}

	/* Set the length to the actual size of the data */
	args->len = size;

	ret = copy_to_user(u64_to_user_ptr(args->data), &queue->faults, size);

	return ret ? -EFAULT : 0;
}

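/*
 * Look up the target queue and dispatch on the requested parameter;
 * MSM_SUBMITQUEUE_PARAM_FAULTS is the only supported query.
 */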
int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
		struct drm_msm_submitqueue_query *args)
{
	struct msm_gpu_submitqueue *queue;
	int ret = -EINVAL;

	if (args->pad)
		return -EINVAL;

	queue = msm_submitqueue_get(ctx, args->id);
	if (!queue)
		return -ENOENT;

	if (args->param == MSM_SUBMITQUEUE_PARAM_FAULTS)
		ret = msm_submitqueue_query_faults(queue, args);

	msm_submitqueue_put(queue);

	return ret;
}

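/*
 * Unlink a queue from the context by id and drop the list's reference;
 * the queue itself is freed once any remaining references are put.
 */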
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return 0;

	/*
	 * id 0 is the "default" queue and can't be destroyed
	 * by the user
	 */
	if (!id)
		return -ENOENT;

	write_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			list_del(&entry->node);
			write_unlock(&ctx->queuelock);

			msm_submitqueue_put(entry);
			return 0;
		}
	}

	write_unlock(&ctx->queuelock);
	return -ENOENT;
}