/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/component.h>
#include <linux/dma-fence.h>
#include <linux/moduleparam.h>
#include <linux/of_device.h>
#include <linux/thermal.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_dump.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#include "cmdstream.xml.h"

static const struct platform_device_id gpu_ids[] = {
	{ .name = "etnaviv-gpu,2d" },
	{ },
};

static bool etnaviv_dump_core = true;
module_param_named(dump_core, etnaviv_dump_core, bool, 0600);

/*
 * Driver functions:
 */

int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
{
	switch (param) {
	case ETNAVIV_PARAM_GPU_MODEL:
		*value = gpu->identity.model;
		break;

	case ETNAVIV_PARAM_GPU_REVISION:
		*value = gpu->identity.revision;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_0:
		*value = gpu->identity.features;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_1:
		*value = gpu->identity.minor_features0;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_2:
		*value = gpu->identity.minor_features1;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_3:
		*value = gpu->identity.minor_features2;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_4:
		*value = gpu->identity.minor_features3;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_5:
		*value = gpu->identity.minor_features4;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_6:
		*value = gpu->identity.minor_features5;
		break;

	case ETNAVIV_PARAM_GPU_STREAM_COUNT:
		*value = gpu->identity.stream_count;
		break;

	case ETNAVIV_PARAM_GPU_REGISTER_MAX:
		*value = gpu->identity.register_max;
		break;

	case ETNAVIV_PARAM_GPU_THREAD_COUNT:
		*value = gpu->identity.thread_count;
		break;

	case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
		*value = gpu->identity.vertex_cache_size;
		break;

	case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
		*value = gpu->identity.shader_core_count;
		break;

	case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
		*value = gpu->identity.pixel_pipes;
		break;

	case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
		*value = gpu->identity.vertex_output_buffer_size;
		break;

	case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
		*value = gpu->identity.buffer_size;
		break;

	case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
		*value = gpu->identity.instruction_count;
		break;

	case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
		*value = gpu->identity.num_constants;
		break;

	case ETNAVIV_PARAM_GPU_NUM_VARYINGS:
		*value = gpu->identity.varyings_count;
		break;

	default:
		DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
		return -EINVAL;
	}

	return 0;
}


#define etnaviv_is_model_rev(gpu, mod, rev) \
	((gpu)->identity.model == chipModel_##mod && \
	 (gpu)->identity.revision == rev)
#define etnaviv_field(val, field) \
	(((val) & field##__MASK) >> field##__SHIFT)
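/*
 * For illustration: etnaviv_field(specs[0], VIVS_HI_CHIP_SPECS_STREAM_COUNT)
 * expands to
 *   ((specs[0] & VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK)
 *		>> VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT)
 * i.e. it masks out one register field and shifts it down to bit 0, relying
 * on the __MASK/__SHIFT pairs from the generated register headers
 * (state_hi.xml.h and friends).
 */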

static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
{
	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		u32 specs[4];
		unsigned int streams;

		specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
		specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
		specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3);
		specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4);

		gpu->identity.stream_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_STREAM_COUNT);
		gpu->identity.register_max = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_REGISTER_MAX);
		gpu->identity.thread_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_THREAD_COUNT);
		gpu->identity.vertex_cache_size = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE);
		gpu->identity.shader_core_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT);
		gpu->identity.pixel_pipes = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_PIXEL_PIPES);
		gpu->identity.vertex_output_buffer_size =
			etnaviv_field(specs[0],
				VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE);

		gpu->identity.buffer_size = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE);
		gpu->identity.instruction_count = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT);
		gpu->identity.num_constants = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS);

		gpu->identity.varyings_count = etnaviv_field(specs[2],
					VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT);

		/* This overrides the value from older register if non-zero */
		streams = etnaviv_field(specs[3],
					VIVS_HI_CHIP_SPECS_4_STREAM_COUNT);
		if (streams)
			gpu->identity.stream_count = streams;
	}

	/* Fill in the stream count if not specified */
	if (gpu->identity.stream_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.stream_count = 4;
		else
			gpu->identity.stream_count = 1;
	}

	/* Convert the register max value */
	if (gpu->identity.register_max)
		gpu->identity.register_max = 1 << gpu->identity.register_max;
	else if (gpu->identity.model == chipModel_GC400)
		gpu->identity.register_max = 32;
	else
		gpu->identity.register_max = 64;

	/* Convert thread count */
	if (gpu->identity.thread_count)
		gpu->identity.thread_count = 1 << gpu->identity.thread_count;
	else if (gpu->identity.model == chipModel_GC400)
		gpu->identity.thread_count = 64;
	else if (gpu->identity.model == chipModel_GC500 ||
		 gpu->identity.model == chipModel_GC530)
		gpu->identity.thread_count = 128;
	else
		gpu->identity.thread_count = 256;

	if (gpu->identity.vertex_cache_size == 0)
		gpu->identity.vertex_cache_size = 8;

	if (gpu->identity.shader_core_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.shader_core_count = 2;
		else
			gpu->identity.shader_core_count = 1;
	}

	if (gpu->identity.pixel_pipes == 0)
		gpu->identity.pixel_pipes = 1;

	/* Convert vertex output buffer size */
	if (gpu->identity.vertex_output_buffer_size) {
		gpu->identity.vertex_output_buffer_size =
			1 << gpu->identity.vertex_output_buffer_size;
	} else if (gpu->identity.model == chipModel_GC400) {
		if (gpu->identity.revision < 0x4000)
			gpu->identity.vertex_output_buffer_size = 512;
		else if (gpu->identity.revision < 0x4200)
			gpu->identity.vertex_output_buffer_size = 256;
		else
			gpu->identity.vertex_output_buffer_size = 128;
	} else {
		gpu->identity.vertex_output_buffer_size = 512;
	}

	switch (gpu->identity.instruction_count) {
	case 0:
		if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
		    gpu->identity.model == chipModel_GC880)
			gpu->identity.instruction_count = 512;
		else
			gpu->identity.instruction_count = 256;
		break;

	case 1:
		gpu->identity.instruction_count = 1024;
		break;

	case 2:
		gpu->identity.instruction_count = 2048;
		break;

	default:
		gpu->identity.instruction_count = 256;
		break;
	}

	if (gpu->identity.num_constants == 0)
		gpu->identity.num_constants = 168;

	if (gpu->identity.varyings_count == 0) {
		if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0)
			gpu->identity.varyings_count = 12;
		else
			gpu->identity.varyings_count = 8;
	}

	/*
	 * For some cores, two varyings are consumed for position, so the
	 * maximum varying count needs to be reduced by one.
	 */
	if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5245) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
	    etnaviv_is_model_rev(gpu, GC3000, 0x5435) ||
	    etnaviv_is_model_rev(gpu, GC2200, 0x5244) ||
	    etnaviv_is_model_rev(gpu, GC2100, 0x5108) ||
	    etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
	    etnaviv_is_model_rev(gpu, GC1500, 0x5246) ||
	    etnaviv_is_model_rev(gpu, GC880, 0x5107) ||
	    etnaviv_is_model_rev(gpu, GC880, 0x5106))
		gpu->identity.varyings_count -= 1;
}

static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
{
	u32 chipIdentity;

	chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);

	/* Special case for older graphic cores. */
	if (etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_FAMILY) == 0x01) {
		gpu->identity.model = chipModel_GC500;
		gpu->identity.revision = etnaviv_field(chipIdentity,
					 VIVS_HI_CHIP_IDENTITY_REVISION);
	} else {
		gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
		gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);

		/*
		 * !!!! HACK ALERT !!!!
		 * Because people change device IDs without letting software
		 * know about it - here is the hack to make it all look the
		 * same.  Only for GC400 family.
		 */
		if ((gpu->identity.model & 0xff00) == 0x0400 &&
		    gpu->identity.model != chipModel_GC420) {
			gpu->identity.model = gpu->identity.model & 0x0400;
		}

		/* Another special case */
		if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
			u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
			u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);

			if (chipDate == 0x20080814 && chipTime == 0x12051100) {
				/*
				 * This IP has an ECO; put the correct
				 * revision in it.
				 */
				gpu->identity.revision = 0x1051;
			}
		}

		/*
		 * NXP likes to call the GPU on the i.MX6QP GC2000+, but in
		 * reality it's just a re-branded GC3000. We can identify this
		 * core by the upper half of the revision register being all 1.
		 * Fix model/rev here, so all other places can refer to this
		 * core by its real identity.
		 */
		if (etnaviv_is_model_rev(gpu, GC2000, 0xffff5450)) {
			gpu->identity.model = chipModel_GC3000;
			gpu->identity.revision &= 0xffff;
		}
	}

	dev_info(gpu->dev, "model: GC%x, revision: %x\n",
		 gpu->identity.model, gpu->identity.revision);

	gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);

	/* Disable fast clear on GC700. */
	if (gpu->identity.model == chipModel_GC700)
		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;

	if ((gpu->identity.model == chipModel_GC500 &&
	     gpu->identity.revision < 2) ||
	    (gpu->identity.model == chipModel_GC300 &&
	     gpu->identity.revision < 0x2000)) {

		/*
		 * GC500 rev 1.x and GC300 revisions below 2.0 don't have
		 * these registers.
		 */
		gpu->identity.minor_features0 = 0;
		gpu->identity.minor_features1 = 0;
		gpu->identity.minor_features2 = 0;
		gpu->identity.minor_features3 = 0;
		gpu->identity.minor_features4 = 0;
		gpu->identity.minor_features5 = 0;
	} else
		gpu->identity.minor_features0 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);

	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		gpu->identity.minor_features1 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
		gpu->identity.minor_features2 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
		gpu->identity.minor_features3 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
		gpu->identity.minor_features4 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_4);
		gpu->identity.minor_features5 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
	}

	/* GC600 idle register reports zero bits where modules aren't present */
	if (gpu->identity.model == chipModel_GC600) {
		gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
				 VIVS_HI_IDLE_STATE_RA |
				 VIVS_HI_IDLE_STATE_SE |
				 VIVS_HI_IDLE_STATE_PA |
				 VIVS_HI_IDLE_STATE_SH |
				 VIVS_HI_IDLE_STATE_PE |
				 VIVS_HI_IDLE_STATE_DE |
				 VIVS_HI_IDLE_STATE_FE;
	} else {
		gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
	}

	etnaviv_hw_specs(gpu);
}

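/*
 * Load a new clock divider: the FSCALE value is latched by writing the
 * clock control word with FSCALE_CMD_LOAD set and then clearing the load
 * bit again while keeping the rest of the word unchanged.
 */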
static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
{
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
		  VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
}

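/*
 * Each cooling step halves the effective GPU clock: cores with dynamic
 * frequency scaling divide the base clock rate directly, older cores
 * program the HI clock divider instead (FSCALE 64/64 at freq_scale 0
 * down to 1/64 at freq_scale 6).
 */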
static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
{
	if (gpu->identity.minor_features2 &
	    chipMinorFeatures2_DYNAMIC_FREQUENCY_SCALING) {
		clk_set_rate(gpu->clk_core,
			     gpu->base_rate_core >> gpu->freq_scale);
		clk_set_rate(gpu->clk_shader,
			     gpu->base_rate_shader >> gpu->freq_scale);
	} else {
		unsigned int fscale = 1 << (6 - gpu->freq_scale);
		u32 clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
			    VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);

		etnaviv_gpu_load_clock(gpu, clock);
	}
}

static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
{
	u32 control, idle;
	unsigned long timeout;
	bool failed = true;

	/* TODO
	 *
	 * - clock gating
	 * - pulse eater
	 * - what about VG?
	 */

	/* We hope that the GPU resets in under one second */
	timeout = jiffies + msecs_to_jiffies(1000);

	while (time_is_after_jiffies(timeout)) {
		/* enable clock */
		etnaviv_gpu_update_clock(gpu);

		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		/* Wait for stable clock.  Vivante's code waited for 1ms */
		usleep_range(1000, 10000);

		/* isolate the GPU. */
		control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* set soft reset. */
		control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* wait for reset. */
		msleep(1);

		/* reset soft reset bit. */
		control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* reset GPU isolation. */
		control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* read idle register. */
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		/* try resetting again if the FE is not idle */
		if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
			dev_dbg(gpu->dev, "FE is not idle\n");
			continue;
		}

		/* read reset register. */
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		/* is the GPU idle? */
		if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
		    ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
			dev_dbg(gpu->dev, "GPU is not idle\n");
			continue;
		}

		failed = false;
		break;
	}

	if (failed) {
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
			idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");

		return -EBUSY;
	}

	/* We rely on the GPU running, so program the clock */
	etnaviv_gpu_update_clock(gpu);

	return 0;
}

static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
{
	u32 pmc, ppc;

	/* enable clock gating */
	ppc = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
	ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;

	/* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */
	if (gpu->identity.revision == 0x4301 ||
	    gpu->identity.revision == 0x4302)
		ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING;

	gpu_write(gpu, VIVS_PM_POWER_CONTROLS, ppc);

	pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS);

	/* Disable PA clock gating for GC400+ without bugfix except for GC420 */
	if (gpu->identity.model >= chipModel_GC400 &&
	    gpu->identity.model != chipModel_GC420 &&
	    !(gpu->identity.minor_features3 & chipMinorFeatures3_BUG_FIXES12))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA;

	/*
	 * Disable PE clock gating on revs < 5.0.0.0 when HZ is
	 * present without a bug fix.
	 */
	if (gpu->identity.revision < 0x5000 &&
	    gpu->identity.minor_features0 & chipMinorFeatures0_HZ &&
	    !(gpu->identity.minor_features1 &
	      chipMinorFeatures1_DISABLE_PE_GATING))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE;

	if (gpu->identity.revision < 0x5422)
		pmc |= BIT(15); /* Unknown bit */

	/* Disable TX clock gating on affected core revisions. */
	if (etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
	    etnaviv_is_model_rev(gpu, GC2000, 0x5108))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;

	pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
	pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;

	gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
}

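/*
 * Kick the FE at 'address' (a GPU virtual address); 'prefetch' is the
 * count of 64-bit command words the FE should fetch, as produced by
 * etnaviv_buffer_init() (see etnaviv_gpu_hw_init() below).
 */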
void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
{
	gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS, address);
	gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
		  VIVS_FE_COMMAND_CONTROL_ENABLE |
		  VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
}

static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
{
	/*
	 * Base value for VIVS_PM_PULSE_EATER register on models where it
	 * cannot be read, extracted from vivante kernel driver.
	 */
	u32 pulse_eater = 0x01590880;

	if (etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5222)) {
		pulse_eater |= BIT(23);
	}

	if (etnaviv_is_model_rev(gpu, GC1000, 0x5039) ||
	    etnaviv_is_model_rev(gpu, GC1000, 0x5040)) {
		pulse_eater &= ~BIT(16);
		pulse_eater |= BIT(17);
	}

	if ((gpu->identity.revision > 0x5420) &&
	    (gpu->identity.features & chipFeatures_PIPE_3D)) {
		/* Performance fix: disable internal DFS */
		pulse_eater = gpu_read(gpu, VIVS_PM_PULSE_EATER);
		pulse_eater |= BIT(18);
	}

	gpu_write(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
}

static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
{
	u16 prefetch;

	if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
	     etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
	    gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
		u32 mc_memory_debug;

		mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;

		if (gpu->identity.revision == 0x5007)
			mc_memory_debug |= 0x0c;
		else
			mc_memory_debug |= 0x08;

		gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
	}

	/* enable module-level clock gating */
	etnaviv_gpu_enable_mlcg(gpu);

	/*
	 * Update GPU AXI cache attribute to "cacheable, no allocate".
	 * This is necessary to prevent the iMX6 SoC locking up.
	 */
	gpu_write(gpu, VIVS_HI_AXI_CONFIG,
		  VIVS_HI_AXI_CONFIG_AWCACHE(2) |
		  VIVS_HI_AXI_CONFIG_ARCACHE(2));

	/* GC2000 rev 5108 needs a special bus config */
	if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) {
		u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);

		bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
				VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
		bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
			      VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
		gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
	}

	/* setup the pulse eater */
	etnaviv_gpu_setup_pulse_eater(gpu);

	/* setup the MMU */
	etnaviv_iommu_restore(gpu);

	/* Start command processor */
	prefetch = etnaviv_buffer_init(gpu);

	gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
	etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(gpu->buffer),
			     prefetch);
}

int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
{
	int ret, i;

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0) {
		dev_err(gpu->dev, "Failed to enable GPU power domain\n");
		return ret;
	}

	etnaviv_hw_identify(gpu);

	if (gpu->identity.model == 0) {
		dev_err(gpu->dev, "Unknown GPU model\n");
		ret = -ENXIO;
		goto fail;
	}

	/* Exclude VG cores with FE2.0 */
	if (gpu->identity.features & chipFeatures_PIPE_VG &&
	    gpu->identity.features & chipFeatures_FE20) {
		dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
		ret = -ENXIO;
		goto fail;
	}

	/*
	 * Set the GPU linear window to be at the end of the DMA window, where
	 * the CMA area is likely to reside. This ensures that we are able to
	 * map the command buffers while having the linear window overlap as
	 * much RAM as possible, so we can optimize mappings for other buffers.
	 *
	 * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads
	 * to different views of the memory on the individual engines.
	 */
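	/*
	 * Worked example (illustrative, assuming PHYS_OFFSET is 0): with a
	 * 32-bit DMA mask, memory_base becomes 0xffffffff - SZ_2G + 1 =
	 * 0x80000000, i.e. the linear window covers the top 2GiB of the
	 * physical address space.
	 */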
	if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
	    (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
		u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);

		if (dma_mask < PHYS_OFFSET + SZ_2G)
			gpu->memory_base = PHYS_OFFSET;
		else
			gpu->memory_base = dma_mask - SZ_2G + 1;
	} else if (PHYS_OFFSET >= SZ_2G) {
		dev_info(gpu->dev, "Need to move linear window on MC1.0, disabling TS\n");
		gpu->memory_base = PHYS_OFFSET;
		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
	}

	ret = etnaviv_hw_reset(gpu);
	if (ret) {
		dev_err(gpu->dev, "GPU reset failed\n");
		goto fail;
	}

	gpu->mmu = etnaviv_iommu_new(gpu);
	if (IS_ERR(gpu->mmu)) {
		dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n");
		ret = PTR_ERR(gpu->mmu);
		goto fail;
	}

	gpu->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(gpu);
	if (IS_ERR(gpu->cmdbuf_suballoc)) {
		dev_err(gpu->dev, "Failed to create cmdbuf suballocator\n");
		ret = PTR_ERR(gpu->cmdbuf_suballoc);
		goto fail;
	}

	/* Create buffer: */
	gpu->buffer = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc, PAGE_SIZE, 0);
	if (!gpu->buffer) {
		ret = -ENOMEM;
		dev_err(gpu->dev, "could not create command buffer\n");
		goto destroy_iommu;
	}

	if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
	    etnaviv_cmdbuf_get_va(gpu->buffer) > 0x80000000) {
		ret = -EINVAL;
		dev_err(gpu->dev,
			"command buffer outside valid memory window\n");
		goto free_buffer;
	}

	/* Setup event management */
	spin_lock_init(&gpu->event_spinlock);
	init_completion(&gpu->event_free);
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		gpu->event[i].used = false;
		complete(&gpu->event_free);
	}

	/* Now program the hardware */
	mutex_lock(&gpu->lock);
	etnaviv_gpu_hw_init(gpu);
	gpu->exec_state = -1;
	mutex_unlock(&gpu->lock);

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;

free_buffer:
	etnaviv_cmdbuf_free(gpu->buffer);
	gpu->buffer = NULL;
destroy_iommu:
	etnaviv_iommu_destroy(gpu->mmu);
	gpu->mmu = NULL;
fail:
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}

#ifdef CONFIG_DEBUG_FS
struct dma_debug {
	u32 address[2];
	u32 state[2];
};

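/*
 * Sample the FE DMA address and debug state, then re-read them up to 500
 * times: if neither value ever changes, the front end is most likely
 * stuck rather than merely busy.
 */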
static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
{
	u32 i;

	debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	debug->state[0] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

	for (i = 0; i < 500; i++) {
		debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
		debug->state[1] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

		if (debug->address[0] != debug->address[1])
			break;

		if (debug->state[0] != debug->state[1])
			break;
	}
}

int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct dma_debug debug;
	u32 dma_lo, dma_hi, axi, idle;
	int ret;

	seq_printf(m, "%s Status:\n", dev_name(gpu->dev));

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0)
		return ret;

	dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
	dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
	axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

	verify_dma(gpu, &debug);

	seq_puts(m, "\tfeatures\n");
	seq_printf(m, "\t minor_features0: 0x%08x\n",
		   gpu->identity.minor_features0);
	seq_printf(m, "\t minor_features1: 0x%08x\n",
		   gpu->identity.minor_features1);
	seq_printf(m, "\t minor_features2: 0x%08x\n",
		   gpu->identity.minor_features2);
	seq_printf(m, "\t minor_features3: 0x%08x\n",
		   gpu->identity.minor_features3);
	seq_printf(m, "\t minor_features4: 0x%08x\n",
		   gpu->identity.minor_features4);
	seq_printf(m, "\t minor_features5: 0x%08x\n",
		   gpu->identity.minor_features5);

	seq_puts(m, "\tspecs\n");
	seq_printf(m, "\t stream_count: %d\n",
		   gpu->identity.stream_count);
	seq_printf(m, "\t register_max: %d\n",
		   gpu->identity.register_max);
	seq_printf(m, "\t thread_count: %d\n",
		   gpu->identity.thread_count);
	seq_printf(m, "\t vertex_cache_size: %d\n",
		   gpu->identity.vertex_cache_size);
	seq_printf(m, "\t shader_core_count: %d\n",
		   gpu->identity.shader_core_count);
	seq_printf(m, "\t pixel_pipes: %d\n",
		   gpu->identity.pixel_pipes);
	seq_printf(m, "\t vertex_output_buffer_size: %d\n",
		   gpu->identity.vertex_output_buffer_size);
	seq_printf(m, "\t buffer_size: %d\n",
		   gpu->identity.buffer_size);
	seq_printf(m, "\t instruction_count: %d\n",
		   gpu->identity.instruction_count);
	seq_printf(m, "\t num_constants: %d\n",
		   gpu->identity.num_constants);
	seq_printf(m, "\t varyings_count: %d\n",
		   gpu->identity.varyings_count);

	seq_printf(m, "\taxi: 0x%08x\n", axi);
	seq_printf(m, "\tidle: 0x%08x\n", idle);
	idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
	if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
		seq_puts(m, "\t FE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
		seq_puts(m, "\t DE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
		seq_puts(m, "\t PE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
		seq_puts(m, "\t SH is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
		seq_puts(m, "\t PA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
		seq_puts(m, "\t SE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
		seq_puts(m, "\t RA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
		seq_puts(m, "\t TX is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
		seq_puts(m, "\t VG is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
		seq_puts(m, "\t IM is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
		seq_puts(m, "\t FP is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
		seq_puts(m, "\t TS is not idle\n");
	if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
		seq_puts(m, "\t AXI low power mode\n");

	if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
		u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
		u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
		u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);

		seq_puts(m, "\tMC\n");
		seq_printf(m, "\t read0: 0x%08x\n", read0);
		seq_printf(m, "\t read1: 0x%08x\n", read1);
		seq_printf(m, "\t write: 0x%08x\n", write);
	}

	seq_puts(m, "\tDMA ");

	if (debug.address[0] == debug.address[1] &&
	    debug.state[0] == debug.state[1]) {
		seq_puts(m, "seems to be stuck\n");
	} else if (debug.address[0] == debug.address[1]) {
		seq_puts(m, "address is constant\n");
	} else {
		seq_puts(m, "is running\n");
	}

	seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
	seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]);
	seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]);
	seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
	seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
		   dma_lo, dma_hi);

	ret = 0;

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}
#endif

/*
 * Hangcheck detection for locked gpu:
 */
static void recover_worker(struct work_struct *work)
{
	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
					       recover_work);
	unsigned long flags;
	unsigned int i;

	dev_err(gpu->dev, "hangcheck recover!\n");

	if (pm_runtime_get_sync(gpu->dev) < 0)
		return;

	mutex_lock(&gpu->lock);

	/* Only catch the first event, or when manually re-armed */
	if (etnaviv_dump_core) {
		etnaviv_core_dump(gpu);
		etnaviv_dump_core = false;
	}

	etnaviv_hw_reset(gpu);

	/* complete all events, the GPU won't do it after the reset */
	spin_lock_irqsave(&gpu->event_spinlock, flags);
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		if (!gpu->event[i].used)
			continue;
		dma_fence_signal(gpu->event[i].fence);
		gpu->event[i].fence = NULL;
		gpu->event[i].used = false;
		complete(&gpu->event_free);
	}
	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
	gpu->completed_fence = gpu->active_fence;

	etnaviv_gpu_hw_init(gpu);
	gpu->lastctx = NULL;
	gpu->exec_state = -1;

	mutex_unlock(&gpu->lock);
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	/* Retire the buffer objects in a work */
	etnaviv_queue_work(gpu->drm, &gpu->retire_work);
}

static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
{
	DBG("%s", dev_name(gpu->dev));
	mod_timer(&gpu->hangcheck_timer,
		  round_jiffies_up(jiffies + DRM_ETNAVIV_HANGCHECK_JIFFIES));
}

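/*
 * Timer callback: the GPU is declared hung when, since the last check,
 * neither the completed fence number advanced nor the FE DMA address
 * moved by more than a trivial amount, while submitted work is still
 * outstanding (active fence ahead of the completed one).
 */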
static void hangcheck_handler(unsigned long data)
{
	struct etnaviv_gpu *gpu = (struct etnaviv_gpu *)data;
	u32 fence = gpu->completed_fence;
	bool progress = false;

	if (fence != gpu->hangcheck_fence) {
		gpu->hangcheck_fence = fence;
		progress = true;
	}

	if (!progress) {
		u32 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
		int change = dma_addr - gpu->hangcheck_dma_addr;

		if (change < 0 || change > 16) {
			gpu->hangcheck_dma_addr = dma_addr;
			progress = true;
		}
	}

	if (!progress && fence_after(gpu->active_fence, fence)) {
		dev_err(gpu->dev, "hangcheck detected gpu lockup!\n");
		dev_err(gpu->dev, " completed fence: %u\n", fence);
		dev_err(gpu->dev, " active fence: %u\n",
			gpu->active_fence);
		etnaviv_queue_work(gpu->drm, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (fence_after(gpu->active_fence, gpu->hangcheck_fence))
		hangcheck_timer_reset(gpu);
}

static void hangcheck_disable(struct etnaviv_gpu *gpu)
{
	del_timer_sync(&gpu->hangcheck_timer);
	cancel_work_sync(&gpu->recover_work);
}

/* fence object management */
struct etnaviv_fence {
	struct etnaviv_gpu *gpu;
	struct dma_fence base;
};

static inline struct etnaviv_fence *to_etnaviv_fence(struct dma_fence *fence)
{
	return container_of(fence, struct etnaviv_fence, base);
}

static const char *etnaviv_fence_get_driver_name(struct dma_fence *fence)
{
	return "etnaviv";
}

static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	return dev_name(f->gpu->dev);
}

static bool etnaviv_fence_enable_signaling(struct dma_fence *fence)
{
	return true;
}

static bool etnaviv_fence_signaled(struct dma_fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	return fence_completed(f->gpu, f->base.seqno);
}

static void etnaviv_fence_release(struct dma_fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	kfree_rcu(f, base.rcu);
}

static const struct dma_fence_ops etnaviv_fence_ops = {
	.get_driver_name = etnaviv_fence_get_driver_name,
	.get_timeline_name = etnaviv_fence_get_timeline_name,
	.enable_signaling = etnaviv_fence_enable_signaling,
	.signaled = etnaviv_fence_signaled,
	.wait = dma_fence_default_wait,
	.release = etnaviv_fence_release,
};

static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
{
	struct etnaviv_fence *f;

	/*
	 * GPU lock must already be held, otherwise fence completion order might
	 * not match the seqno order assigned here.
	 */
	lockdep_assert_held(&gpu->lock);

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->gpu = gpu;

	dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
		       gpu->fence_context, ++gpu->next_fence);

	return &f->base;
}

int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
	unsigned int context, bool exclusive, bool explicit)
{
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	if (!exclusive) {
		ret = reservation_object_reserve_shared(robj);
		if (ret)
			return ret;
	}

	if (explicit)
		return 0;

	/*
	 * If we have any shared fences, then the exclusive fence
	 * should be ignored as it will already have been signalled.
	 */
	fobj = reservation_object_get_list(robj);
	if (!fobj || fobj->shared_count == 0) {
		/* Wait on any existing exclusive fence which isn't our own */
		fence = reservation_object_get_excl(robj);
		if (fence && fence->context != context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(robj));
		if (fence->context != context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/*
 * event management:
 */

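/*
 * Events are a fixed-size pool in struct etnaviv_gpu; gpu->event_free is a
 * completion used as a counting semaphore for free slots (one complete()
 * per slot at init time), so event_alloc() may sleep until event_free()
 * returns a slot.
 */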
static unsigned int event_alloc(struct etnaviv_gpu *gpu)
{
	unsigned long ret, flags;
	unsigned int i, event = ~0U;

	ret = wait_for_completion_timeout(&gpu->event_free,
					  msecs_to_jiffies(10 * 10000));
	if (!ret)
		dev_err(gpu->dev, "wait_for_completion_timeout failed\n");

	spin_lock_irqsave(&gpu->event_spinlock, flags);

	/* find first free event */
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		if (gpu->event[i].used == false) {
			gpu->event[i].used = true;
			event = i;
			break;
		}
	}

	spin_unlock_irqrestore(&gpu->event_spinlock, flags);

	return event;
}

static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
{
	unsigned long flags;

	spin_lock_irqsave(&gpu->event_spinlock, flags);

	if (gpu->event[event].used == false) {
		dev_warn(gpu->dev, "event %u is already marked as free\n",
			 event);
		spin_unlock_irqrestore(&gpu->event_spinlock, flags);
	} else {
		gpu->event[event].used = false;
		spin_unlock_irqrestore(&gpu->event_spinlock, flags);

		complete(&gpu->event_free);
	}
}

/*
 * Cmdstream submission/retirement:
 */

static void retire_worker(struct work_struct *work)
{
	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
					       retire_work);
	u32 fence = gpu->completed_fence;
	struct etnaviv_cmdbuf *cmdbuf, *tmp;
	unsigned int i;

	mutex_lock(&gpu->lock);
	list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
		if (!dma_fence_is_signaled(cmdbuf->fence))
			break;

		list_del(&cmdbuf->node);
		dma_fence_put(cmdbuf->fence);

		for (i = 0; i < cmdbuf->nr_bos; i++) {
			struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i];
			struct etnaviv_gem_object *etnaviv_obj = mapping->object;

			atomic_dec(&etnaviv_obj->gpu_active);
			/* drop the refcount taken in etnaviv_gpu_submit */
			etnaviv_gem_mapping_unreference(mapping);
		}

		etnaviv_cmdbuf_free(cmdbuf);
		/*
		 * We need to balance the runtime PM count caused by
		 * each submission. Upon submission, we increment
		 * the runtime PM counter, and allocate one event.
		 * So here, we put the runtime PM count for each
		 * completed event.
		 */
		pm_runtime_put_autosuspend(gpu->dev);
	}

	gpu->retired_fence = fence;

	mutex_unlock(&gpu->lock);

	wake_up_all(&gpu->fence_event);
}

int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
	u32 fence, struct timespec *timeout)
{
	int ret;

	if (fence_after(fence, gpu->next_fence)) {
		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
			  fence, gpu->next_fence);
		return -EINVAL;
	}

	if (!timeout) {
		/* No timeout was requested: just test for completion */
		ret = fence_completed(gpu, fence) ? 0 : -EBUSY;
	} else {
		unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);

		ret = wait_event_interruptible_timeout(gpu->fence_event,
						fence_completed(gpu, fence),
						remaining);
		if (ret == 0) {
			DBG("timeout waiting for fence: %u (retired: %u completed: %u)",
			    fence, gpu->retired_fence,
			    gpu->completed_fence);
			ret = -ETIMEDOUT;
		} else if (ret != -ERESTARTSYS) {
			ret = 0;
		}
	}

	return ret;
}

/*
 * Wait for an object to become inactive.  This, on its own, is not race
 * free: the object is moved by the retire worker off the active list, and
 * then the iova is put.  Moreover, the object could be re-submitted just
 * after we notice that it's become inactive.
 *
 * Although the retirement happens under the gpu lock, we don't want to hold
 * that lock in this function while waiting.
 */
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout)
{
	unsigned long remaining;
	long ret;

	if (!timeout)
		return !is_active(etnaviv_obj) ? 0 : -EBUSY;

	remaining = etnaviv_timeout_to_jiffies(timeout);

	ret = wait_event_interruptible_timeout(gpu->fence_event,
					       !is_active(etnaviv_obj),
					       remaining);
	if (ret > 0) {
		struct etnaviv_drm_private *priv = gpu->drm->dev_private;

		/* Synchronise with the retire worker */
		flush_workqueue(priv->wq);
		return 0;
	} else if (ret == -ERESTARTSYS) {
		return -ERESTARTSYS;
	} else {
		return -ETIMEDOUT;
	}
}

int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu)
{
	return pm_runtime_get_sync(gpu->dev);
}

void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
{
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);
}

/* add bo's to gpu's ring, and kick gpu: */
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
{
	struct dma_fence *fence;
	unsigned int event, i;
	int ret;

	ret = etnaviv_gpu_pm_get_sync(gpu);
	if (ret < 0)
		return ret;

	/*
	 * TODO
	 *
	 * - flush
	 * - data endian
	 * - prefetch
	 *
	 */

	event = event_alloc(gpu);
	if (unlikely(event == ~0U)) {
		DRM_ERROR("no free event\n");
		ret = -EBUSY;
		goto out_pm_put;
	}

	mutex_lock(&gpu->lock);

	fence = etnaviv_gpu_fence_alloc(gpu);
	if (!fence) {
		event_free(gpu, event);
		ret = -ENOMEM;
		goto out_unlock;
	}

	gpu->event[event].fence = fence;
	submit->fence = dma_fence_get(fence);
	gpu->active_fence = submit->fence->seqno;

	if (gpu->lastctx != cmdbuf->ctx) {
		gpu->mmu->need_flush = true;
		gpu->switch_context = true;
		gpu->lastctx = cmdbuf->ctx;
	}

	etnaviv_buffer_queue(gpu, event, cmdbuf);

	cmdbuf->fence = fence;
	list_add_tail(&cmdbuf->node, &gpu->active_cmd_list);

	/* We're committed to adding this command buffer, hold a PM reference */
	pm_runtime_get_noresume(gpu->dev);

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;

		/* Each cmdbuf takes a refcount on the mapping */
		etnaviv_gem_mapping_reference(submit->bos[i].mapping);
		cmdbuf->bo_map[i] = submit->bos[i].mapping;
		atomic_inc(&etnaviv_obj->gpu_active);

		if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
			reservation_object_add_excl_fence(etnaviv_obj->resv,
							  fence);
		else
			reservation_object_add_shared_fence(etnaviv_obj->resv,
							    fence);
	}
	cmdbuf->nr_bos = submit->nr_bos;
	hangcheck_timer_reset(gpu);
	ret = 0;

out_unlock:
	mutex_unlock(&gpu->lock);

out_pm_put:
	etnaviv_gpu_pm_put(gpu);

	return ret;
}

/*
 * Init/Cleanup:
 */
static irqreturn_t irq_handler(int irq, void *data)
{
	struct etnaviv_gpu *gpu = data;
	irqreturn_t ret = IRQ_NONE;

	u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);

	if (intr != 0) {
		int event;

		pm_runtime_mark_last_busy(gpu->dev);

		dev_dbg(gpu->dev, "intr 0x%08x\n", intr);

		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
			dev_err(gpu->dev, "AXI bus error\n");
			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
		}

		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION) {
			int i;

			dev_err_ratelimited(gpu->dev,
					    "MMU fault status 0x%08x\n",
					    gpu_read(gpu, VIVS_MMUv2_STATUS));
			for (i = 0; i < 4; i++) {
				dev_err_ratelimited(gpu->dev,
					"MMU %d fault addr 0x%08x\n",
					i, gpu_read(gpu,
					VIVS_MMUv2_EXCEPTION_ADDR(i)));
			}
			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION;
		}

		while ((event = ffs(intr)) != 0) {
			struct dma_fence *fence;

			event -= 1;

			intr &= ~(1 << event);

			dev_dbg(gpu->dev, "event %u\n", event);

			fence = gpu->event[event].fence;
			gpu->event[event].fence = NULL;
			dma_fence_signal(fence);

			/*
			 * Events can be processed out of order.  Eg,
			 * - allocate and queue event 0
			 * - allocate event 1
			 * - event 0 completes, we process it
			 * - allocate and queue event 0
			 * - event 1 and event 0 complete
			 * we can end up processing event 0 first, then 1.
			 */
			if (fence_after(fence->seqno, gpu->completed_fence))
				gpu->completed_fence = fence->seqno;

			event_free(gpu, event);
		}

		/* Retire the buffer objects in a work */
		etnaviv_queue_work(gpu->drm, &gpu->retire_work);

		ret = IRQ_HANDLED;
	}

	return ret;
}

static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
{
	int ret;

	if (gpu->clk_bus) {
		ret = clk_prepare_enable(gpu->clk_bus);
		if (ret)
			return ret;
	}

	if (gpu->clk_core) {
		ret = clk_prepare_enable(gpu->clk_core);
		if (ret)
			goto disable_clk_bus;
	}

	if (gpu->clk_shader) {
		ret = clk_prepare_enable(gpu->clk_shader);
		if (ret)
			goto disable_clk_core;
	}

	return 0;

disable_clk_core:
	if (gpu->clk_core)
		clk_disable_unprepare(gpu->clk_core);
disable_clk_bus:
	if (gpu->clk_bus)
		clk_disable_unprepare(gpu->clk_bus);

	return ret;
}

static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_shader)
		clk_disable_unprepare(gpu->clk_shader);
	if (gpu->clk_core)
		clk_disable_unprepare(gpu->clk_core);
	if (gpu->clk_bus)
		clk_disable_unprepare(gpu->clk_bus);

	return 0;
}

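/*
 * Poll the HI idle register in 5us steps until all modules selected in
 * gpu->idle_mask report idle, or the timeout expires.
 */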
int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		if ((idle & gpu->idle_mask) == gpu->idle_mask)
			return 0;

		if (time_is_before_jiffies(timeout)) {
			dev_warn(gpu->dev,
				 "timed out waiting for idle: idle=0x%x\n",
				 idle);
			return -ETIMEDOUT;
		}

		udelay(5);
	} while (1);
}

static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
	if (gpu->buffer) {
		/* Replace the last WAIT with END */
		etnaviv_buffer_end(gpu);

		/*
		 * We know that only the FE is busy here, this should
		 * happen quickly (as the WAIT is only 200 cycles).  If
		 * we fail, just warn and continue.
		 */
		etnaviv_gpu_wait_idle(gpu, 100);
	}

	return etnaviv_gpu_clk_disable(gpu);
}

#ifdef CONFIG_PM
static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = mutex_lock_killable(&gpu->lock);
	if (ret)
		return ret;

	etnaviv_gpu_update_clock(gpu);
	etnaviv_gpu_hw_init(gpu);

	gpu->switch_context = true;
	gpu->exec_state = -1;

	mutex_unlock(&gpu->lock);

	return 0;
}
#endif

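/*
 * Thermal cooling device callbacks: state 0 is full speed and each higher
 * state halves the clock, up to the maximum state 6 (1/64 of the base
 * rate, matching etnaviv_gpu_update_clock() above).
 */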
static int
etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev,
				  unsigned long *state)
{
	*state = 6;

	return 0;
}

static int
etnaviv_gpu_cooling_get_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long *state)
{
	struct etnaviv_gpu *gpu = cdev->devdata;

	*state = gpu->freq_scale;

	return 0;
}

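/*
 * The new state is stored under the GPU lock and applied immediately
 * unless the GPU is runtime-suspended; in that case it takes effect on
 * the next resume, when etnaviv_gpu_hw_resume() calls
 * etnaviv_gpu_update_clock().
 */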
static int
etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long state)
{
	struct etnaviv_gpu *gpu = cdev->devdata;

	mutex_lock(&gpu->lock);
	gpu->freq_scale = state;
	if (!pm_runtime_suspended(gpu->dev))
		etnaviv_gpu_update_clock(gpu);
	mutex_unlock(&gpu->lock);

	return 0;
}

static struct thermal_cooling_device_ops cooling_ops = {
	.get_max_state = etnaviv_gpu_cooling_get_max_state,
	.get_cur_state = etnaviv_gpu_cooling_get_cur_state,
	.set_cur_state = etnaviv_gpu_cooling_set_cur_state,
};

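/*
 * Component bind: register the thermal cooling device, power the GPU up
 * (via runtime PM where available, otherwise by enabling the clocks
 * directly) and wire the GPU into the DRM device, setting up the fence
 * context, the retire/recover workers and the hangcheck timer.
 */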
static int etnaviv_gpu_bind(struct device *dev, struct device *master,
	void *data)
{
	struct drm_device *drm = data;
	struct etnaviv_drm_private *priv = drm->dev_private;
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

	gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
				(char *)dev_name(dev), gpu, &cooling_ops);
	if (IS_ERR(gpu->cooling))
		return PTR_ERR(gpu->cooling);

#ifdef CONFIG_PM
	ret = pm_runtime_get_sync(gpu->dev);
#else
	ret = etnaviv_gpu_clk_enable(gpu);
#endif
	if (ret < 0) {
		thermal_cooling_device_unregister(gpu->cooling);
		return ret;
	}

	gpu->drm = drm;
	gpu->fence_context = dma_fence_context_alloc(1);
	spin_lock_init(&gpu->fence_spinlock);

	INIT_LIST_HEAD(&gpu->active_cmd_list);
	INIT_WORK(&gpu->retire_work, retire_worker);
	INIT_WORK(&gpu->recover_work, recover_worker);
	init_waitqueue_head(&gpu->fence_event);

	setup_deferrable_timer(&gpu->hangcheck_timer, hangcheck_handler,
			       (unsigned long)gpu);

	priv->gpu[priv->num_gpus++] = gpu;

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;
}

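/*
 * Component unbind: the reverse of etnaviv_gpu_bind().  Stop the
 * hangcheck timer, suspend the hardware, and release the command
 * buffer, suballocator, MMU and cooling device.
 */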
static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
	void *data)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);

	DBG("%s", dev_name(gpu->dev));

	hangcheck_disable(gpu);

#ifdef CONFIG_PM
	pm_runtime_get_sync(gpu->dev);
	pm_runtime_put_sync_suspend(gpu->dev);
#else
	etnaviv_gpu_hw_suspend(gpu);
#endif

	if (gpu->buffer) {
		etnaviv_cmdbuf_free(gpu->buffer);
		gpu->buffer = NULL;
	}

	if (gpu->cmdbuf_suballoc) {
		etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc);
		gpu->cmdbuf_suballoc = NULL;
	}

	if (gpu->mmu) {
		etnaviv_iommu_destroy(gpu->mmu);
		gpu->mmu = NULL;
	}

	gpu->drm = NULL;

	thermal_cooling_device_unregister(gpu->cooling);
	gpu->cooling = NULL;
}

static const struct component_ops gpu_ops = {
	.bind = etnaviv_gpu_bind,
	.unbind = etnaviv_gpu_unbind,
};

static const struct of_device_id etnaviv_gpu_match[] = {
	{
		.compatible = "vivante,gc"
	},
	{ /* sentinel */ }
};

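/*
 * Platform probe: map the register window, request the IRQ and look up
 * the optional bus, core and shader clocks.  The device is left
 * runtime-suspended here; it is first powered up when the component
 * binds via etnaviv_gpu_bind().
 */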
static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct etnaviv_gpu *gpu;
	int err;

	gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
	if (!gpu)
		return -ENOMEM;

	gpu->dev = &pdev->dev;
	mutex_init(&gpu->lock);

	/* Map registers: */
	gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
	if (IS_ERR(gpu->mmio))
		return PTR_ERR(gpu->mmio);

	/* Get Interrupt: */
	gpu->irq = platform_get_irq(pdev, 0);
	if (gpu->irq < 0) {
		dev_err(dev, "failed to get irq: %d\n", gpu->irq);
		return gpu->irq;
	}

	err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
			       dev_name(gpu->dev), gpu);
	if (err) {
		dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
		return err;
	}

	/* Get Clocks: */
	gpu->clk_bus = devm_clk_get(&pdev->dev, "bus");
	DBG("clk_bus: %p", gpu->clk_bus);
	if (IS_ERR(gpu->clk_bus))
		gpu->clk_bus = NULL;

	gpu->clk_core = devm_clk_get(&pdev->dev, "core");
	DBG("clk_core: %p", gpu->clk_core);
	if (IS_ERR(gpu->clk_core))
		gpu->clk_core = NULL;
	gpu->base_rate_core = clk_get_rate(gpu->clk_core);

	gpu->clk_shader = devm_clk_get(&pdev->dev, "shader");
	DBG("clk_shader: %p", gpu->clk_shader);
	if (IS_ERR(gpu->clk_shader))
		gpu->clk_shader = NULL;
	gpu->base_rate_shader = clk_get_rate(gpu->clk_shader);

	/* TODO: figure out max mapped size */
	dev_set_drvdata(dev, gpu);

	/*
	 * We treat the device as initially suspended.  The runtime PM
	 * autosuspend delay is rather arbitrary: no measurements have
	 * yet been performed to determine an appropriate value.
	 */
	pm_runtime_use_autosuspend(gpu->dev);
	pm_runtime_set_autosuspend_delay(gpu->dev, 200);
	pm_runtime_enable(gpu->dev);

	err = component_add(&pdev->dev, &gpu_ops);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register component: %d\n", err);
		return err;
	}

	return 0;
}

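/*
 * Undo etnaviv_gpu_platform_probe(): drop the component and disable
 * runtime PM.
 */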
static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &gpu_ops);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

#ifdef CONFIG_PM
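/*
 * Runtime suspend: refuse (-EBUSY) while fences are outstanding or
 * while any unit other than the front end is busy; the FE is expected
 * to still be executing its WAIT loop and is parked by
 * etnaviv_gpu_hw_suspend().
 */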
static int etnaviv_gpu_rpm_suspend(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	u32 idle, mask;

	/* If we have outstanding fences, we're not idle */
	if (gpu->completed_fence != gpu->active_fence)
		return -EBUSY;

	/* Check whether the hardware (except FE) is idle */
	mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
	if (idle != mask)
		return -EBUSY;

	return etnaviv_gpu_hw_suspend(gpu);
}

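/*
 * Runtime resume: ungate the clocks and, if the GPU had been
 * initialised before suspending, restore the hardware state.
 */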
static int etnaviv_gpu_rpm_resume(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

	ret = etnaviv_gpu_clk_enable(gpu);
	if (ret)
		return ret;

	/* Re-initialise the basic hardware state */
	if (gpu->drm && gpu->buffer) {
		ret = etnaviv_gpu_hw_resume(gpu);
		if (ret) {
			etnaviv_gpu_clk_disable(gpu);
			return ret;
		}
	}

	return 0;
}
#endif

static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
	SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
			   NULL)
};

struct platform_driver etnaviv_gpu_driver = {
	.driver = {
		.name = "etnaviv-gpu",
		.owner = THIS_MODULE,
		.pm = &etnaviv_gpu_pm_ops,
		.of_match_table = etnaviv_gpu_match,
	},
	.probe = etnaviv_gpu_platform_probe,
	.remove = etnaviv_gpu_platform_remove,
	.id_table = gpu_ids,
};