video: tegra: host: gk20a: add missing returns
1 /*
2  * drivers/video/tegra/host/gk20a/fifo_gk20a.c
3  *
4  * GK20A Graphics FIFO (gr host)
5  *
6  * Copyright (c) 2011-2014, NVIDIA CORPORATION.  All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms and conditions of the GNU General Public License,
10  * version 2, as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  * more details.
16  *
17  * You should have received a copy of the GNU General Public License along with
18  * this program; if not, write to the Free Software Foundation, Inc.,
19  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20  */
21 #include <linux/delay.h>
22 #include <linux/slab.h>
23 #include <linux/scatterlist.h>
24 #include <trace/events/nvhost.h>
25
26 #include "../dev.h"
27 #include "../nvhost_as.h"
28
29 #include "gk20a.h"
30 #include "hw_fifo_gk20a.h"
31 #include "hw_pbdma_gk20a.h"
32 #include "hw_ccsr_gk20a.h"
33 #include "hw_ram_gk20a.h"
34 #include "hw_proj_gk20a.h"
35 #include "hw_top_gk20a.h"
36 #include "hw_mc_gk20a.h"
37 #include "hw_gr_gk20a.h"
38
39 static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
40                                             u32 hw_chid, bool add,
41                                             bool wait_for_finish);
42 static void gk20a_fifo_handle_mmu_fault_thread(struct work_struct *work);
43
44 /*
45  * Link engine IDs to MMU IDs and vice versa.
46  */
47
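/*
 * The raw values below are the MMU fault ids as reported by hardware: bit
 * positions in fifo_intr_mmu_fault_id_r() and the ids programmed into the
 * fault trigger register.  ~0 means "no mapping" and is what callers check
 * for.
 */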
48 static inline u32 gk20a_engine_id_to_mmu_id(u32 engine_id)
49 {
50         switch (engine_id) {
51         case ENGINE_GR_GK20A:
52                 return 0x00;
53         case ENGINE_CE2_GK20A:
54                 return 0x1b;
55         default:
56                 return ~0;
57         }
58 }
59
60 static inline u32 gk20a_mmu_id_to_engine_id(u32 engine_id)
61 {
62         switch (engine_id) {
63         case 0x00:
64                 return ENGINE_GR_GK20A;
65         case 0x1b:
66                 return ENGINE_CE2_GK20A;
67         default:
68                 return ~0;
69         }
70 }
71
72
73 static int init_engine_info(struct fifo_gk20a *f)
74 {
75         struct gk20a *g = f->g;
76         struct device *d = dev_from_gk20a(g);
77         struct fifo_engine_info_gk20a *gr_info;
78         const u32 gr_sw_id = ENGINE_GR_GK20A;
79         u32 i;
80         u32 max_info_entries = top_device_info__size_1_v();
81
82         nvhost_dbg_fn("");
83
 84         /* all we really care about finding is the graphics entry;   */
 85         /* early in simulation the table may list more engines       */
86         f->num_engines = 1;
87
88         gr_info = f->engine_info + gr_sw_id;
89
90         gr_info->sw_id = gr_sw_id;
91         gr_info->name = "gr";
92         gr_info->dev_info_id = top_device_info_type_enum_graphics_v();
93         gr_info->mmu_fault_id = fifo_intr_mmu_fault_eng_id_graphics_v();
94         gr_info->runlist_id = ~0;
95         gr_info->pbdma_id   = ~0;
96         gr_info->engine_id  = ~0;
97
98         for (i = 0; i < max_info_entries; i++) {
99                 u32 table_entry = gk20a_readl(f->g, top_device_info_r(i));
100                 u32 entry = top_device_info_entry_v(table_entry);
101                 u32 engine_enum = top_device_info_type_enum_v(table_entry);
102                 u32 table_entry2 = 0;
103
104                 if (entry == top_device_info_entry_not_valid_v())
105                         continue;
106
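                /* An entry may set its "chain" bit, meaning the record
                 * continues in the next table row; in that case the engine
                 * enum is taken from that second row instead. */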
107                 if (top_device_info_chain_v(table_entry) ==
108                     top_device_info_chain_enable_v()) {
109
110                         table_entry2 = gk20a_readl(f->g,
111                                                    top_device_info_r(++i));
112
113                         engine_enum = top_device_info_type_enum_v(table_entry2);
114                 }
115
116                 /* we only care about GR engine here */
117                 if (entry == top_device_info_entry_enum_v() &&
118                     engine_enum == gr_info->dev_info_id) {
119                         int pbdma_id;
120                         u32 runlist_bit;
121
122                         gr_info->runlist_id =
123                                 top_device_info_runlist_enum_v(table_entry);
124                         nvhost_dbg_info("gr info: runlist_id %d", gr_info->runlist_id);
125
126                         gr_info->engine_id =
127                                 top_device_info_engine_enum_v(table_entry);
128                         nvhost_dbg_info("gr info: engine_id %d", gr_info->engine_id);
129
130                         runlist_bit = 1 << gr_info->runlist_id;
131
132                         for (pbdma_id = 0; pbdma_id < f->num_pbdma; pbdma_id++) {
133                                 nvhost_dbg_info("gr info: pbdma_map[%d]=%d",
134                                         pbdma_id, f->pbdma_map[pbdma_id]);
135                                 if (f->pbdma_map[pbdma_id] & runlist_bit)
136                                         break;
137                         }
138
139                         if (pbdma_id == f->num_pbdma) {
 140                                 nvhost_err(d, "busted pbdma map");
141                                 return -EINVAL;
142                         }
143                         gr_info->pbdma_id = pbdma_id;
144
145                         break;
146                 }
147         }
148
149         if (gr_info->runlist_id == ~0) {
150                 nvhost_err(d, "busted device info");
151                 return -EINVAL;
152         }
153
154         return 0;
155 }
156
157 void gk20a_remove_fifo_support(struct fifo_gk20a *f)
158 {
159         struct gk20a *g = f->g;
160         struct device *d = dev_from_gk20a(g);
161         struct fifo_engine_info_gk20a *engine_info;
162         struct fifo_runlist_info_gk20a *runlist;
163         u32 runlist_id;
164         u32 i;
165
166         nvhost_dbg_fn("");
167
168         if (f->channel) {
169                 int c;
170                 for (c = 0; c < f->num_channels; c++) {
171                         if (f->channel[c].remove_support)
172                                 f->channel[c].remove_support(f->channel+c);
173                 }
174                 kfree(f->channel);
175         }
176         if (f->userd.gpu_va)
177                 gk20a_gmmu_unmap(&g->mm.bar1.vm,
178                                 f->userd.gpu_va,
179                                 f->userd.size,
180                                 mem_flag_none);
181
182         if (f->userd.sgt)
183                 gk20a_free_sgtable(&f->userd.sgt);
184
185         if (f->userd.cpuva)
186                 dma_free_coherent(d,
187                                 f->userd_total_size,
188                                 f->userd.cpuva,
189                                 f->userd.iova);
190         f->userd.cpuva = NULL;
191         f->userd.iova = 0;
192
193         engine_info = f->engine_info + ENGINE_GR_GK20A;
194         runlist_id = engine_info->runlist_id;
195         runlist = &f->runlist_info[runlist_id];
196
197         for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
198                 if (runlist->mem[i].cpuva)
199                         dma_free_coherent(d,
200                                 runlist->mem[i].size,
201                                 runlist->mem[i].cpuva,
202                                 runlist->mem[i].iova);
203                 runlist->mem[i].cpuva = NULL;
204                 runlist->mem[i].iova = 0;
205         }
206
207         kfree(runlist->active_channels);
208
209         kfree(f->runlist_info);
210         kfree(f->pbdma_map);
211         kfree(f->engine_info);
212 }
213
 214 /* reads info from hardware and fills in pbdma exception info record */
215 static inline void get_exception_pbdma_info(
216         struct gk20a *g,
217         struct fifo_engine_info_gk20a *eng_info)
218 {
219         struct fifo_pbdma_exception_info_gk20a *e =
220                 &eng_info->pbdma_exception_info;
221
222         u32 pbdma_status_r = e->status_r = gk20a_readl(g,
223                    fifo_pbdma_status_r(eng_info->pbdma_id));
224         e->id = fifo_pbdma_status_id_v(pbdma_status_r); /* vs. id_hw_v()? */
225         e->id_is_chid = fifo_pbdma_status_id_type_v(pbdma_status_r) ==
226                 fifo_pbdma_status_id_type_chid_v();
227         e->chan_status_v  = fifo_pbdma_status_chan_status_v(pbdma_status_r);
228         e->next_id_is_chid =
229                 fifo_pbdma_status_next_id_type_v(pbdma_status_r) ==
230                 fifo_pbdma_status_next_id_type_chid_v();
231         e->next_id = fifo_pbdma_status_next_id_v(pbdma_status_r);
232         e->chsw_in_progress =
233                 fifo_pbdma_status_chsw_v(pbdma_status_r) ==
234                 fifo_pbdma_status_chsw_in_progress_v();
235 }
236
237 static void fifo_pbdma_exception_status(struct gk20a *g,
238         struct fifo_engine_info_gk20a *eng_info)
239 {
240         struct fifo_pbdma_exception_info_gk20a *e;
241         get_exception_pbdma_info(g, eng_info);
242         e = &eng_info->pbdma_exception_info;
243
244         nvhost_dbg_fn("pbdma_id %d, "
245                       "id_type %s, id %d, chan_status %d, "
246                       "next_id_type %s, next_id %d, "
247                       "chsw_in_progress %d",
248                       eng_info->pbdma_id,
249                       e->id_is_chid ? "chid" : "tsgid", e->id, e->chan_status_v,
250                       e->next_id_is_chid ? "chid" : "tsgid", e->next_id,
251                       e->chsw_in_progress);
252 }
253
 254 /* reads info from hardware and fills in engine exception info record */
255 static inline void get_exception_engine_info(
256         struct gk20a *g,
257         struct fifo_engine_info_gk20a *eng_info)
258 {
259         struct fifo_engine_exception_info_gk20a *e =
260                 &eng_info->engine_exception_info;
261         u32 engine_status_r = e->status_r =
262                 gk20a_readl(g, fifo_engine_status_r(eng_info->engine_id));
263         e->id = fifo_engine_status_id_v(engine_status_r); /* vs. id_hw_v()? */
264         e->id_is_chid = fifo_engine_status_id_type_v(engine_status_r) ==
265                 fifo_engine_status_id_type_chid_v();
266         e->ctx_status_v = fifo_engine_status_ctx_status_v(engine_status_r);
267         e->faulted =
268                 fifo_engine_status_faulted_v(engine_status_r) ==
269                 fifo_engine_status_faulted_true_v();
270         e->idle =
271                 fifo_engine_status_engine_v(engine_status_r) ==
272                 fifo_engine_status_engine_idle_v();
273         e->ctxsw_in_progress =
274                 fifo_engine_status_ctxsw_v(engine_status_r) ==
275                 fifo_engine_status_ctxsw_in_progress_v();
276 }
277
278 static void fifo_engine_exception_status(struct gk20a *g,
279                                struct fifo_engine_info_gk20a *eng_info)
280 {
281         struct fifo_engine_exception_info_gk20a *e;
282         get_exception_engine_info(g, eng_info);
283         e = &eng_info->engine_exception_info;
284
285         nvhost_dbg_fn("engine_id %d, id_type %s, id %d, ctx_status %d, "
286                       "faulted %d, idle %d, ctxsw_in_progress %d, ",
287                       eng_info->engine_id, e->id_is_chid ? "chid" : "tsgid",
288                       e->id, e->ctx_status_v,
289                       e->faulted, e->idle,  e->ctxsw_in_progress);
290 }
291
292 static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
293 {
294         struct fifo_engine_info_gk20a *engine_info;
295         struct fifo_runlist_info_gk20a *runlist;
296         struct device *d = dev_from_gk20a(g);
297         u32 runlist_id;
298         u32 i;
299         u64 runlist_size;
300
301         nvhost_dbg_fn("");
302
303         f->max_runlists = fifo_eng_runlist_base__size_1_v();
304         f->runlist_info = kzalloc(sizeof(struct fifo_runlist_info_gk20a) *
305                                   f->max_runlists, GFP_KERNEL);
306         if (!f->runlist_info)
307                 goto clean_up;
308
309         engine_info = f->engine_info + ENGINE_GR_GK20A;
310         runlist_id = engine_info->runlist_id;
311         runlist = &f->runlist_info[runlist_id];
312
313         runlist->active_channels =
314                 kzalloc(DIV_ROUND_UP(f->num_channels, BITS_PER_BYTE),
315                         GFP_KERNEL);
316         if (!runlist->active_channels)
317                 goto clean_up_runlist_info;
318
319         runlist_size  = ram_rl_entry_size_v() * f->num_channels;
320         for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
321                 runlist->mem[i].cpuva =
322                         dma_alloc_coherent(d,
323                                         runlist_size,
324                                         &runlist->mem[i].iova,
325                                         GFP_KERNEL);
326                 if (!runlist->mem[i].cpuva) {
327                         dev_err(d, "memory allocation failed\n");
328                         goto clean_up_runlist;
329                 }
330                 runlist->mem[i].size = runlist_size;
331         }
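        /* More than one runlist buffer is kept; presumably this allows a new
         * list to be built in a free buffer while the hardware may still be
         * fetching the currently pinned one (cur_buffer). */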
332         mutex_init(&runlist->mutex);
333         init_waitqueue_head(&runlist->runlist_wq);
334
 335         /* None of the buffers is pinned if this value doesn't change.
 336            Otherwise, one of them (cur_buffer) must have been pinned. */
337         runlist->cur_buffer = MAX_RUNLIST_BUFFERS;
338
339         nvhost_dbg_fn("done");
340         return 0;
341
342 clean_up_runlist:
343         for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
344                 if (runlist->mem[i].cpuva)
345                         dma_free_coherent(d,
346                                 runlist->mem[i].size,
347                                 runlist->mem[i].cpuva,
348                                 runlist->mem[i].iova);
349                 runlist->mem[i].cpuva = NULL;
350                 runlist->mem[i].iova = 0;
351         }
352
353         kfree(runlist->active_channels);
354         runlist->active_channels = NULL;
355
356 clean_up_runlist_info:
357         kfree(f->runlist_info);
358         f->runlist_info = NULL;
359
360 clean_up:
361         nvhost_dbg_fn("fail");
362         return -ENOMEM;
363 }
364
365 #define GRFIFO_TIMEOUT_CHECK_PERIOD_US 100000
366
367 int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
368 {
369         u32 intr_stall;
370         u32 mask;
371         u32 timeout;
372         int i;
373
374         nvhost_dbg_fn("");
375         /* enable pmc pfifo */
376         gk20a_reset(g, mc_enable_pfifo_enabled_f()
377                         | mc_enable_ce2_enabled_f());
378
379         /* enable pbdma */
380         mask = 0;
381         for (i = 0; i < proj_host_num_pbdma_v(); ++i)
382                 mask |= mc_enable_pb_sel_f(mc_enable_pb_0_enabled_v(), i);
383         gk20a_writel(g, mc_enable_pb_r(), mask);
384
385         /* enable pfifo interrupt */
386         gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFF);
387         gk20a_writel(g, fifo_intr_en_0_r(), 0x7FFFFFFF);
388         gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000);
389
390         /* enable pbdma interrupt */
391         mask = 0;
392         for (i = 0; i < proj_host_num_pbdma_v(); i++) {
393                 intr_stall = gk20a_readl(g, pbdma_intr_stall_r(i));
394                 intr_stall &= ~pbdma_intr_stall_lbreq_enabled_f();
395                 gk20a_writel(g, pbdma_intr_stall_r(i), intr_stall);
396                 gk20a_writel(g, pbdma_intr_0_r(i), 0xFFFFFFFF);
397                 gk20a_writel(g, pbdma_intr_en_0_r(i),
398                         (~0) & ~pbdma_intr_en_0_lbreq_enabled_f());
399                 gk20a_writel(g, pbdma_intr_1_r(i), 0xFFFFFFFF);
400                 gk20a_writel(g, pbdma_intr_en_1_r(i), 0xFFFFFFFF);
401         }
402
403         /* TBD: apply overrides */
404
405         /* TBD: BLCG prod */
406
407         /* reset runlist interrupts */
408         gk20a_writel(g, fifo_intr_runlist_r(), ~0);
409
410         /* TBD: do we need those? */
411         timeout = gk20a_readl(g, fifo_fb_timeout_r());
412         timeout = set_field(timeout, fifo_fb_timeout_period_m(),
413                         fifo_fb_timeout_period_max_f());
414         gk20a_writel(g, fifo_fb_timeout_r(), timeout);
415
416         timeout = gk20a_readl(g, fifo_pb_timeout_r());
417         timeout &= ~fifo_pb_timeout_detection_enabled_f();
418         gk20a_writel(g, fifo_pb_timeout_r(), timeout);
419
420         timeout = GRFIFO_TIMEOUT_CHECK_PERIOD_US |
421                         fifo_eng_timeout_detection_enabled_f();
422         gk20a_writel(g, fifo_eng_timeout_r(), timeout);
423
424         nvhost_dbg_fn("done");
425
426         return 0;
427 }
428
429 static void gk20a_init_fifo_pbdma_intr_descs(struct fifo_gk20a *f)
430 {
431         /* These are all errors which indicate something really wrong
432          * going on in the device. */
433         f->intr.pbdma.device_fatal_0 =
434                 pbdma_intr_0_memreq_pending_f() |
435                 pbdma_intr_0_memack_timeout_pending_f() |
436                 pbdma_intr_0_memack_extra_pending_f() |
437                 pbdma_intr_0_memdat_timeout_pending_f() |
438                 pbdma_intr_0_memdat_extra_pending_f() |
439                 pbdma_intr_0_memflush_pending_f() |
440                 pbdma_intr_0_memop_pending_f() |
441                 pbdma_intr_0_lbconnect_pending_f() |
442                 pbdma_intr_0_lbreq_pending_f() |
443                 pbdma_intr_0_lback_timeout_pending_f() |
444                 pbdma_intr_0_lback_extra_pending_f() |
445                 pbdma_intr_0_lbdat_timeout_pending_f() |
446                 pbdma_intr_0_lbdat_extra_pending_f() |
447                 pbdma_intr_0_xbarconnect_pending_f() |
448                 pbdma_intr_0_pri_pending_f();
449
450         /* These are data parsing, framing errors or others which can be
451          * recovered from with intervention... or just resetting the
452          * channel. */
453         f->intr.pbdma.channel_fatal_0 =
454                 pbdma_intr_0_gpfifo_pending_f() |
455                 pbdma_intr_0_gpptr_pending_f() |
456                 pbdma_intr_0_gpentry_pending_f() |
457                 pbdma_intr_0_gpcrc_pending_f() |
458                 pbdma_intr_0_pbptr_pending_f() |
459                 pbdma_intr_0_pbentry_pending_f() |
460                 pbdma_intr_0_pbcrc_pending_f() |
461                 pbdma_intr_0_method_pending_f() |
462                 pbdma_intr_0_methodcrc_pending_f() |
463                 pbdma_intr_0_pbseg_pending_f() |
464                 pbdma_intr_0_signature_pending_f();
465
466         /* Can be used for sw-methods, or represents
467          * a recoverable timeout. */
468         f->intr.pbdma.restartable_0 =
469                 pbdma_intr_0_device_pending_f() |
470                 pbdma_intr_0_acquire_pending_f();
471 }
472
473 static int gk20a_init_fifo_setup_sw(struct gk20a *g)
474 {
475         struct fifo_gk20a *f = &g->fifo;
476         struct device *d = dev_from_gk20a(g);
477         int chid, i, err = 0;
478
479         nvhost_dbg_fn("");
480
481         if (f->sw_ready) {
482                 nvhost_dbg_fn("skip init");
483                 return 0;
484         }
485
486         f->g = g;
487
488         INIT_WORK(&f->fault_restore_thread,
489                   gk20a_fifo_handle_mmu_fault_thread);
490         mutex_init(&f->intr.isr.mutex);
491         gk20a_init_fifo_pbdma_intr_descs(f); /* just filling in data/tables */
492
493         f->num_channels = ccsr_channel__size_1_v();
494         f->num_pbdma = proj_host_num_pbdma_v();
495         f->max_engines = ENGINE_INVAL_GK20A;
496
497         f->userd_entry_size = 1 << ram_userd_base_shift_v();
498         f->userd_total_size = f->userd_entry_size * f->num_channels;
499
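        /* USERD layout: one userd_entry_size slot per channel, carved out of
         * a single coherent allocation.  It is mapped into BAR1 once below;
         * each channel's slice is handed out in the per-chid loop further
         * down. */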
500         f->userd.cpuva = dma_alloc_coherent(d,
501                                         f->userd_total_size,
502                                         &f->userd.iova,
503                                         GFP_KERNEL);
 504         if (!f->userd.cpuva) {
 505                 dev_err(d, "memory allocation failed\n");
                     err = -ENOMEM;
 506                 goto clean_up;
 507         }
508
509         err = gk20a_get_sgtable(d, &f->userd.sgt,
510                                 f->userd.cpuva, f->userd.iova,
511                                 f->userd_total_size);
512         if (err) {
513                 dev_err(d, "failed to create sg table\n");
514                 goto clean_up;
515         }
516
517         /* bar1 va */
518         f->userd.gpu_va = gk20a_gmmu_map(&g->mm.bar1.vm,
519                                         &f->userd.sgt,
520                                         f->userd_total_size,
521                                         0, /* flags */
522                                         mem_flag_none);
 523         if (!f->userd.gpu_va) {
 524                 dev_err(d, "gmmu mapping failed\n");
                     err = -ENOMEM;
 525                 goto clean_up;
 526         }
527
528         nvhost_dbg(dbg_map, "userd bar1 va = 0x%llx", f->userd.gpu_va);
529
530         f->userd.size = f->userd_total_size;
531
532         f->channel = kzalloc(f->num_channels * sizeof(*f->channel),
533                                 GFP_KERNEL);
534         f->pbdma_map = kzalloc(f->num_pbdma * sizeof(*f->pbdma_map),
535                                 GFP_KERNEL);
536         f->engine_info = kzalloc(f->max_engines * sizeof(*f->engine_info),
537                                 GFP_KERNEL);
538
539         if (!(f->channel && f->pbdma_map && f->engine_info)) {
540                 err = -ENOMEM;
541                 goto clean_up;
542         }
543
544         /* pbdma map needs to be in place before calling engine info init */
545         for (i = 0; i < f->num_pbdma; ++i)
546                 f->pbdma_map[i] = gk20a_readl(g, fifo_pbdma_map_r(i));
547
 548         err = init_engine_info(f);
             if (err)
                     goto clean_up;
 549
 550         err = init_runlist(g, f);
             if (err)
                     goto clean_up;
 551
552         for (chid = 0; chid < f->num_channels; chid++) {
553                 f->channel[chid].userd_cpu_va =
554                         f->userd.cpuva + chid * f->userd_entry_size;
555                 f->channel[chid].userd_iova =
556                         NV_MC_SMMU_VADDR_TRANSLATE(f->userd.iova)
557                                 + chid * f->userd_entry_size;
558                 f->channel[chid].userd_gpu_va =
559                         f->userd.gpu_va + chid * f->userd_entry_size;
560
561                 gk20a_init_channel_support(g, chid);
562         }
563         mutex_init(&f->ch_inuse_mutex);
564
565         f->remove_support = gk20a_remove_fifo_support;
566
567         f->deferred_reset_pending = false;
568         mutex_init(&f->deferred_reset_mutex);
569
570         f->sw_ready = true;
571
572         nvhost_dbg_fn("done");
573         return 0;
574
575 clean_up:
576         nvhost_dbg_fn("fail");
577         if (f->userd.gpu_va)
578                 gk20a_gmmu_unmap(&g->mm.bar1.vm,
579                                         f->userd.gpu_va,
580                                         f->userd.size,
581                                         mem_flag_none);
582         if (f->userd.sgt)
583                 gk20a_free_sgtable(&f->userd.sgt);
584         if (f->userd.cpuva)
585                 dma_free_coherent(d,
586                                 f->userd_total_size,
587                                 f->userd.cpuva,
588                                 f->userd.iova);
589         f->userd.cpuva = NULL;
590         f->userd.iova = 0;
591
592         memset(&f->userd, 0, sizeof(struct userd_desc));
593
594         kfree(f->channel);
595         f->channel = NULL;
596         kfree(f->pbdma_map);
597         f->pbdma_map = NULL;
598         kfree(f->engine_info);
599         f->engine_info = NULL;
600
601         return err;
602 }
603
604 static void gk20a_fifo_handle_runlist_event(struct gk20a *g)
605 {
606         struct fifo_gk20a *f = &g->fifo;
607         struct fifo_runlist_info_gk20a *runlist;
608         unsigned long runlist_event;
609         u32 runlist_id;
610
611         runlist_event = gk20a_readl(g, fifo_intr_runlist_r());
612         gk20a_writel(g, fifo_intr_runlist_r(), runlist_event);
613
614         for_each_set_bit(runlist_id, &runlist_event, f->max_runlists) {
615                 runlist = &f->runlist_info[runlist_id];
616                 wake_up(&runlist->runlist_wq);
617         }
618
619 }
620
621 static int gk20a_init_fifo_setup_hw(struct gk20a *g)
622 {
623         struct fifo_gk20a *f = &g->fifo;
624
625         nvhost_dbg_fn("");
626
627         /* test write, read through bar1 @ userd region before
628          * turning on the snooping */
629         {
630                 struct fifo_gk20a *f = &g->fifo;
631                 u32 v, v1 = 0x33, v2 = 0x55;
632
633                 u32 bar1_vaddr = f->userd.gpu_va;
634                 volatile u32 *cpu_vaddr = f->userd.cpuva;
635
636                 nvhost_dbg_info("test bar1 @ vaddr 0x%x",
637                            bar1_vaddr);
638
639                 v = gk20a_bar1_readl(g, bar1_vaddr);
640
641                 *cpu_vaddr = v1;
642                 smp_mb();
643
644                 if (v1 != gk20a_bar1_readl(g, bar1_vaddr)) {
645                         nvhost_err(dev_from_gk20a(g), "bar1 broken @ gk20a!");
646                         return -EINVAL;
647                 }
648
649                 gk20a_bar1_writel(g, bar1_vaddr, v2);
650
651                 if (v2 != gk20a_bar1_readl(g, bar1_vaddr)) {
652                         nvhost_err(dev_from_gk20a(g), "bar1 broken @ gk20a!");
653                         return -EINVAL;
654                 }
655
656                 /* is it visible to the cpu? */
657                 if (*cpu_vaddr != v2) {
658                         nvhost_err(dev_from_gk20a(g),
659                                 "cpu didn't see bar1 write @ %p!",
660                                 cpu_vaddr);
661                 }
662
663                 /* put it back */
664                 gk20a_bar1_writel(g, bar1_vaddr, v);
665         }
666
667         /*XXX all manner of flushes and caching worries, etc */
668
669         /* set the base for the userd region now */
670         gk20a_writel(g, fifo_bar1_base_r(),
671                         fifo_bar1_base_ptr_f(f->userd.gpu_va >> 12) |
672                         fifo_bar1_base_valid_true_f());
673
674         nvhost_dbg_fn("done");
675
676         return 0;
677 }
678
679 int gk20a_init_fifo_support(struct gk20a *g)
680 {
 681         int err;
682
683         err = gk20a_init_fifo_setup_sw(g);
684         if (err)
685                 return err;
686
687         err = gk20a_init_fifo_setup_hw(g);
688         if (err)
689                 return err;
690
691         return err;
692 }
693
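/*
 * The MMU fault unit identifies the faulting context by the physical address
 * of its instance block, so attributing a fault to a channel is a linear
 * search over the channel table comparing inst_block addresses.
 */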
694 static struct channel_gk20a *
695 channel_from_inst_ptr(struct fifo_gk20a *f, u64 inst_ptr)
696 {
697         int ci;
698         if (unlikely(!f->channel))
699                 return NULL;
700         for (ci = 0; ci < f->num_channels; ci++) {
701                 struct channel_gk20a *c = f->channel+ci;
702                 if (c->inst_block.cpuva &&
703                     (inst_ptr == c->inst_block.cpu_pa))
704                         return f->channel+ci;
705         }
706         return NULL;
707 }
708
709 /* fault info/descriptions.
710  * tbd: move to setup
711  *  */
712 static const char * const fault_type_descs[] = {
713          "pde", /*fifo_intr_mmu_fault_info_type_pde_v() == 0 */
714          "pde size",
715          "pte",
716          "va limit viol",
717          "unbound inst",
718          "priv viol",
719          "ro viol",
720          "wo viol",
721          "pitch mask",
722          "work creation",
723          "bad aperture",
724          "compression failure",
725          "bad kind",
726          "region viol",
727          "dual ptes",
728          "poisoned",
729 };
730 /* engine descriptions */
731 static const char * const engine_subid_descs[] = {
732         "gpc",
733         "hub",
734 };
735
736 static const char * const hub_client_descs[] = {
737         "vip", "ce0", "ce1", "dniso", "fe", "fecs", "host", "host cpu",
738         "host cpu nb", "iso", "mmu", "mspdec", "msppp", "msvld",
739         "niso", "p2p", "pd", "perf", "pmu", "raster twod", "scc",
740         "scc nb", "sec", "ssync", "gr copy", "ce2", "xv", "mmu nb",
741         "msenc", "d falcon", "sked", "a falcon", "n/a",
742 };
743
744 static const char * const gpc_client_descs[] = {
745         "l1 0", "t1 0", "pe 0",
746         "l1 1", "t1 1", "pe 1",
747         "l1 2", "t1 2", "pe 2",
748         "l1 3", "t1 3", "pe 3",
749         "rast", "gcc", "gpccs",
750         "prop 0", "prop 1", "prop 2", "prop 3",
751         "l1 4", "t1 4", "pe 4",
752         "l1 5", "t1 5", "pe 5",
753         "l1 6", "t1 6", "pe 6",
754         "l1 7", "t1 7", "pe 7",
755         "gpm",
756         "ltp utlb 0", "ltp utlb 1", "ltp utlb 2", "ltp utlb 3",
757         "rgg utlb",
758 };
759
760 /* reads info from hardware and fills in mmu fault info record */
761 static inline void get_exception_mmu_fault_info(
762         struct gk20a *g, u32 engine_id,
763         struct fifo_mmu_fault_info_gk20a *f)
764 {
765         u32 fault_info_v;
766
767         nvhost_dbg_fn("engine_id %d", engine_id);
768
769         memset(f, 0, sizeof(*f));
770
771         f->fault_info_v = fault_info_v = gk20a_readl(g,
772              fifo_intr_mmu_fault_info_r(engine_id));
773         f->fault_type_v =
774                 fifo_intr_mmu_fault_info_type_v(fault_info_v);
775         f->engine_subid_v =
776                 fifo_intr_mmu_fault_info_engine_subid_v(fault_info_v);
777         f->client_v = fifo_intr_mmu_fault_info_client_v(fault_info_v);
778
779         BUG_ON(f->fault_type_v >= ARRAY_SIZE(fault_type_descs));
780         f->fault_type_desc =  fault_type_descs[f->fault_type_v];
781
782         BUG_ON(f->engine_subid_v >= ARRAY_SIZE(engine_subid_descs));
783         f->engine_subid_desc = engine_subid_descs[f->engine_subid_v];
784
785         if (f->engine_subid_v ==
786             fifo_intr_mmu_fault_info_engine_subid_hub_v()) {
787
788                 BUG_ON(f->client_v >= ARRAY_SIZE(hub_client_descs));
789                 f->client_desc = hub_client_descs[f->client_v];
790         } else if (f->engine_subid_v ==
791                    fifo_intr_mmu_fault_info_engine_subid_gpc_v()) {
792                 BUG_ON(f->client_v >= ARRAY_SIZE(gpc_client_descs));
793                 f->client_desc = gpc_client_descs[f->client_v];
794         } else {
795                 BUG_ON(1);
796         }
797
798         f->fault_hi_v = gk20a_readl(g, fifo_intr_mmu_fault_hi_r(engine_id));
799         f->fault_lo_v = gk20a_readl(g, fifo_intr_mmu_fault_lo_r(engine_id));
 800         /* note: ignoring aperture on gk20a... */
801         f->inst_ptr = fifo_intr_mmu_fault_inst_ptr_v(
802                  gk20a_readl(g, fifo_intr_mmu_fault_inst_r(engine_id)));
803         /* note: inst_ptr is a 40b phys addr.  */
804         f->inst_ptr <<= fifo_intr_mmu_fault_inst_ptr_align_shift_v();
805 }
806
807 static void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
808 {
809         nvhost_dbg_fn("");
810
811         if (engine_id == top_device_info_type_enum_graphics_v()) {
812                 /* resetting engine using mc_enable_r() is not enough,
813                  * we do full init sequence */
814                 gk20a_gr_reset(g);
815         }
816         if (engine_id == top_device_info_type_enum_copy0_v())
817                 gk20a_reset(g, mc_enable_ce2_m());
818 }
819
820 static void gk20a_fifo_handle_mmu_fault_thread(struct work_struct *work)
821 {
822         struct fifo_gk20a *f = container_of(work, struct fifo_gk20a,
823                                             fault_restore_thread);
824         struct gk20a *g = f->g;
825         int i;
826
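        /* Note: this work item runs with the runlist mutexes still held;
         * they were taken in gk20a_fifo_handle_mmu_fault() and are only
         * released at the end of this function. */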
827         /* Reinitialise FECS and GR */
828         gk20a_init_pmu_setup_hw2(g);
829
830         /* It is safe to enable ELPG again. */
831         gk20a_pmu_enable_elpg(g);
832
833         /* Restore the runlist */
834         for (i = 0; i < g->fifo.max_runlists; i++)
835                 gk20a_fifo_update_runlist_locked(g, i, ~0, true, true);
836
837         /* unlock all runlists */
838         for (i = 0; i < g->fifo.max_runlists; i++)
839                 mutex_unlock(&g->fifo.runlist_info[i].mutex);
840
841 }
842
843 static void gk20a_fifo_handle_chsw_fault(struct gk20a *g)
844 {
845         u32 intr;
846
847         intr = gk20a_readl(g, fifo_intr_chsw_error_r());
848         nvhost_err(dev_from_gk20a(g), "chsw: %08x\n", intr);
849         gk20a_fecs_dump_falcon_stats(g);
850         gk20a_writel(g, fifo_intr_chsw_error_r(), intr);
851 }
852
853 static void gk20a_fifo_handle_dropped_mmu_fault(struct gk20a *g)
854 {
855         struct device *dev = dev_from_gk20a(g);
856         u32 fault_id = gk20a_readl(g, fifo_intr_mmu_fault_id_r());
857         nvhost_err(dev, "dropped mmu fault (0x%08x)", fault_id);
858 }
859
860 static bool gk20a_fifo_should_defer_engine_reset(struct gk20a *g, u32 engine_id,
861                 struct fifo_mmu_fault_info_gk20a *f, bool fake_fault)
862 {
 863         /* channel recovery is only deferred if an sm debugger
 864            is attached and MMU debug mode is enabled */
865         if (!gk20a_gr_sm_debugger_attached(g) ||
866             !gk20a_mm_mmu_debug_mode_enabled(g))
867                 return false;
868
869         /* if this fault is fake (due to RC recovery), don't defer recovery */
870         if (fake_fault)
871                 return false;
872
873         if (engine_id != ENGINE_GR_GK20A ||
874             f->engine_subid_v != fifo_intr_mmu_fault_info_engine_subid_gpc_v())
875                 return false;
876
877         return true;
878 }
879
880 void fifo_gk20a_finish_mmu_fault_handling(struct gk20a *g,
881                 unsigned long fault_id) {
882         u32 engine_mmu_id;
883         int i;
884
885         /* reset engines */
886         for_each_set_bit(engine_mmu_id, &fault_id, 32) {
887                 u32 engine_id = gk20a_mmu_id_to_engine_id(engine_mmu_id);
888                 if (engine_id != ~0)
889                         gk20a_fifo_reset_engine(g, engine_id);
890         }
891
892         /* CLEAR the runlists. Do not wait for runlist to start as
893          * some engines may not be available right now */
894         for (i = 0; i < g->fifo.max_runlists; i++)
895                 gk20a_fifo_update_runlist_locked(g, i, ~0, false, false);
896
897         /* clear interrupt */
898         gk20a_writel(g, fifo_intr_mmu_fault_id_r(), fault_id);
899
900         /* resume scheduler */
901         gk20a_writel(g, fifo_error_sched_disable_r(),
902                      gk20a_readl(g, fifo_error_sched_disable_r()));
903
904         /* Spawn a work to enable PMU and restore runlists */
905         schedule_work(&g->fifo.fault_restore_thread);
906 }
907
908 static bool gk20a_fifo_set_ctx_mmu_error(struct gk20a *g,
909                 struct channel_gk20a *ch) {
910         bool verbose = true;
911         if (!ch || !ch->hwctx)
912                 return verbose;
913
914         nvhost_err(dev_from_gk20a(g),
915                 "channel %d with hwctx generated a mmu fault",
916                 ch->hw_chid);
917         if (ch->hwctx->error_notifier) {
918                 u32 err = ch->hwctx->error_notifier->info32;
919                 if (err) {
920                         /* If error code is already set, this mmu fault
921                          * was triggered as part of recovery from other
922                          * error condition.
923                          * Don't overwrite error flag. */
924
925                         /* Fifo timeout debug spew is controlled by user */
926                         if (err == NVHOST_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT)
927                                 verbose = ch->hwctx->timeout_debug_dump;
928                 } else {
929                         gk20a_set_error_notifier(ch->hwctx,
930                                 NVHOST_CHANNEL_FIFO_ERROR_MMU_ERR_FLT);
931                 }
932         }
933         /* mark channel as faulted */
934         ch->hwctx->has_timedout = true;
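        /* make sure the timed-out flag is visible before waking any waiters */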
935         wmb();
936         /* unblock pending waits */
937         wake_up(&ch->semaphore_wq);
938         wake_up(&ch->notifier_wq);
939         wake_up(&ch->submit_wq);
940         return verbose;
941 }
942
943
944 static bool gk20a_fifo_handle_mmu_fault(struct gk20a *g)
945 {
946         bool fake_fault;
947         unsigned long fault_id;
948         u32 engine_mmu_id;
949         int i;
950         bool verbose = true;
951         nvhost_dbg_fn("");
952
953         g->fifo.deferred_reset_pending = false;
954
955         /* Disable ELPG */
956         gk20a_pmu_disable_elpg(g);
957
958         /* If we have recovery in progress, MMU fault id is invalid */
959         if (g->fifo.mmu_fault_engines) {
960                 fault_id = g->fifo.mmu_fault_engines;
961                 g->fifo.mmu_fault_engines = 0;
962                 fake_fault = true;
963         } else {
964                 fault_id = gk20a_readl(g, fifo_intr_mmu_fault_id_r());
965                 fake_fault = false;
966                 nvhost_debug_dump(g->host);
967         }
968
 969         /* lock all runlists. Note that the locks are released in
 970          * gk20a_fifo_handle_mmu_fault_thread() */
971         for (i = 0; i < g->fifo.max_runlists; i++)
972                 mutex_lock(&g->fifo.runlist_info[i].mutex);
973
974         /* go through all faulted engines */
975         for_each_set_bit(engine_mmu_id, &fault_id, 32) {
976                 /* bits in fifo_intr_mmu_fault_id_r do not correspond 1:1 to
977                  * engines. Convert engine_mmu_id to engine_id */
978                 u32 engine_id = gk20a_mmu_id_to_engine_id(engine_mmu_id);
979                 struct fifo_runlist_info_gk20a *runlist = g->fifo.runlist_info;
980                 struct fifo_mmu_fault_info_gk20a f;
981                 struct channel_gk20a *ch = NULL;
982
983                 get_exception_mmu_fault_info(g, engine_mmu_id, &f);
984                 trace_nvhost_gk20a_mmu_fault(f.fault_hi_v,
985                                              f.fault_lo_v,
986                                              f.fault_info_v,
987                                              f.inst_ptr,
988                                              engine_id,
989                                              f.engine_subid_desc,
990                                              f.client_desc,
991                                              f.fault_type_desc);
992                 nvhost_err(dev_from_gk20a(g), "mmu fault on engine %d, "
993                            "engine subid %d (%s), client %d (%s), "
994                            "addr 0x%08x:0x%08x, type %d (%s), info 0x%08x,"
995                            "inst_ptr 0x%llx\n",
996                            engine_id,
997                            f.engine_subid_v, f.engine_subid_desc,
998                            f.client_v, f.client_desc,
999                            f.fault_hi_v, f.fault_lo_v,
1000                            f.fault_type_v, f.fault_type_desc,
1001                            f.fault_info_v, f.inst_ptr);
1002
1003                 /* get the channel */
1004                 if (fake_fault) {
1005                         /* read and parse engine status */
1006                         u32 status = gk20a_readl(g,
1007                                 fifo_engine_status_r(engine_id));
1008                         u32 ctx_status =
1009                                 fifo_engine_status_ctx_status_v(status);
1010                         bool type_ch = fifo_pbdma_status_id_type_v(status) ==
1011                                 fifo_pbdma_status_id_type_chid_v();
1012
1013                         /* use next_id if context load is failing */
1014                         u32 id = (ctx_status ==
1015                                 fifo_engine_status_ctx_status_ctxsw_load_v()) ?
1016                                 fifo_engine_status_next_id_v(status) :
1017                                 fifo_engine_status_id_v(status);
1018
1019                         if (type_ch) {
1020                                 ch = g->fifo.channel + id;
1021                         } else {
1022                                 nvhost_err(dev_from_gk20a(g), "non-chid type not supported");
1023                                 WARN_ON(1);
1024                         }
1025                 } else {
1026                         /* read channel based on instruction pointer */
1027                         ch = channel_from_inst_ptr(&g->fifo, f.inst_ptr);
1028                 }
1029
1030                 if (ch) {
1031                         verbose = gk20a_fifo_set_ctx_mmu_error(g, ch);
1032                         if (ch->in_use) {
1033                                 /* disable the channel from hw and increment
1034                                  * syncpoints */
1035                                 gk20a_disable_channel_no_update(ch);
1036
1037                                 /* remove the channel from runlist */
1038                                 clear_bit(ch->hw_chid,
1039                                           runlist->active_channels);
1040                         }
1041
1042                         /* check if engine reset should be deferred */
1043                         if (gk20a_fifo_should_defer_engine_reset(g, engine_id, &f, fake_fault)) {
1044                                 g->fifo.mmu_fault_engines = fault_id;
1045
1046                                 /* handled during channel free */
1047                                 g->fifo.deferred_reset_pending = true;
1048                         }
1049                 } else if (f.inst_ptr ==
1050                                 g->mm.bar1.inst_block.cpu_pa) {
1051                         nvhost_err(dev_from_gk20a(g), "mmu fault from bar1");
1052                 } else if (f.inst_ptr ==
1053                                 g->mm.pmu.inst_block.cpu_pa) {
1054                         nvhost_err(dev_from_gk20a(g), "mmu fault from pmu");
1055                 } else
1056                         nvhost_err(dev_from_gk20a(g), "couldn't locate channel for mmu fault");
1057         }
1058
1059         if (g->fifo.deferred_reset_pending) {
1060                 nvhost_dbg(dbg_intr | dbg_gpu_dbg, "sm debugger attached,"
1061                            " deferring channel recovery to channel free");
1062                 /* clear interrupt */
1063                 gk20a_writel(g, fifo_intr_mmu_fault_id_r(), fault_id);
1064                 return verbose;
1065         }
1066
1067         /* resetting the engines and clearing the runlists is done in
1068            a separate function to allow deferred reset. */
1069         fifo_gk20a_finish_mmu_fault_handling(g, fault_id);
1070
1071         return verbose;
1072 }
1073
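/* note: the engine status word is parsed with the pbdma_status id/type
 * accessors below; presumably the field layout is shared between the two
 * status registers. */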
1074 static void gk20a_fifo_get_faulty_channel(struct gk20a *g, int engine_id,
1075                                           u32 *chid, bool *type_ch)
1076 {
1077         u32 status = gk20a_readl(g, fifo_engine_status_r(engine_id));
1078         u32 ctx_status = fifo_engine_status_ctx_status_v(status);
1079
1080         *type_ch = fifo_pbdma_status_id_type_v(status) ==
1081                 fifo_pbdma_status_id_type_chid_v();
1082         /* use next_id if context load is failing */
1083         *chid = (ctx_status ==
1084                 fifo_engine_status_ctx_status_ctxsw_load_v()) ?
1085                 fifo_engine_status_next_id_v(status) :
1086                 fifo_engine_status_id_v(status);
1087 }
1088
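/*
 * Recovery works by deliberately triggering MMU faults on every engine that
 * shares a channel with the faulting one.  g->fifo.mmu_fault_engines records
 * those engines so the MMU fault handler treats the resulting faults as
 * "fake" and tears the channels down without re-reading the fault id
 * register.
 */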
1089 void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids,
1090                 bool verbose)
1091 {
1092         unsigned long end_jiffies = jiffies +
1093                 msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
1094         unsigned long delay = GR_IDLE_CHECK_DEFAULT;
1095         unsigned long engine_id, i;
1096         unsigned long _engine_ids = __engine_ids;
1097         unsigned long engine_ids = 0;
1098         int ret;
1099
1100         if (verbose)
1101                 nvhost_debug_dump(g->host);
1102
1103         /* store faulted engines in advance */
1104         g->fifo.mmu_fault_engines = 0;
1105         for_each_set_bit(engine_id, &_engine_ids, 32) {
1106                 bool ref_type_ch;
 1107                 u32 ref_chid;
1108                 gk20a_fifo_get_faulty_channel(g, engine_id, &ref_chid,
1109                                               &ref_type_ch);
1110
1111                 /* Reset *all* engines that use the
1112                  * same channel as faulty engine */
1113                 for (i = 0; i < g->fifo.max_engines; i++) {
1114                         bool type_ch;
1115                         u32 chid;
1116                         gk20a_fifo_get_faulty_channel(g, i, &chid, &type_ch);
1117                         if (ref_type_ch == type_ch && ref_chid == chid) {
1118                                 engine_ids |= BIT(i);
1119                                 g->fifo.mmu_fault_engines |=
1120                                         BIT(gk20a_engine_id_to_mmu_id(i));
1121                         }
1122                 }
1123
1124         }
1125
1126         /* trigger faults for all bad engines */
1127         for_each_set_bit(engine_id, &engine_ids, 32) {
1128                 if (engine_id > g->fifo.max_engines) {
1129                         WARN_ON(true);
1130                         break;
1131                 }
1132
1133                 gk20a_writel(g, fifo_trigger_mmu_fault_r(engine_id),
1134                              fifo_trigger_mmu_fault_id_f(
1135                              gk20a_engine_id_to_mmu_id(engine_id)) |
1136                              fifo_trigger_mmu_fault_enable_f(1));
1137         }
1138
1139         /* Wait for MMU fault to trigger */
1140         ret = -EBUSY;
1141         do {
1142                 if (gk20a_readl(g, fifo_intr_0_r()) &
1143                                 fifo_intr_0_mmu_fault_pending_f()) {
1144                         ret = 0;
1145                         break;
1146                 }
1147
1148                 usleep_range(delay, delay * 2);
1149                 delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
 1150         } while (time_before(jiffies, end_jiffies) ||
 1151                         !tegra_platform_is_silicon());
1152
1153         if (ret)
1154                 nvhost_err(dev_from_gk20a(g), "mmu fault timeout");
1155
1156         /* release mmu fault trigger */
1157         for_each_set_bit(engine_id, &engine_ids, 32)
1158                 gk20a_writel(g, fifo_trigger_mmu_fault_r(engine_id), 0);
1159 }
1160
1161
1162 static bool gk20a_fifo_handle_sched_error(struct gk20a *g)
1163 {
1164         u32 sched_error;
1165         u32 engine_id;
1166         int id = -1;
1167         bool non_chid = false;
1168
1169         /* read and reset the scheduler error register */
1170         sched_error = gk20a_readl(g, fifo_intr_sched_error_r());
1171         gk20a_writel(g, fifo_intr_0_r(), fifo_intr_0_sched_error_reset_f());
1172
1173         for (engine_id = 0; engine_id < g->fifo.max_engines; engine_id++) {
1174                 u32 status = gk20a_readl(g, fifo_engine_status_r(engine_id));
1175                 u32 ctx_status = fifo_engine_status_ctx_status_v(status);
1176                 bool failing_engine;
1177
1178                 /* we are interested in busy engines */
1179                 failing_engine = fifo_engine_status_engine_v(status) ==
1180                         fifo_engine_status_engine_busy_v();
1181
1182                 /* ..that are doing context switch */
1183                 failing_engine = failing_engine &&
1184                         (ctx_status ==
1185                                 fifo_engine_status_ctx_status_ctxsw_switch_v()
1186                         || ctx_status ==
1187                                 fifo_engine_status_ctx_status_ctxsw_save_v()
1188                         || ctx_status ==
1189                                 fifo_engine_status_ctx_status_ctxsw_load_v());
1190
1191                 if (failing_engine) {
1192                         id = (ctx_status ==
1193                                 fifo_engine_status_ctx_status_ctxsw_load_v()) ?
1194                                 fifo_engine_status_next_id_v(status) :
1195                                 fifo_engine_status_id_v(status);
1196                         non_chid = fifo_pbdma_status_id_type_v(status) !=
1197                                 fifo_pbdma_status_id_type_chid_v();
1198                         break;
1199                 }
1200         }
1201
1202         /* could not find the engine - should never happen */
1203         if (unlikely(engine_id >= g->fifo.max_engines))
1204                 goto err;
1205
1206         if (fifo_intr_sched_error_code_f(sched_error) ==
1207                         fifo_intr_sched_error_code_ctxsw_timeout_v()) {
1208                 struct fifo_gk20a *f = &g->fifo;
1209                 struct channel_gk20a *ch = &f->channel[id];
1210                 struct nvhost_hwctx *hwctx = ch->hwctx;
1211
1212                 if (non_chid) {
1213                         gk20a_fifo_recover(g, BIT(engine_id), true);
1214                         goto err;
1215                 }
1216
1217                 if (gk20a_channel_update_and_check_timeout(ch,
1218                         GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000)) {
1219                         gk20a_set_error_notifier(hwctx,
1220                                 NVHOST_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT);
1221                         nvhost_err(dev_from_gk20a(g),
1222                                 "fifo sched ctxsw timeout error:"
1223                                 "engine = %u, ch = %d", engine_id, id);
1224                         gk20a_fifo_recover(g, BIT(engine_id),
1225                                 hwctx ? hwctx->timeout_debug_dump : true);
1226                 } else {
1227                         nvhost_warn(dev_from_gk20a(g),
1228                                 "fifo is waiting for ctx switch for %d ms,"
1229                                 "ch = %d\n",
1230                                 ch->timeout_accumulated_ms,
1231                                 id);
1232                 }
 1233                 return hwctx ? hwctx->timeout_debug_dump : true;
1234         }
1235 err:
1236         nvhost_err(dev_from_gk20a(g), "fifo sched error : 0x%08x, engine=%u, %s=%d",
1237                    sched_error, engine_id, non_chid ? "non-ch" : "ch", id);
1238
1239         return true;
1240 }
1241
1242 static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
1243 {
1244         bool print_channel_reset_log = false, reset_engine = false;
1245         struct device *dev = dev_from_gk20a(g);
1246         u32 handled = 0;
1247
1248         nvhost_dbg_fn("");
1249
1250         if (fifo_intr & fifo_intr_0_pio_error_pending_f()) {
1251                 /* pio mode is unused.  this shouldn't happen, ever. */
1252                 /* should we clear it or just leave it pending? */
1253                 nvhost_err(dev, "fifo pio error!\n");
1254                 BUG_ON(1);
1255         }
1256
1257         if (fifo_intr & fifo_intr_0_bind_error_pending_f()) {
1258                 u32 bind_error = gk20a_readl(g, fifo_intr_bind_error_r());
1259                 nvhost_err(dev, "fifo bind error: 0x%08x", bind_error);
1260                 print_channel_reset_log = true;
1261                 handled |= fifo_intr_0_bind_error_pending_f();
1262         }
1263
1264         if (fifo_intr & fifo_intr_0_sched_error_pending_f()) {
1265                 print_channel_reset_log = gk20a_fifo_handle_sched_error(g);
1266                 handled |= fifo_intr_0_sched_error_pending_f();
1267         }
1268
1269         if (fifo_intr & fifo_intr_0_chsw_error_pending_f()) {
1270                 gk20a_fifo_handle_chsw_fault(g);
1271                 handled |= fifo_intr_0_chsw_error_pending_f();
1272         }
1273
1274         if (fifo_intr & fifo_intr_0_mmu_fault_pending_f()) {
1275                 print_channel_reset_log = gk20a_fifo_handle_mmu_fault(g);
1276                 reset_engine  = true;
1277                 handled |= fifo_intr_0_mmu_fault_pending_f();
1278         }
1279
1280         if (fifo_intr & fifo_intr_0_dropped_mmu_fault_pending_f()) {
1281                 gk20a_fifo_handle_dropped_mmu_fault(g);
1282                 handled |= fifo_intr_0_dropped_mmu_fault_pending_f();
1283         }
1284
1285         print_channel_reset_log = !g->fifo.deferred_reset_pending
1286                         && print_channel_reset_log;
1287
1288         if (print_channel_reset_log) {
1289                 int engine_id;
1290                 nvhost_err(dev_from_gk20a(g),
 1291                            "channel reset initiated from %s", __func__);
1292                 for (engine_id = 0;
1293                      engine_id < g->fifo.max_engines;
1294                      engine_id++) {
1295                         nvhost_dbg_fn("enum:%d -> engine_id:%d", engine_id,
1296                                 g->fifo.engine_info[engine_id].engine_id);
1297                         fifo_pbdma_exception_status(g,
1298                                         &g->fifo.engine_info[engine_id]);
1299                         fifo_engine_exception_status(g,
1300                                         &g->fifo.engine_info[engine_id]);
1301                 }
1302         }
1303
1304         return handled;
1305 }
1306
1307
1308 static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev,
1309                                         struct gk20a *g,
1310                                         struct fifo_gk20a *f,
1311                                         u32 pbdma_id)
1312 {
1313         u32 pbdma_intr_0 = gk20a_readl(g, pbdma_intr_0_r(pbdma_id));
1314         u32 pbdma_intr_1 = gk20a_readl(g, pbdma_intr_1_r(pbdma_id));
1315         u32 handled = 0;
1316         bool reset_device = false;
1317         bool reset_channel = false;
1318
1319         nvhost_dbg_fn("");
1320
1321         nvhost_dbg(dbg_intr, "pbdma id intr pending %d %08x %08x", pbdma_id,
1322                         pbdma_intr_0, pbdma_intr_1);
1323         if (pbdma_intr_0) {
1324                 if (f->intr.pbdma.device_fatal_0 & pbdma_intr_0) {
1325                         dev_err(dev, "unrecoverable device error: "
1326                                 "pbdma_intr_0(%d):0x%08x", pbdma_id, pbdma_intr_0);
1327                         reset_device = true;
1328                         /* TODO: disable pbdma intrs */
1329                         handled |= f->intr.pbdma.device_fatal_0 & pbdma_intr_0;
1330                 }
1331                 if (f->intr.pbdma.channel_fatal_0 & pbdma_intr_0) {
1332                         dev_warn(dev, "channel error: "
1333                                  "pbdma_intr_0(%d):0x%08x", pbdma_id, pbdma_intr_0);
1334                         reset_channel = true;
1335                         /* TODO: clear pbdma channel errors */
1336                         handled |= f->intr.pbdma.channel_fatal_0 & pbdma_intr_0;
1337                 }
1338                 gk20a_writel(g, pbdma_intr_0_r(pbdma_id), pbdma_intr_0);
1339         }
1340
1341         /* all intrs in _intr_1 are "host copy engine" related,
1342          * which gk20a doesn't have. for now just make them channel fatal. */
1343         if (pbdma_intr_1) {
1344                 dev_err(dev, "channel hce error: pbdma_intr_1(%d): 0x%08x",
1345                         pbdma_id, pbdma_intr_1);
1346                 reset_channel = true;
1347                 gk20a_writel(g, pbdma_intr_1_r(pbdma_id), pbdma_intr_1);
1348         }
1349
1350
1351
1352         return handled;
1353 }
1354
1355 static u32 fifo_channel_isr(struct gk20a *g, u32 fifo_intr)
1356 {
1357         gk20a_channel_semaphore_wakeup(g);
1358         return fifo_intr_0_channel_intr_pending_f();
1359 }
1360
1361
1362 static u32 fifo_pbdma_isr(struct gk20a *g, u32 fifo_intr)
1363 {
1364         struct device *dev = dev_from_gk20a(g);
1365         struct fifo_gk20a *f = &g->fifo;
1366         u32 clear_intr = 0, i;
1367         u32 pbdma_pending = gk20a_readl(g, fifo_intr_pbdma_id_r());
1368
1369         for (i = 0; i < fifo_intr_pbdma_id_status__size_1_v(); i++) {
1370                 if (fifo_intr_pbdma_id_status_f(pbdma_pending, i)) {
1371                         nvhost_dbg(dbg_intr, "pbdma id %d intr pending", i);
1372                         clear_intr |=
1373                                 gk20a_fifo_handle_pbdma_intr(dev, g, f, i);
1374                 }
1375         }
1376         return fifo_intr_0_pbdma_intr_pending_f();
1377 }
1378
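/*
 * Stalling FIFO interrupt entry point.  Despite the name this runs from a
 * threaded interrupt context under the fifo isr mutex.  Runlist events and
 * PBDMA interrupts are handled piecemeal; anything in error_intr_mask is
 * passed to fifo_error_isr(), which may kick off recovery.  Whatever was
 * handled is written back to fifo_intr_0 to clear it.
 */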
1379 void gk20a_fifo_isr(struct gk20a *g)
1380 {
1381         u32 error_intr_mask =
1382                 fifo_intr_0_bind_error_pending_f() |
1383                 fifo_intr_0_sched_error_pending_f() |
1384                 fifo_intr_0_chsw_error_pending_f() |
1385                 fifo_intr_0_fb_flush_timeout_pending_f() |
1386                 fifo_intr_0_dropped_mmu_fault_pending_f() |
1387                 fifo_intr_0_mmu_fault_pending_f() |
1388                 fifo_intr_0_lb_error_pending_f() |
1389                 fifo_intr_0_pio_error_pending_f();
1390
1391         u32 fifo_intr = gk20a_readl(g, fifo_intr_0_r());
1392         u32 clear_intr = 0;
1393
1394         /* note we're not actually in an "isr", but rather
1395          * in a threaded interrupt context... */
1396         mutex_lock(&g->fifo.intr.isr.mutex);
1397
1398         nvhost_dbg(dbg_intr, "fifo isr %08x\n", fifo_intr);
1399
1400         /* handle runlist update */
1401         if (fifo_intr & fifo_intr_0_runlist_event_pending_f()) {
1402                 gk20a_fifo_handle_runlist_event(g);
1403                 clear_intr |= fifo_intr_0_runlist_event_pending_f();
1404         }
1405         if (fifo_intr & fifo_intr_0_pbdma_intr_pending_f())
1406                 clear_intr |= fifo_pbdma_isr(g, fifo_intr);
1407
1408         if (unlikely(fifo_intr & error_intr_mask))
1409                 clear_intr = fifo_error_isr(g, fifo_intr);
1410
1411         gk20a_writel(g, fifo_intr_0_r(), clear_intr);
1412
1413         mutex_unlock(&g->fifo.intr.isr.mutex);
1414
1415         return;
1416 }
1417
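/*
 * Non-stalling FIFO interrupt entry point; only channel interrupts are
 * expected and handled here.
 */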
1418 void gk20a_fifo_nonstall_isr(struct gk20a *g)
1419 {
1420         u32 fifo_intr = gk20a_readl(g, fifo_intr_0_r());
1421         u32 clear_intr = 0;
1422
1423         nvhost_dbg(dbg_intr, "fifo nonstall isr %08x\n", fifo_intr);
1424
1425         if (fifo_intr & fifo_intr_0_channel_intr_pending_f())
1426                 clear_intr |= fifo_channel_isr(g, fifo_intr);
1427
1428         gk20a_writel(g, fifo_intr_0_r(), clear_intr);
1429
1430         return;
1431 }
1432
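/*
 * Preempt a single channel off the hardware.  Since it is not known which
 * runlist the channel sits on, all runlist locks are taken.  The PMU FIFO
 * mutex is acquired (or ELPG is disabled outright if that fails), presumably
 * so the engines cannot be power-gated mid-preempt, then the preempt request
 * is written and polled with exponential backoff up to the gr idle timeout.
 * On timeout an error notifier is posted to the channel and every busy
 * engine still bound to it is recovered.
 */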
1433 int gk20a_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
1434 {
1435         struct fifo_gk20a *f = &g->fifo;
1436         unsigned long end_jiffies = jiffies
1437                 + msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
1438         u32 delay = GR_IDLE_CHECK_DEFAULT;
1439         u32 ret = 0;
1440         u32 token = PMU_INVALID_MUTEX_OWNER_ID;
1441         u32 elpg_off = 0;
1442         u32 i;
1443
1444         nvhost_dbg_fn("%d", hw_chid);
1445
1446         /* we have no idea which runlist we are using. lock all */
1447         for (i = 0; i < g->fifo.max_runlists; i++)
1448                 mutex_lock(&f->runlist_info[i].mutex);
1449
1450         /* disable elpg if failed to acquire pmu mutex */
1451         elpg_off = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
1452         if (elpg_off)
1453                 gk20a_pmu_disable_elpg(g);
1454
1455         /* issue preempt */
1456         gk20a_writel(g, fifo_preempt_r(),
1457                 fifo_preempt_chid_f(hw_chid) |
1458                 fifo_preempt_type_channel_f());
1459
1460         /* wait for preempt */
1461         ret = -EBUSY;
1462         do {
1463                 if (!(gk20a_readl(g, fifo_preempt_r()) &
1464                         fifo_preempt_pending_true_f())) {
1465                         ret = 0;
1466                         break;
1467                 }
1468
1469                 usleep_range(delay, delay * 2);
1470                 delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
1471         } while (time_before(jiffies, end_jiffies) ||
1472                         !tegra_platform_is_silicon());
1473
1474         if (ret) {
1475                 int i;
1476                 u32 engines = 0;
1477                 struct fifo_gk20a *f = &g->fifo;
1478                 struct channel_gk20a *ch = &f->channel[hw_chid];
1479
1480                 nvhost_err(dev_from_gk20a(g), "preempt channel %d timeout\n",
1481                             hw_chid);
1482
1483                 /* forcefully reset all busy engines using this channel */
1484                 for (i = 0; i < g->fifo.max_engines; i++) {
1485                         u32 status = gk20a_readl(g, fifo_engine_status_r(i));
1486                         u32 ctx_status =
1487                                 fifo_engine_status_ctx_status_v(status);
1488                         bool type_ch = fifo_pbdma_status_id_type_v(status) ==
1489                                 fifo_pbdma_status_id_type_chid_v();
1490                         bool busy = fifo_engine_status_engine_v(status) ==
1491                                 fifo_engine_status_engine_busy_v();
1492                         u32 id = (ctx_status ==
1493                                 fifo_engine_status_ctx_status_ctxsw_load_v()) ?
1494                                 fifo_engine_status_next_id_v(status) :
1495                                 fifo_engine_status_id_v(status);
1496
1497                         if (type_ch && busy && id == hw_chid)
1498                                 engines |= BIT(i);
1499                 }
1500                 gk20a_set_error_notifier(ch->hwctx,
1501                                 NVHOST_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT);
1502                 gk20a_fifo_recover(g, engines, true);
1503         }
1504
1505         /* re-enable elpg or release pmu mutex */
1506         if (elpg_off)
1507                 gk20a_pmu_enable_elpg(g);
1508         else
1509                 pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
1510
1511         for (i = 0; i < g->fifo.max_runlists; i++)
1512                 mutex_unlock(&f->runlist_info[i].mutex);
1513
1514         return ret;
1515 }
1516
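/*
 * Allow the scheduler to pick up the engine's runlist again by clearing its
 * bit in fifo_sched_disable, under the same PMU mutex/ELPG handling as the
 * preempt path above.
 */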
1517 int gk20a_fifo_enable_engine_activity(struct gk20a *g,
1518                                 struct fifo_engine_info_gk20a *eng_info)
1519 {
1520         u32 token = PMU_INVALID_MUTEX_OWNER_ID;
1521         u32 elpg_off;
1522         u32 enable;
1523
1524         nvhost_dbg_fn("");
1525
1526         /* disable elpg if failed to acquire pmu mutex */
1527         elpg_off = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
1528         if (elpg_off)
1529                 gk20a_pmu_disable_elpg(g);
1530
1531         enable = gk20a_readl(g, fifo_sched_disable_r());
1532         enable &= ~(fifo_sched_disable_true_v() >> eng_info->runlist_id);
1533         gk20a_writel(g, fifo_sched_disable_r(), enable);
1534
1535         /* re-enable elpg or release pmu mutex */
1536         if (elpg_off)
1537                 gk20a_pmu_enable_elpg(g);
1538         else
1539                 pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
1540
1541         nvhost_dbg_fn("done");
1542         return 0;
1543 }
1544
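/*
 * Stop the scheduler from feeding an engine.  Bail out with -EBUSY if the
 * engine is busy and the caller does not want to wait.  Otherwise the
 * engine's runlist is disabled in fifo_sched_disable and whatever channel
 * is resident on the PBDMA and on the engine itself is preempted.  If a
 * preempt fails, engine activity is re-enabled before returning the error.
 */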
1545 int gk20a_fifo_disable_engine_activity(struct gk20a *g,
1546                                 struct fifo_engine_info_gk20a *eng_info,
1547                                 bool wait_for_idle)
1548 {
1549         u32 gr_stat, pbdma_stat, chan_stat, eng_stat, ctx_stat;
1550         u32 pbdma_chid = ~0, engine_chid = ~0, disable;
1551         u32 token = PMU_INVALID_MUTEX_OWNER_ID;
1552         u32 elpg_off;
1553         u32 err = 0;
1554
1555         nvhost_dbg_fn("");
1556
1557         gr_stat =
1558                 gk20a_readl(g, fifo_engine_status_r(eng_info->engine_id));
1559         if (fifo_engine_status_engine_v(gr_stat) ==
1560             fifo_engine_status_engine_busy_v() && !wait_for_idle)
1561                 return -EBUSY;
1562
1563         /* disable elpg if failed to acquire pmu mutex */
1564         elpg_off = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
1565         if (elpg_off)
1566                 gk20a_pmu_disable_elpg(g);
1567
1568         disable = gk20a_readl(g, fifo_sched_disable_r());
1569         disable = set_field(disable,
1570                         fifo_sched_disable_runlist_m(eng_info->runlist_id),
1571                         fifo_sched_disable_runlist_f(fifo_sched_disable_true_v(),
1572                                 eng_info->runlist_id));
1573         gk20a_writel(g, fifo_sched_disable_r(), disable);
1574
1575         /* chid from pbdma status */
1576         pbdma_stat = gk20a_readl(g, fifo_pbdma_status_r(eng_info->pbdma_id));
1577         chan_stat  = fifo_pbdma_status_chan_status_v(pbdma_stat);
1578         if (chan_stat == fifo_pbdma_status_chan_status_valid_v() ||
1579             chan_stat == fifo_pbdma_status_chan_status_chsw_save_v())
1580                 pbdma_chid = fifo_pbdma_status_id_v(pbdma_stat);
1581         else if (chan_stat == fifo_pbdma_status_chan_status_chsw_load_v() ||
1582                  chan_stat == fifo_pbdma_status_chan_status_chsw_switch_v())
1583                 pbdma_chid = fifo_pbdma_status_next_id_v(pbdma_stat);
1584
1585         if (pbdma_chid != ~0) {
1586                 err = gk20a_fifo_preempt_channel(g, pbdma_chid);
1587                 if (err)
1588                         goto clean_up;
1589         }
1590
1591         /* chid from engine status */
1592         eng_stat = gk20a_readl(g, fifo_engine_status_r(eng_info->engine_id));
1593         ctx_stat  = fifo_engine_status_ctx_status_v(eng_stat);
1594         if (ctx_stat == fifo_engine_status_ctx_status_valid_v() ||
1595             ctx_stat == fifo_engine_status_ctx_status_ctxsw_save_v())
1596                 engine_chid = fifo_engine_status_id_v(eng_stat);
1597         else if (ctx_stat == fifo_engine_status_ctx_status_ctxsw_load_v() ||
1598                  ctx_stat == fifo_engine_status_ctx_status_ctxsw_switch_v())
1599                 engine_chid = fifo_engine_status_next_id_v(eng_stat);
1600
1601         if (engine_chid != ~0 && engine_chid != pbdma_chid) {
1602                 err = gk20a_fifo_preempt_channel(g, engine_chid);
1603                 if (err)
1604                         goto clean_up;
1605         }
1606
1607 clean_up:
1608         /* re-enable elpg or release pmu mutex */
1609         if (elpg_off)
1610                 gk20a_pmu_enable_elpg(g);
1611         else
1612                 pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
1613
1614         if (err) {
1615                 nvhost_dbg_fn("failed");
1616                 if (gk20a_fifo_enable_engine_activity(g, eng_info))
1617                         nvhost_err(dev_from_gk20a(g),
1618                                 "failed to enable gr engine activity\n");
1619         } else {
1620                 nvhost_dbg_fn("done");
1621         }
1622         return err;
1623 }
1624
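/*
 * Forcefully recover every engine that is currently busy and serviced by
 * the given runlist; used when a runlist update fails to complete in time.
 */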
1625 static void gk20a_fifo_runlist_reset_engines(struct gk20a *g, u32 runlist_id)
1626 {
1627         struct fifo_gk20a *f = &g->fifo;
1628         u32 engines = 0;
1629         int i;
1630
1631         for (i = 0; i < f->max_engines; i++) {
1632                 u32 status = gk20a_readl(g, fifo_engine_status_r(i));
1633                 bool engine_busy = fifo_engine_status_engine_v(status) ==
1634                         fifo_engine_status_engine_busy_v();
1635
1636                 if (engine_busy &&
1637                     (f->engine_info[i].runlist_id == runlist_id))
1638                         engines |= BIT(i);
1639         }
1640         gk20a_fifo_recover(g, engines, true);
1641 }
1642
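/*
 * Wait for hardware to finish fetching the submitted runlist, i.e. for the
 * pending bit in fifo_eng_runlist to drop, bounded by the gr idle timeout.
 */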
1643 static int gk20a_fifo_runlist_wait_pending(struct gk20a *g, u32 runlist_id)
1644 {
1645         struct fifo_runlist_info_gk20a *runlist;
1646         u32 remain;
1647         bool pending;
1648
1649         runlist = &g->fifo.runlist_info[runlist_id];
1650         remain = wait_event_timeout(runlist->runlist_wq,
1651                 ((pending = gk20a_readl(g, fifo_eng_runlist_r(runlist_id)) &
1652                         fifo_eng_runlist_pending_true_f()) == 0),
1653                 msecs_to_jiffies(gk20a_get_gr_idle_timeout(g)));
1654
1655         if (remain == 0 && pending != 0)
1656                 return -ETIMEDOUT;
1657
1658         return 0;
1659 }
1660
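/*
 * Rebuild and submit a runlist with runlist->mutex already held.  The two
 * runlist buffers are used ping-pong style: the inactive buffer is filled
 * from the active_channels bitmap (two words per entry: chid, then 0) and
 * handed to hardware through fifo_runlist_base/fifo_runlist.  A zero-length
 * runlist (suspend, or nothing left to run) is submitted without touching
 * the base pointer.  On a timed-out submit the affected engines are reset
 * and we wait for the runlist to go idle again before continuing.
 */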
1661 static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
1662                                             u32 hw_chid, bool add,
1663                                             bool wait_for_finish)
1664 {
1665         u32 ret = 0;
1666         struct device *d = dev_from_gk20a(g);
1667         struct fifo_gk20a *f = &g->fifo;
1668         struct fifo_runlist_info_gk20a *runlist = NULL;
1669         u32 *runlist_entry_base = NULL;
1670         u32 *runlist_entry = NULL;
1671         phys_addr_t runlist_pa;
1672         u32 old_buf, new_buf;
1673         u32 chid;
1674         u32 count = 0;
1675         runlist = &f->runlist_info[runlist_id];
1676
1677         /* valid channel, add/remove it from active list.
1678            Otherwise, keep active list untouched for suspend/resume. */
1679         if (hw_chid != ~0) {
1680                 if (add) {
1681                         if (test_and_set_bit(hw_chid,
1682                                 runlist->active_channels) == 1)
1683                                 return 0;
1684                 } else {
1685                         if (test_and_clear_bit(hw_chid,
1686                                 runlist->active_channels) == 0)
1687                                 return 0;
1688                 }
1689         }
1690
1691         old_buf = runlist->cur_buffer;
1692         new_buf = !runlist->cur_buffer;
1693
1694         nvhost_dbg_info("runlist_id: %d, switch to new buffer 0x%016llx",
1695                 runlist_id, runlist->mem[new_buf].iova);
1696
1697         runlist_pa = gk20a_get_phys_from_iova(d, runlist->mem[new_buf].iova);
1698         if (!runlist_pa) {
1699                 ret = -EINVAL;
1700                 goto clean_up;
1701         }
1702
1703         runlist_entry_base = runlist->mem[new_buf].cpuva;
1704         if (!runlist_entry_base) {
1705                 ret = -ENOMEM;
1706                 goto clean_up;
1707         }
1708
1709         if (hw_chid != ~0 || /* add/remove a valid channel */
1710             add /* resume to add all channels back */) {
1711                 runlist_entry = runlist_entry_base;
1712                 for_each_set_bit(chid,
1713                         runlist->active_channels, f->num_channels) {
1714                         nvhost_dbg_info("add channel %d to runlist", chid);
1715                         runlist_entry[0] = chid;
1716                         runlist_entry[1] = 0;
1717                         runlist_entry += 2;
1718                         count++;
1719                 }
1720         } else  /* suspend to remove all channels */
1721                 count = 0;
1722
1723         if (count != 0) {
1724                 gk20a_writel(g, fifo_runlist_base_r(),
1725                         fifo_runlist_base_ptr_f(u64_lo32(runlist_pa >> 12)) |
1726                         fifo_runlist_base_target_vid_mem_f());
1727         }
1728
1729         gk20a_writel(g, fifo_runlist_r(),
1730                 fifo_runlist_engine_f(runlist_id) |
1731                 fifo_eng_runlist_length_f(count));
1732
1733         if (wait_for_finish) {
1734                 ret = gk20a_fifo_runlist_wait_pending(g, runlist_id);
1735
1736                 if (ret == -ETIMEDOUT) {
1737                         nvhost_err(dev_from_gk20a(g),
1738                                    "runlist update timeout");
1739
1740                         gk20a_fifo_runlist_reset_engines(g, runlist_id);
1741
1742                         /* engine reset needs the lock. drop it */
1743                         mutex_unlock(&runlist->mutex);
1744                         /* wait until the runlist is active again */
1745                         ret = gk20a_fifo_runlist_wait_pending(g, runlist_id);
1746                         /* get the lock back. at this point everything
1747                          * should be fine */
1748                         mutex_lock(&runlist->mutex);
1749
1750                         if (ret)
1751                                 nvhost_err(dev_from_gk20a(g),
1752                                            "runlist update failed: %d", ret);
1753                 } else if (ret == -EINTR)
1754                         nvhost_err(dev_from_gk20a(g),
1755                                    "runlist update interrupted");
1756         }
1757
1758         runlist->cur_buffer = new_buf;
1759
1760 clean_up:
1761         return ret;
1762 }
1763
1764 /* add/remove a channel from runlist
1765    special cases below: runlist->active_channels will NOT be changed.
1766    (hw_chid == ~0 && !add) means remove all active channels from runlist.
1767    (hw_chid == ~0 &&  add) means restore all active channels on runlist. */
1768 int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 hw_chid,
1769                               bool add, bool wait_for_finish)
1770 {
1771         struct fifo_runlist_info_gk20a *runlist = NULL;
1772         struct fifo_gk20a *f = &g->fifo;
1773         u32 token = PMU_INVALID_MUTEX_OWNER_ID;
1774         u32 elpg_off;
1775         u32 ret = 0;
1776
1777         runlist = &f->runlist_info[runlist_id];
1778
1779         mutex_lock(&runlist->mutex);
1780
1781         /* disable elpg if failed to acquire pmu mutex */
1782         elpg_off = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
1783         if (elpg_off)
1784                 gk20a_pmu_disable_elpg(g);
1785
1786         ret = gk20a_fifo_update_runlist_locked(g, runlist_id, hw_chid, add,
1787                                                wait_for_finish);
1788
1789         /* re-enable elpg or release pmu mutex */
1790         if (elpg_off)
1791                 gk20a_pmu_enable_elpg(g);
1792         else
1793                 pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
1794
1795         mutex_unlock(&runlist->mutex);
1796         return ret;
1797 }
1798
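/*
 * Quiesce the FIFO for suspend: drop the BAR1 base valid bit so no more
 * snooping happens and mask both FIFO interrupt enable registers.
 */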
1799 int gk20a_fifo_suspend(struct gk20a *g)
1800 {
1801         nvhost_dbg_fn("");
1802
1803         /* stop bar1 snooping */
1804         gk20a_writel(g, fifo_bar1_base_r(),
1805                         fifo_bar1_base_valid_false_f());
1806
1807         /* disable fifo intr */
1808         gk20a_writel(g, fifo_intr_en_0_r(), 0);
1809         gk20a_writel(g, fifo_intr_en_1_r(), 0);
1810
1811         nvhost_dbg_fn("done");
1812         return 0;
1813 }
1814
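/* Report whether an MMU fault interrupt is currently pending in fifo_intr_0. */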
1815 bool gk20a_fifo_mmu_fault_pending(struct gk20a *g)
1816 {
1817         return (gk20a_readl(g, fifo_intr_0_r()) &
1818                 fifo_intr_0_mmu_fault_pending_f()) != 0;
1822 }