video: tegra: gk20a: do not set error notifier during debugging
1 /*
2  * drivers/video/tegra/host/gk20a/fifo_gk20a.c
3  *
4  * GK20A Graphics FIFO (gr host)
5  *
6  * Copyright (c) 2011-2014, NVIDIA CORPORATION.  All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms and conditions of the GNU General Public License,
10  * version 2, as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  * more details.
16  *
17  * You should have received a copy of the GNU General Public License along with
18  * this program; if not, write to the Free Software Foundation, Inc.,
19  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20  */
21 #include <linux/delay.h>
22 #include <linux/slab.h>
23 #include <linux/scatterlist.h>
24 #include <trace/events/nvhost.h>
25
26 #include "../dev.h"
27 #include "../nvhost_as.h"
28
29 #include "gk20a.h"
30 #include "hw_fifo_gk20a.h"
31 #include "hw_pbdma_gk20a.h"
32 #include "hw_ccsr_gk20a.h"
33 #include "hw_ram_gk20a.h"
34 #include "hw_proj_gk20a.h"
35 #include "hw_top_gk20a.h"
36 #include "hw_mc_gk20a.h"
37 #include "hw_gr_gk20a.h"
38
39 static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
40                                             u32 hw_chid, bool add,
41                                             bool wait_for_finish);
42 static void gk20a_fifo_handle_mmu_fault_thread(struct work_struct *work);
43
44 /*
45  * Link engine IDs to MMU IDs and vice versa.
46  */
47
48 static inline u32 gk20a_engine_id_to_mmu_id(u32 engine_id)
49 {
50         switch (engine_id) {
51         case ENGINE_GR_GK20A:
52                 return 0x00;
53         case ENGINE_CE2_GK20A:
54                 return 0x1b;
55         default:
56                 return ~0;
57         }
58 }
59
60 static inline u32 gk20a_mmu_id_to_engine_id(u32 mmu_id)
61 {
62         switch (mmu_id) {
63         case 0x00:
64                 return ENGINE_GR_GK20A;
65         case 0x1b:
66                 return ENGINE_CE2_GK20A;
67         default:
68                 return ~0;
69         }
70 }
71
72
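/*
 * Scan the top_device_info table for the graphics engine and record its
 * runlist id, engine id and the PBDMA serving that runlist.  Entries may
 * be chained across two registers, in which case the type enum is taken
 * from the second word.  Returns -EINVAL when no usable GR entry (or no
 * matching PBDMA) is found.
 */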
73 static int init_engine_info(struct fifo_gk20a *f)
74 {
75         struct gk20a *g = f->g;
76         struct device *d = dev_from_gk20a(g);
77         struct fifo_engine_info_gk20a *gr_info;
78         const u32 gr_sw_id = ENGINE_GR_GK20A;
79         u32 i;
80         u32 max_info_entries = top_device_info__size_1_v();
81
82         nvhost_dbg_fn("");
83
84         /* all we really care about is finding the graphics entry;   */
85         /* early on in simulation the table may claim more engines   */
86         f->num_engines = 1;
87
88         gr_info = f->engine_info + gr_sw_id;
89
90         gr_info->sw_id = gr_sw_id;
91         gr_info->name = "gr";
92         gr_info->dev_info_id = top_device_info_type_enum_graphics_v();
93         gr_info->mmu_fault_id = fifo_intr_mmu_fault_eng_id_graphics_v();
94         gr_info->runlist_id = ~0;
95         gr_info->pbdma_id   = ~0;
96         gr_info->engine_id  = ~0;
97
98         for (i = 0; i < max_info_entries; i++) {
99                 u32 table_entry = gk20a_readl(f->g, top_device_info_r(i));
100                 u32 entry = top_device_info_entry_v(table_entry);
101                 u32 engine_enum = top_device_info_type_enum_v(table_entry);
102                 u32 table_entry2 = 0;
103
104                 if (entry == top_device_info_entry_not_valid_v())
105                         continue;
106
107                 if (top_device_info_chain_v(table_entry) ==
108                     top_device_info_chain_enable_v()) {
109
110                         table_entry2 = gk20a_readl(f->g,
111                                                    top_device_info_r(++i));
112
113                         engine_enum = top_device_info_type_enum_v(table_entry2);
114                 }
115
116                 /* we only care about GR engine here */
117                 if (entry == top_device_info_entry_enum_v() &&
118                     engine_enum == gr_info->dev_info_id) {
119                         int pbdma_id;
120                         u32 runlist_bit;
121
122                         gr_info->runlist_id =
123                                 top_device_info_runlist_enum_v(table_entry);
124                         nvhost_dbg_info("gr info: runlist_id %d", gr_info->runlist_id);
125
126                         gr_info->engine_id =
127                                 top_device_info_engine_enum_v(table_entry);
128                         nvhost_dbg_info("gr info: engine_id %d", gr_info->engine_id);
129
130                         runlist_bit = 1 << gr_info->runlist_id;
131
132                         for (pbdma_id = 0; pbdma_id < f->num_pbdma; pbdma_id++) {
133                                 nvhost_dbg_info("gr info: pbdma_map[%d]=%d",
134                                         pbdma_id, f->pbdma_map[pbdma_id]);
135                                 if (f->pbdma_map[pbdma_id] & runlist_bit)
136                                         break;
137                         }
138
139                         if (pbdma_id == f->num_pbdma) {
140                                 nvhost_err(d, "busted pbdma map");
141                                 return -EINVAL;
142                         }
143                         gr_info->pbdma_id = pbdma_id;
144
145                         break;
146                 }
147         }
148
149         if (gr_info->runlist_id == ~0) {
150                 nvhost_err(d, "busted device info");
151                 return -EINVAL;
152         }
153
154         return 0;
155 }
156
157 void gk20a_remove_fifo_support(struct fifo_gk20a *f)
158 {
159         struct gk20a *g = f->g;
160         struct device *d = dev_from_gk20a(g);
161         struct fifo_engine_info_gk20a *engine_info;
162         struct fifo_runlist_info_gk20a *runlist;
163         u32 runlist_id;
164         u32 i;
165
166         nvhost_dbg_fn("");
167
168         if (f->channel) {
169                 int c;
170                 for (c = 0; c < f->num_channels; c++) {
171                         if (f->channel[c].remove_support)
172                                 f->channel[c].remove_support(f->channel+c);
173                 }
174                 kfree(f->channel);
175         }
176         if (f->userd.gpu_va)
177                 gk20a_gmmu_unmap(&g->mm.bar1.vm,
178                                 f->userd.gpu_va,
179                                 f->userd.size,
180                                 mem_flag_none);
181
182         if (f->userd.sgt)
183                 gk20a_free_sgtable(&f->userd.sgt);
184
185         if (f->userd.cpuva)
186                 dma_free_coherent(d,
187                                 f->userd_total_size,
188                                 f->userd.cpuva,
189                                 f->userd.iova);
190         f->userd.cpuva = NULL;
191         f->userd.iova = 0;
192
193         engine_info = f->engine_info + ENGINE_GR_GK20A;
194         runlist_id = engine_info->runlist_id;
195         runlist = &f->runlist_info[runlist_id];
196
197         for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
198                 if (runlist->mem[i].cpuva)
199                         dma_free_coherent(d,
200                                 runlist->mem[i].size,
201                                 runlist->mem[i].cpuva,
202                                 runlist->mem[i].iova);
203                 runlist->mem[i].cpuva = NULL;
204                 runlist->mem[i].iova = 0;
205         }
206
207         kfree(runlist->active_channels);
208
209         kfree(f->runlist_info);
210         kfree(f->pbdma_map);
211         kfree(f->engine_info);
212 }
213
214 /* reads info from hardware and fills in pbdma exception info record */
215 static inline void get_exception_pbdma_info(
216         struct gk20a *g,
217         struct fifo_engine_info_gk20a *eng_info)
218 {
219         struct fifo_pbdma_exception_info_gk20a *e =
220                 &eng_info->pbdma_exception_info;
221
222         u32 pbdma_status_r = e->status_r = gk20a_readl(g,
223                    fifo_pbdma_status_r(eng_info->pbdma_id));
224         e->id = fifo_pbdma_status_id_v(pbdma_status_r); /* vs. id_hw_v()? */
225         e->id_is_chid = fifo_pbdma_status_id_type_v(pbdma_status_r) ==
226                 fifo_pbdma_status_id_type_chid_v();
227         e->chan_status_v  = fifo_pbdma_status_chan_status_v(pbdma_status_r);
228         e->next_id_is_chid =
229                 fifo_pbdma_status_next_id_type_v(pbdma_status_r) ==
230                 fifo_pbdma_status_next_id_type_chid_v();
231         e->next_id = fifo_pbdma_status_next_id_v(pbdma_status_r);
232         e->chsw_in_progress =
233                 fifo_pbdma_status_chsw_v(pbdma_status_r) ==
234                 fifo_pbdma_status_chsw_in_progress_v();
235 }
236
237 static void fifo_pbdma_exception_status(struct gk20a *g,
238         struct fifo_engine_info_gk20a *eng_info)
239 {
240         struct fifo_pbdma_exception_info_gk20a *e;
241         get_exception_pbdma_info(g, eng_info);
242         e = &eng_info->pbdma_exception_info;
243
244         nvhost_dbg_fn("pbdma_id %d, "
245                       "id_type %s, id %d, chan_status %d, "
246                       "next_id_type %s, next_id %d, "
247                       "chsw_in_progress %d",
248                       eng_info->pbdma_id,
249                       e->id_is_chid ? "chid" : "tsgid", e->id, e->chan_status_v,
250                       e->next_id_is_chid ? "chid" : "tsgid", e->next_id,
251                       e->chsw_in_progress);
252 }
253
254 /* reads info from hardware and fills in engine exception info record */
255 static inline void get_exception_engine_info(
256         struct gk20a *g,
257         struct fifo_engine_info_gk20a *eng_info)
258 {
259         struct fifo_engine_exception_info_gk20a *e =
260                 &eng_info->engine_exception_info;
261         u32 engine_status_r = e->status_r =
262                 gk20a_readl(g, fifo_engine_status_r(eng_info->engine_id));
263         e->id = fifo_engine_status_id_v(engine_status_r); /* vs. id_hw_v()? */
264         e->id_is_chid = fifo_engine_status_id_type_v(engine_status_r) ==
265                 fifo_engine_status_id_type_chid_v();
266         e->ctx_status_v = fifo_engine_status_ctx_status_v(engine_status_r);
267         e->faulted =
268                 fifo_engine_status_faulted_v(engine_status_r) ==
269                 fifo_engine_status_faulted_true_v();
270         e->idle =
271                 fifo_engine_status_engine_v(engine_status_r) ==
272                 fifo_engine_status_engine_idle_v();
273         e->ctxsw_in_progress =
274                 fifo_engine_status_ctxsw_v(engine_status_r) ==
275                 fifo_engine_status_ctxsw_in_progress_v();
276 }
277
278 static void fifo_engine_exception_status(struct gk20a *g,
279                                struct fifo_engine_info_gk20a *eng_info)
280 {
281         struct fifo_engine_exception_info_gk20a *e;
282         get_exception_engine_info(g, eng_info);
283         e = &eng_info->engine_exception_info;
284
285         nvhost_dbg_fn("engine_id %d, id_type %s, id %d, ctx_status %d, "
286                       "faulted %d, idle %d, ctxsw_in_progress %d, ",
287                       eng_info->engine_id, e->id_is_chid ? "chid" : "tsgid",
288                       e->id, e->ctx_status_v,
289                       e->faulted, e->idle,  e->ctxsw_in_progress);
290 }
291
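/*
 * Set up runlist bookkeeping for the GR runlist: an active-channel bitmap
 * and MAX_RUNLIST_BUFFERS DMA-coherent buffers, each large enough for one
 * ram_rl_entry per channel.  cur_buffer tracks which buffer (if any) the
 * hardware currently holds, presumably so the next runlist can be built
 * in another buffer while the old one is still in use.
 */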
292 static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
293 {
294         struct fifo_engine_info_gk20a *engine_info;
295         struct fifo_runlist_info_gk20a *runlist;
296         struct device *d = dev_from_gk20a(g);
297         u32 runlist_id;
298         u32 i;
299         u64 runlist_size;
300
301         nvhost_dbg_fn("");
302
303         f->max_runlists = fifo_eng_runlist_base__size_1_v();
304         f->runlist_info = kzalloc(sizeof(struct fifo_runlist_info_gk20a) *
305                                   f->max_runlists, GFP_KERNEL);
306         if (!f->runlist_info)
307                 goto clean_up;
308
309         engine_info = f->engine_info + ENGINE_GR_GK20A;
310         runlist_id = engine_info->runlist_id;
311         runlist = &f->runlist_info[runlist_id];
312
313         runlist->active_channels =
314                 kzalloc(DIV_ROUND_UP(f->num_channels, BITS_PER_BYTE),
315                         GFP_KERNEL);
316         if (!runlist->active_channels)
317                 goto clean_up_runlist_info;
318
319         runlist_size  = ram_rl_entry_size_v() * f->num_channels;
320         for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
321                 runlist->mem[i].cpuva =
322                         dma_alloc_coherent(d,
323                                         runlist_size,
324                                         &runlist->mem[i].iova,
325                                         GFP_KERNEL);
326                 if (!runlist->mem[i].cpuva) {
327                         dev_err(d, "memory allocation failed\n");
328                         goto clean_up_runlist;
329                 }
330                 runlist->mem[i].size = runlist_size;
331         }
332         mutex_init(&runlist->mutex);
333         init_waitqueue_head(&runlist->runlist_wq);
334
335         /* None of the buffers is pinned if this value doesn't change.
336          * Otherwise, one of them (cur_buffer) must have been pinned. */
337         runlist->cur_buffer = MAX_RUNLIST_BUFFERS;
338
339         nvhost_dbg_fn("done");
340         return 0;
341
342 clean_up_runlist:
343         for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
344                 if (runlist->mem[i].cpuva)
345                         dma_free_coherent(d,
346                                 runlist->mem[i].size,
347                                 runlist->mem[i].cpuva,
348                                 runlist->mem[i].iova);
349                 runlist->mem[i].cpuva = NULL;
350                 runlist->mem[i].iova = 0;
351         }
352
353         kfree(runlist->active_channels);
354         runlist->active_channels = NULL;
355
356 clean_up_runlist_info:
357         kfree(f->runlist_info);
358         f->runlist_info = NULL;
359
360 clean_up:
361         nvhost_dbg_fn("fail");
362         return -ENOMEM;
363 }
364
365 #define GRFIFO_TIMEOUT_CHECK_PERIOD_US 100000
366
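/*
 * Bring the FIFO block out of reset and program its interrupt and timeout
 * state: reset PFIFO/CE2 through the MC, enable every PBDMA, unmask the
 * FIFO and per-PBDMA interrupts (with lbreq masked off), clear any pending
 * runlist interrupts and set the FB/PB/engine timeout registers.
 */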
367 int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
368 {
369         u32 intr_stall;
370         u32 mask;
371         u32 timeout;
372         int i;
373
374         nvhost_dbg_fn("");
375         /* enable pmc pfifo */
376         gk20a_reset(g, mc_enable_pfifo_enabled_f()
377                         | mc_enable_ce2_enabled_f());
378
379         /* enable pbdma */
380         mask = 0;
381         for (i = 0; i < proj_host_num_pbdma_v(); ++i)
382                 mask |= mc_enable_pb_sel_f(mc_enable_pb_0_enabled_v(), i);
383         gk20a_writel(g, mc_enable_pb_r(), mask);
384
385         /* enable pfifo interrupt */
386         gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFF);
387         gk20a_writel(g, fifo_intr_en_0_r(), 0x7FFFFFFF);
388         gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000);
389
390         /* enable pbdma interrupt */
391         mask = 0;
392         for (i = 0; i < proj_host_num_pbdma_v(); i++) {
393                 intr_stall = gk20a_readl(g, pbdma_intr_stall_r(i));
394                 intr_stall &= ~pbdma_intr_stall_lbreq_enabled_f();
395                 gk20a_writel(g, pbdma_intr_stall_r(i), intr_stall);
396                 gk20a_writel(g, pbdma_intr_0_r(i), 0xFFFFFFFF);
397                 gk20a_writel(g, pbdma_intr_en_0_r(i),
398                         (~0) & ~pbdma_intr_en_0_lbreq_enabled_f());
399                 gk20a_writel(g, pbdma_intr_1_r(i), 0xFFFFFFFF);
400                 gk20a_writel(g, pbdma_intr_en_1_r(i), 0xFFFFFFFF);
401         }
402
403         /* TBD: apply overrides */
404
405         /* TBD: BLCG prod */
406
407         /* reset runlist interrupts */
408         gk20a_writel(g, fifo_intr_runlist_r(), ~0);
409
410         /* TBD: do we need those? */
411         timeout = gk20a_readl(g, fifo_fb_timeout_r());
412         timeout = set_field(timeout, fifo_fb_timeout_period_m(),
413                         fifo_fb_timeout_period_max_f());
414         gk20a_writel(g, fifo_fb_timeout_r(), timeout);
415
416         timeout = gk20a_readl(g, fifo_pb_timeout_r());
417         timeout &= ~fifo_pb_timeout_detection_enabled_f();
418         gk20a_writel(g, fifo_pb_timeout_r(), timeout);
419
420         timeout = GRFIFO_TIMEOUT_CHECK_PERIOD_US |
421                         fifo_eng_timeout_detection_enabled_f();
422         gk20a_writel(g, fifo_eng_timeout_r(), timeout);
423
424         nvhost_dbg_fn("done");
425
426         return 0;
427 }
428
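/*
 * Sort the PBDMA_INTR_0 bits into three groups that the interrupt handler
 * acts on later: device_fatal_0 (unrecoverable device errors),
 * channel_fatal_0 (errors recoverable by resetting the channel) and
 * restartable_0 (sw-method / acquire timeouts that can simply be retried).
 */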
429 static void gk20a_init_fifo_pbdma_intr_descs(struct fifo_gk20a *f)
430 {
431         /* These are all errors which indicate something is seriously
432          * wrong with the device. */
433         f->intr.pbdma.device_fatal_0 =
434                 pbdma_intr_0_memreq_pending_f() |
435                 pbdma_intr_0_memack_timeout_pending_f() |
436                 pbdma_intr_0_memack_extra_pending_f() |
437                 pbdma_intr_0_memdat_timeout_pending_f() |
438                 pbdma_intr_0_memdat_extra_pending_f() |
439                 pbdma_intr_0_memflush_pending_f() |
440                 pbdma_intr_0_memop_pending_f() |
441                 pbdma_intr_0_lbconnect_pending_f() |
442                 pbdma_intr_0_lbreq_pending_f() |
443                 pbdma_intr_0_lback_timeout_pending_f() |
444                 pbdma_intr_0_lback_extra_pending_f() |
445                 pbdma_intr_0_lbdat_timeout_pending_f() |
446                 pbdma_intr_0_lbdat_extra_pending_f() |
447                 pbdma_intr_0_xbarconnect_pending_f() |
448                 pbdma_intr_0_pri_pending_f();
449
450         /* These are data parsing or framing errors, or others which can
451          * be recovered from with intervention... or by just resetting
452          * the channel. */
453         f->intr.pbdma.channel_fatal_0 =
454                 pbdma_intr_0_gpfifo_pending_f() |
455                 pbdma_intr_0_gpptr_pending_f() |
456                 pbdma_intr_0_gpentry_pending_f() |
457                 pbdma_intr_0_gpcrc_pending_f() |
458                 pbdma_intr_0_pbptr_pending_f() |
459                 pbdma_intr_0_pbentry_pending_f() |
460                 pbdma_intr_0_pbcrc_pending_f() |
461                 pbdma_intr_0_method_pending_f() |
462                 pbdma_intr_0_methodcrc_pending_f() |
463                 pbdma_intr_0_pbseg_pending_f() |
464                 pbdma_intr_0_signature_pending_f();
465
466         /* Can be used for sw-methods, or represents
467          * a recoverable timeout. */
468         f->intr.pbdma.restartable_0 =
469                 pbdma_intr_0_device_pending_f() |
470                 pbdma_intr_0_acquire_pending_f();
471 }
472
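/*
 * One-time software setup: allocate the USERD backing store and map it
 * through BAR1, create the per-channel, per-PBDMA and per-engine tables,
 * then point every channel at its slice of USERD and initialise its
 * channel support structures.  Safe to call repeatedly; returns early
 * once sw_ready is set.
 */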
473 static int gk20a_init_fifo_setup_sw(struct gk20a *g)
474 {
475         struct fifo_gk20a *f = &g->fifo;
476         struct device *d = dev_from_gk20a(g);
477         int chid, i, err = 0;
478
479         nvhost_dbg_fn("");
480
481         if (f->sw_ready) {
482                 nvhost_dbg_fn("skip init");
483                 return 0;
484         }
485
486         f->g = g;
487
488         INIT_WORK(&f->fault_restore_thread,
489                   gk20a_fifo_handle_mmu_fault_thread);
490         mutex_init(&f->intr.isr.mutex);
491         gk20a_init_fifo_pbdma_intr_descs(f); /* just filling in data/tables */
492
493         f->num_channels = ccsr_channel__size_1_v();
494         f->num_pbdma = proj_host_num_pbdma_v();
495         f->max_engines = ENGINE_INVAL_GK20A;
496
497         f->userd_entry_size = 1 << ram_userd_base_shift_v();
498         f->userd_total_size = f->userd_entry_size * f->num_channels;
499
500         f->userd.cpuva = dma_alloc_coherent(d,
501                                         f->userd_total_size,
502                                         &f->userd.iova,
503                                         GFP_KERNEL);
504         if (!f->userd.cpuva) {
505                 dev_err(d, "memory allocation failed\n");
506                 err = -ENOMEM;
507                 goto clean_up;
508         }
509         err = gk20a_get_sgtable(d, &f->userd.sgt,
510                                 f->userd.cpuva, f->userd.iova,
511                                 f->userd_total_size);
512         if (err) {
513                 dev_err(d, "failed to create sg table\n");
514                 goto clean_up;
515         }
516
517         /* bar1 va */
518         f->userd.gpu_va = gk20a_gmmu_map(&g->mm.bar1.vm,
519                                         &f->userd.sgt,
520                                         f->userd_total_size,
521                                         0, /* flags */
522                                         mem_flag_none);
523         if (!f->userd.gpu_va) {
524                 dev_err(d, "gmmu mapping failed\n");
525                 err = -ENOMEM;
526                 goto clean_up;
527         }
528         nvhost_dbg(dbg_map, "userd bar1 va = 0x%llx", f->userd.gpu_va);
529
530         f->userd.size = f->userd_total_size;
531
532         f->channel = kzalloc(f->num_channels * sizeof(*f->channel),
533                                 GFP_KERNEL);
534         f->pbdma_map = kzalloc(f->num_pbdma * sizeof(*f->pbdma_map),
535                                 GFP_KERNEL);
536         f->engine_info = kzalloc(f->max_engines * sizeof(*f->engine_info),
537                                 GFP_KERNEL);
538
539         if (!(f->channel && f->pbdma_map && f->engine_info)) {
540                 err = -ENOMEM;
541                 goto clean_up;
542         }
543
544         /* pbdma map needs to be in place before calling engine info init */
545         for (i = 0; i < f->num_pbdma; ++i)
546                 f->pbdma_map[i] = gk20a_readl(g, fifo_pbdma_map_r(i));
547
548         init_engine_info(f);
549
550         init_runlist(g, f);
551
552         for (chid = 0; chid < f->num_channels; chid++) {
553                 f->channel[chid].userd_cpu_va =
554                         f->userd.cpuva + chid * f->userd_entry_size;
555                 f->channel[chid].userd_iova =
556                         NV_MC_SMMU_VADDR_TRANSLATE(f->userd.iova)
557                                 + chid * f->userd_entry_size;
558                 f->channel[chid].userd_gpu_va =
559                         f->userd.gpu_va + chid * f->userd_entry_size;
560
561                 gk20a_init_channel_support(g, chid);
562         }
563         mutex_init(&f->ch_inuse_mutex);
564
565         f->remove_support = gk20a_remove_fifo_support;
566
567         f->deferred_reset_pending = false;
568         mutex_init(&f->deferred_reset_mutex);
569
570         f->sw_ready = true;
571
572         nvhost_dbg_fn("done");
573         return 0;
574
575 clean_up:
576         nvhost_dbg_fn("fail");
577         if (f->userd.gpu_va)
578                 gk20a_gmmu_unmap(&g->mm.bar1.vm,
579                                         f->userd.gpu_va,
580                                         f->userd.size,
581                                         mem_flag_none);
582         if (f->userd.sgt)
583                 gk20a_free_sgtable(&f->userd.sgt);
584         if (f->userd.cpuva)
585                 dma_free_coherent(d,
586                                 f->userd_total_size,
587                                 f->userd.cpuva,
588                                 f->userd.iova);
589         f->userd.cpuva = NULL;
590         f->userd.iova = 0;
591
592         memset(&f->userd, 0, sizeof(struct userd_desc));
593
594         kfree(f->channel);
595         f->channel = NULL;
596         kfree(f->pbdma_map);
597         f->pbdma_map = NULL;
598         kfree(f->engine_info);
599         f->engine_info = NULL;
600
601         return err;
602 }
603
604 static void gk20a_fifo_handle_runlist_event(struct gk20a *g)
605 {
606         struct fifo_gk20a *f = &g->fifo;
607         struct fifo_runlist_info_gk20a *runlist;
608         unsigned long runlist_event;
609         u32 runlist_id;
610
611         runlist_event = gk20a_readl(g, fifo_intr_runlist_r());
612         gk20a_writel(g, fifo_intr_runlist_r(), runlist_event);
613
614         for_each_set_bit(runlist_id, &runlist_event, f->max_runlists) {
615                 runlist = &f->runlist_info[runlist_id];
616                 wake_up(&runlist->runlist_wq);
617         }
618
619 }
620
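/*
 * Hardware-side setup: sanity-check that CPU and BAR1 views of the USERD
 * region stay coherent (write through one path, read back through the
 * other), then program fifo_bar1_base with the USERD GPU VA.
 */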
621 static int gk20a_init_fifo_setup_hw(struct gk20a *g)
622 {
623         struct fifo_gk20a *f = &g->fifo;
624
625         nvhost_dbg_fn("");
626
627         /* test write, read through bar1 @ userd region before
628          * turning on the snooping */
629         {
630                 struct fifo_gk20a *f = &g->fifo;
631                 u32 v, v1 = 0x33, v2 = 0x55;
632
633                 u32 bar1_vaddr = f->userd.gpu_va;
634                 volatile u32 *cpu_vaddr = f->userd.cpuva;
635
636                 nvhost_dbg_info("test bar1 @ vaddr 0x%x",
637                            bar1_vaddr);
638
639                 v = gk20a_bar1_readl(g, bar1_vaddr);
640
641                 *cpu_vaddr = v1;
642                 smp_mb();
643
644                 if (v1 != gk20a_bar1_readl(g, bar1_vaddr)) {
645                         nvhost_err(dev_from_gk20a(g), "bar1 broken @ gk20a!");
646                         return -EINVAL;
647                 }
648
649                 gk20a_bar1_writel(g, bar1_vaddr, v2);
650
651                 if (v2 != gk20a_bar1_readl(g, bar1_vaddr)) {
652                         nvhost_err(dev_from_gk20a(g), "bar1 broken @ gk20a!");
653                         return -EINVAL;
654                 }
655
656                 /* is it visible to the cpu? */
657                 if (*cpu_vaddr != v2) {
658                         nvhost_err(dev_from_gk20a(g),
659                                 "cpu didn't see bar1 write @ %p!",
660                                 cpu_vaddr);
661                 }
662
663                 /* put it back */
664                 gk20a_bar1_writel(g, bar1_vaddr, v);
665         }
666
667         /*XXX all manner of flushes and caching worries, etc */
668
669         /* set the base for the userd region now */
670         gk20a_writel(g, fifo_bar1_base_r(),
671                         fifo_bar1_base_ptr_f(f->userd.gpu_va >> 12) |
672                         fifo_bar1_base_valid_true_f());
673
674         nvhost_dbg_fn("done");
675
676         return 0;
677 }
678
679 int gk20a_init_fifo_support(struct gk20a *g)
680 {
681         int err;
682
683         err = gk20a_init_fifo_setup_sw(g);
684         if (err)
685                 return err;
686
687         err = gk20a_init_fifo_setup_hw(g);
688         if (err)
689                 return err;
690
691         return err;
692 }
693
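/*
 * Resolve a faulting instance-block address back to its channel by a
 * linear search over the channel table; returns NULL when no channel owns
 * the instance block (e.g. a BAR1 or PMU fault).
 */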
694 static struct channel_gk20a *
695 channel_from_inst_ptr(struct fifo_gk20a *f, u64 inst_ptr)
696 {
697         int ci;
698         if (unlikely(!f->channel))
699                 return NULL;
700         for (ci = 0; ci < f->num_channels; ci++) {
701                 struct channel_gk20a *c = f->channel+ci;
702                 if (c->inst_block.cpuva &&
703                     (inst_ptr == c->inst_block.cpu_pa))
704                         return f->channel+ci;
705         }
706         return NULL;
707 }
708
709 /* fault info/descriptions.
710  * tbd: move to setup
711  *  */
712 static const char * const fault_type_descs[] = {
713          "pde", /*fifo_intr_mmu_fault_info_type_pde_v() == 0 */
714          "pde size",
715          "pte",
716          "va limit viol",
717          "unbound inst",
718          "priv viol",
719          "ro viol",
720          "wo viol",
721          "pitch mask",
722          "work creation",
723          "bad aperture",
724          "compression failure",
725          "bad kind",
726          "region viol",
727          "dual ptes",
728          "poisoned",
729 };
730 /* engine descriptions */
731 static const char * const engine_subid_descs[] = {
732         "gpc",
733         "hub",
734 };
735
736 static const char * const hub_client_descs[] = {
737         "vip", "ce0", "ce1", "dniso", "fe", "fecs", "host", "host cpu",
738         "host cpu nb", "iso", "mmu", "mspdec", "msppp", "msvld",
739         "niso", "p2p", "pd", "perf", "pmu", "raster twod", "scc",
740         "scc nb", "sec", "ssync", "gr copy", "ce2", "xv", "mmu nb",
741         "msenc", "d falcon", "sked", "a falcon", "n/a",
742 };
743
744 static const char * const gpc_client_descs[] = {
745         "l1 0", "t1 0", "pe 0",
746         "l1 1", "t1 1", "pe 1",
747         "l1 2", "t1 2", "pe 2",
748         "l1 3", "t1 3", "pe 3",
749         "rast", "gcc", "gpccs",
750         "prop 0", "prop 1", "prop 2", "prop 3",
751         "l1 4", "t1 4", "pe 4",
752         "l1 5", "t1 5", "pe 5",
753         "l1 6", "t1 6", "pe 6",
754         "l1 7", "t1 7", "pe 7",
755         "gpm",
756         "ltp utlb 0", "ltp utlb 1", "ltp utlb 2", "ltp utlb 3",
757         "rgg utlb",
758 };
759
760 /* reads info from hardware and fills in mmu fault info record */
761 static inline void get_exception_mmu_fault_info(
762         struct gk20a *g, u32 engine_id,
763         struct fifo_mmu_fault_info_gk20a *f)
764 {
765         u32 fault_info_v;
766
767         nvhost_dbg_fn("engine_id %d", engine_id);
768
769         memset(f, 0, sizeof(*f));
770
771         f->fault_info_v = fault_info_v = gk20a_readl(g,
772              fifo_intr_mmu_fault_info_r(engine_id));
773         f->fault_type_v =
774                 fifo_intr_mmu_fault_info_type_v(fault_info_v);
775         f->engine_subid_v =
776                 fifo_intr_mmu_fault_info_engine_subid_v(fault_info_v);
777         f->client_v = fifo_intr_mmu_fault_info_client_v(fault_info_v);
778
779         BUG_ON(f->fault_type_v >= ARRAY_SIZE(fault_type_descs));
780         f->fault_type_desc =  fault_type_descs[f->fault_type_v];
781
782         BUG_ON(f->engine_subid_v >= ARRAY_SIZE(engine_subid_descs));
783         f->engine_subid_desc = engine_subid_descs[f->engine_subid_v];
784
785         if (f->engine_subid_v ==
786             fifo_intr_mmu_fault_info_engine_subid_hub_v()) {
787
788                 BUG_ON(f->client_v >= ARRAY_SIZE(hub_client_descs));
789                 f->client_desc = hub_client_descs[f->client_v];
790         } else if (f->engine_subid_v ==
791                    fifo_intr_mmu_fault_info_engine_subid_gpc_v()) {
792                 BUG_ON(f->client_v >= ARRAY_SIZE(gpc_client_descs));
793                 f->client_desc = gpc_client_descs[f->client_v];
794         } else {
795                 BUG_ON(1);
796         }
797
798         f->fault_hi_v = gk20a_readl(g, fifo_intr_mmu_fault_hi_r(engine_id));
799         f->fault_lo_v = gk20a_readl(g, fifo_intr_mmu_fault_lo_r(engine_id));
800         /* note:ignoring aperture on gk20a... */
801         f->inst_ptr = fifo_intr_mmu_fault_inst_ptr_v(
802                  gk20a_readl(g, fifo_intr_mmu_fault_inst_r(engine_id)));
803         /* note: inst_ptr is a 40b phys addr.  */
804         f->inst_ptr <<= fifo_intr_mmu_fault_inst_ptr_align_shift_v();
805 }
806
807 static void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
808 {
809         nvhost_dbg_fn("");
810
811         if (engine_id == top_device_info_type_enum_graphics_v()) {
812                 /* resetting engine using mc_enable_r() is not enough,
813                  * we do full init sequence */
814                 gk20a_gr_reset(g);
815         }
816         if (engine_id == top_device_info_type_enum_copy0_v())
817                 gk20a_reset(g, mc_enable_ce2_m());
818 }
819
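/*
 * Deferred half of MMU fault recovery, run from the fault_restore_thread
 * work item: reinitialise FECS/GR through the PMU, re-enable ELPG, rebuild
 * the runlists and finally drop the runlist mutexes that were taken in
 * gk20a_fifo_handle_mmu_fault().
 */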
820 static void gk20a_fifo_handle_mmu_fault_thread(struct work_struct *work)
821 {
822         struct fifo_gk20a *f = container_of(work, struct fifo_gk20a,
823                                             fault_restore_thread);
824         struct gk20a *g = f->g;
825         int i;
826
827         /* Reinitialise FECS and GR */
828         gk20a_init_pmu_setup_hw2(g);
829
830         /* It is safe to enable ELPG again. */
831         gk20a_pmu_enable_elpg(g);
832
833         /* Restore the runlist */
834         for (i = 0; i < g->fifo.max_runlists; i++)
835                 gk20a_fifo_update_runlist_locked(g, i, ~0, true, true);
836
837         /* unlock all runlists */
838         for (i = 0; i < g->fifo.max_runlists; i++)
839                 mutex_unlock(&g->fifo.runlist_info[i].mutex);
840
841 }
842
843 static void gk20a_fifo_handle_chsw_fault(struct gk20a *g)
844 {
845         u32 intr;
846
847         intr = gk20a_readl(g, fifo_intr_chsw_error_r());
848         nvhost_err(dev_from_gk20a(g), "chsw: %08x\n", intr);
849         gk20a_fecs_dump_falcon_stats(g);
850         gk20a_writel(g, fifo_intr_chsw_error_r(), intr);
851 }
852
853 static void gk20a_fifo_handle_dropped_mmu_fault(struct gk20a *g)
854 {
855         struct device *dev = dev_from_gk20a(g);
856         u32 fault_id = gk20a_readl(g, fifo_intr_mmu_fault_id_r());
857         nvhost_err(dev, "dropped mmu fault (0x%08x)", fault_id);
858 }
859
860 static bool gk20a_fifo_should_defer_engine_reset(struct gk20a *g, u32 engine_id,
861                 struct fifo_mmu_fault_info_gk20a *f, bool fake_fault)
862 {
863         /* channel recovery is only deferred if an SM debugger
864            is attached and MMU debug mode is enabled */
865         if (!gk20a_gr_sm_debugger_attached(g) ||
866             !gk20a_mm_mmu_debug_mode_enabled(g))
867                 return false;
868
869         /* if this fault is fake (due to RC recovery), don't defer recovery */
870         if (fake_fault)
871                 return false;
872
873         if (engine_id != ENGINE_GR_GK20A ||
874             f->engine_subid_v != fifo_intr_mmu_fault_info_engine_subid_gpc_v())
875                 return false;
876
877         return true;
878 }
879
880 void fifo_gk20a_finish_mmu_fault_handling(struct gk20a *g,
881                 unsigned long fault_id)
882 {
883         u32 engine_mmu_id;
884         int i;
885         /* reset engines */
886         for_each_set_bit(engine_mmu_id, &fault_id, 32) {
887                 u32 engine_id = gk20a_mmu_id_to_engine_id(engine_mmu_id);
888                 if (engine_id != ~0)
889                         gk20a_fifo_reset_engine(g, engine_id);
890         }
891
892         /* CLEAR the runlists. Do not wait for runlist to start as
893          * some engines may not be available right now */
894         for (i = 0; i < g->fifo.max_runlists; i++)
895                 gk20a_fifo_update_runlist_locked(g, i, ~0, false, false);
896
897         /* clear interrupt */
898         gk20a_writel(g, fifo_intr_mmu_fault_id_r(), fault_id);
899
900         /* resume scheduler */
901         gk20a_writel(g, fifo_error_sched_disable_r(),
902                      gk20a_readl(g, fifo_error_sched_disable_r()));
903
904         /* Spawn a work to enable PMU and restore runlists */
905         schedule_work(&g->fifo.fault_restore_thread);
906 }
907
908 static bool gk20a_fifo_set_ctx_mmu_error(struct gk20a *g,
909                 struct channel_gk20a *ch)
910 {
911         bool verbose = true;
912         if (!ch || !ch->hwctx)
913                 return verbose;
914         nvhost_err(dev_from_gk20a(g),
915                 "channel %d with hwctx generated a mmu fault",
916                 ch->hw_chid);
917         if (ch->hwctx->error_notifier) {
918                 u32 err = ch->hwctx->error_notifier->info32;
919                 if (err) {
920                         /* If the error code is already set, this mmu fault
921                          * was triggered as part of recovery from another
922                          * error condition.
923                          * Don't overwrite the error flag. */
924
925                         /* Fifo timeout debug spew is controlled by user */
926                         if (err == NVHOST_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT)
927                                 verbose = ch->hwctx->timeout_debug_dump;
928                 } else {
929                         gk20a_set_error_notifier(ch->hwctx,
930                                 NVHOST_CHANNEL_FIFO_ERROR_MMU_ERR_FLT);
931                 }
932         }
933         /* mark channel as faulted */
934         ch->hwctx->has_timedout = true;
935         wmb();
936         /* unblock pending waits */
937         wake_up(&ch->semaphore_wq);
938         wake_up(&ch->notifier_wq);
939         wake_up(&ch->submit_wq);
940         return verbose;
941 }
942
943
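/*
 * Core MMU fault handler.  Disables ELPG, determines the set of faulted
 * engines (either the real fault id from hardware, or a "fake" set staged
 * by gk20a_fifo_recover()), locks every runlist, then identifies the
 * offending channel and takes it off the hardware.  If an SM debugger is
 * attached the engine reset is deferred to channel free; otherwise the
 * recovery is completed immediately.  Returns whether a verbose debug
 * dump is warranted.
 */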
944 static bool gk20a_fifo_handle_mmu_fault(struct gk20a *g)
945 {
946         bool fake_fault;
947         unsigned long fault_id;
948         u32 engine_mmu_id;
949         int i;
950         bool verbose = true;
951         nvhost_dbg_fn("");
952
953         g->fifo.deferred_reset_pending = false;
954
955         /* Disable ELPG */
956         gk20a_pmu_disable_elpg(g);
957
958         /* If we have recovery in progress, MMU fault id is invalid */
959         if (g->fifo.mmu_fault_engines) {
960                 fault_id = g->fifo.mmu_fault_engines;
961                 g->fifo.mmu_fault_engines = 0;
962                 fake_fault = true;
963         } else {
964                 fault_id = gk20a_readl(g, fifo_intr_mmu_fault_id_r());
965                 fake_fault = false;
966                 nvhost_debug_dump(g->host);
967         }
968
969         /* lock all runlists. Note that the locks are released in
970          * gk20a_fifo_handle_mmu_fault_thread() */
971         for (i = 0; i < g->fifo.max_runlists; i++)
972                 mutex_lock(&g->fifo.runlist_info[i].mutex);
973
974         /* go through all faulted engines */
975         for_each_set_bit(engine_mmu_id, &fault_id, 32) {
976                 /* bits in fifo_intr_mmu_fault_id_r do not correspond 1:1 to
977                  * engines. Convert engine_mmu_id to engine_id */
978                 u32 engine_id = gk20a_mmu_id_to_engine_id(engine_mmu_id);
979                 struct fifo_runlist_info_gk20a *runlist = g->fifo.runlist_info;
980                 struct fifo_mmu_fault_info_gk20a f;
981                 struct channel_gk20a *ch = NULL;
982
983                 get_exception_mmu_fault_info(g, engine_mmu_id, &f);
984                 trace_nvhost_gk20a_mmu_fault(f.fault_hi_v,
985                                              f.fault_lo_v,
986                                              f.fault_info_v,
987                                              f.inst_ptr,
988                                              engine_id,
989                                              f.engine_subid_desc,
990                                              f.client_desc,
991                                              f.fault_type_desc);
992                 nvhost_err(dev_from_gk20a(g), "mmu fault on engine %d, "
993                            "engine subid %d (%s), client %d (%s), "
994                            "addr 0x%08x:0x%08x, type %d (%s), info 0x%08x, "
995                            "inst_ptr 0x%llx\n",
996                            engine_id,
997                            f.engine_subid_v, f.engine_subid_desc,
998                            f.client_v, f.client_desc,
999                            f.fault_hi_v, f.fault_lo_v,
1000                            f.fault_type_v, f.fault_type_desc,
1001                            f.fault_info_v, f.inst_ptr);
1002
1003                 /* get the channel */
1004                 if (fake_fault) {
1005                         /* read and parse engine status */
1006                         u32 status = gk20a_readl(g,
1007                                 fifo_engine_status_r(engine_id));
1008                         u32 ctx_status =
1009                                 fifo_engine_status_ctx_status_v(status);
1010                         bool type_ch = fifo_pbdma_status_id_type_v(status) ==
1011                                 fifo_pbdma_status_id_type_chid_v();
1012
1013                         /* use next_id if context load is failing */
1014                         u32 id = (ctx_status ==
1015                                 fifo_engine_status_ctx_status_ctxsw_load_v()) ?
1016                                 fifo_engine_status_next_id_v(status) :
1017                                 fifo_engine_status_id_v(status);
1018
1019                         if (type_ch) {
1020                                 ch = g->fifo.channel + id;
1021                         } else {
1022                                 nvhost_err(dev_from_gk20a(g), "non-chid type not supported");
1023                                 WARN_ON(1);
1024                         }
1025                 } else {
1026                         /* read channel based on instruction pointer */
1027                         ch = channel_from_inst_ptr(&g->fifo, f.inst_ptr);
1028                 }
1029
1030                 if (ch) {
1031                         if (ch->in_use) {
1032                                 /* disable the channel from hw and increment
1033                                  * syncpoints */
1034                                 gk20a_disable_channel_no_update(ch);
1035
1036                                 /* remove the channel from runlist */
1037                                 clear_bit(ch->hw_chid,
1038                                           runlist->active_channels);
1039                         }
1040
1041                         /* check if engine reset should be deferred */
1042                         if (gk20a_fifo_should_defer_engine_reset(g, engine_id, &f, fake_fault)) {
1043                                 g->fifo.mmu_fault_engines = fault_id;
1044
1045                                 /* handled during channel free */
1046                                 g->fifo.deferred_reset_pending = true;
1047                         } else
1048                                 verbose = gk20a_fifo_set_ctx_mmu_error(g, ch);
1049
1050                 } else if (f.inst_ptr ==
1051                                 g->mm.bar1.inst_block.cpu_pa) {
1052                         nvhost_err(dev_from_gk20a(g), "mmu fault from bar1");
1053                 } else if (f.inst_ptr ==
1054                                 g->mm.pmu.inst_block.cpu_pa) {
1055                         nvhost_err(dev_from_gk20a(g), "mmu fault from pmu");
1056                 } else
1057                         nvhost_err(dev_from_gk20a(g), "couldn't locate channel for mmu fault");
1058         }
1059
1060         if (g->fifo.deferred_reset_pending) {
1061                 nvhost_dbg(dbg_intr | dbg_gpu_dbg, "sm debugger attached,"
1062                            " deferring channel recovery to channel free");
1063                 /* clear interrupt */
1064                 gk20a_writel(g, fifo_intr_mmu_fault_id_r(), fault_id);
1065                 return verbose;
1066         }
1067
1068         /* resetting the engines and clearing the runlists is done in
1069            a separate function to allow deferred reset. */
1070         fifo_gk20a_finish_mmu_fault_handling(g, fault_id);
1071
1072         return verbose;
1073 }
1074
1075 static void gk20a_fifo_get_faulty_channel(struct gk20a *g, int engine_id,
1076                                           u32 *chid, bool *type_ch)
1077 {
1078         u32 status = gk20a_readl(g, fifo_engine_status_r(engine_id));
1079         u32 ctx_status = fifo_engine_status_ctx_status_v(status);
1080
1081         *type_ch = fifo_pbdma_status_id_type_v(status) ==
1082                 fifo_pbdma_status_id_type_chid_v();
1083         /* use next_id if context load is failing */
1084         *chid = (ctx_status ==
1085                 fifo_engine_status_ctx_status_ctxsw_load_v()) ?
1086                 fifo_engine_status_next_id_v(status) :
1087                 fifo_engine_status_id_v(status);
1088 }
1089
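/*
 * Recovery entry point used outside of real MMU faults.  For every engine
 * in __engine_ids, look up the channel it is running and mark all engines
 * sharing that channel, then trigger fake MMU faults on them so the normal
 * fault path performs the teardown.  Waits (with backoff) for the fault
 * interrupt to assert before releasing the trigger registers.
 */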
1090 void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids,
1091                 bool verbose)
1092 {
1093         unsigned long end_jiffies = jiffies +
1094                 msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
1095         unsigned long delay = GR_IDLE_CHECK_DEFAULT;
1096         unsigned long engine_id, i;
1097         unsigned long _engine_ids = __engine_ids;
1098         unsigned long engine_ids = 0;
1099         int ret;
1100
1101         if (verbose)
1102                 nvhost_debug_dump(g->host);
1103
1104         /* store faulted engines in advance */
1105         g->fifo.mmu_fault_engines = 0;
1106         for_each_set_bit(engine_id, &_engine_ids, 32) {
1107                 bool ref_type_ch;
1108                 u32 ref_chid;
1109                 gk20a_fifo_get_faulty_channel(g, engine_id, &ref_chid,
1110                                               &ref_type_ch);
1111
1112                 /* Reset *all* engines that use the
1113                  * same channel as faulty engine */
1114                 for (i = 0; i < g->fifo.max_engines; i++) {
1115                         bool type_ch;
1116                         u32 chid;
1117                         gk20a_fifo_get_faulty_channel(g, i, &chid, &type_ch);
1118                         if (ref_type_ch == type_ch && ref_chid == chid) {
1119                                 engine_ids |= BIT(i);
1120                                 g->fifo.mmu_fault_engines |=
1121                                         BIT(gk20a_engine_id_to_mmu_id(i));
1122                         }
1123                 }
1124
1125         }
1126
1127         /* trigger faults for all bad engines */
1128         for_each_set_bit(engine_id, &engine_ids, 32) {
1129                 if (engine_id > g->fifo.max_engines) {
1130                         WARN_ON(true);
1131                         break;
1132                 }
1133
1134                 gk20a_writel(g, fifo_trigger_mmu_fault_r(engine_id),
1135                              fifo_trigger_mmu_fault_id_f(
1136                              gk20a_engine_id_to_mmu_id(engine_id)) |
1137                              fifo_trigger_mmu_fault_enable_f(1));
1138         }
1139
1140         /* Wait for MMU fault to trigger */
1141         ret = -EBUSY;
1142         do {
1143                 if (gk20a_readl(g, fifo_intr_0_r()) &
1144                                 fifo_intr_0_mmu_fault_pending_f()) {
1145                         ret = 0;
1146                         break;
1147                 }
1148
1149                 usleep_range(delay, delay * 2);
1150                 delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
1151         } while (time_before(jiffies, end_jiffies) ||
1152                         !tegra_platform_is_silicon());
1153
1154         if (ret)
1155                 nvhost_err(dev_from_gk20a(g), "mmu fault timeout");
1156
1157         /* release mmu fault trigger */
1158         for_each_set_bit(engine_id, &engine_ids, 32)
1159                 gk20a_writel(g, fifo_trigger_mmu_fault_r(engine_id), 0);
1160 }
1161
1162
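/*
 * A scheduler error is pinned on the busy engine that is in the middle of
 * a context switch.  For ctxsw timeouts the channel's accumulated timeout
 * is extended; once it expires the channel gets an IDLE_TIMEOUT error
 * notifier and the engine is recovered.  Returns whether the caller should
 * print the channel-reset debug dump.
 */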
1163 static bool gk20a_fifo_handle_sched_error(struct gk20a *g)
1164 {
1165         u32 sched_error;
1166         u32 engine_id;
1167         int id = -1;
1168         bool non_chid = false;
1169
1170         /* read and reset the scheduler error register */
1171         sched_error = gk20a_readl(g, fifo_intr_sched_error_r());
1172         gk20a_writel(g, fifo_intr_0_r(), fifo_intr_0_sched_error_reset_f());
1173
1174         for (engine_id = 0; engine_id < g->fifo.max_engines; engine_id++) {
1175                 u32 status = gk20a_readl(g, fifo_engine_status_r(engine_id));
1176                 u32 ctx_status = fifo_engine_status_ctx_status_v(status);
1177                 bool failing_engine;
1178
1179                 /* we are interested in busy engines */
1180                 failing_engine = fifo_engine_status_engine_v(status) ==
1181                         fifo_engine_status_engine_busy_v();
1182
1183                 /* ..that are doing context switch */
1184                 failing_engine = failing_engine &&
1185                         (ctx_status ==
1186                                 fifo_engine_status_ctx_status_ctxsw_switch_v()
1187                         || ctx_status ==
1188                                 fifo_engine_status_ctx_status_ctxsw_save_v()
1189                         || ctx_status ==
1190                                 fifo_engine_status_ctx_status_ctxsw_load_v());
1191
1192                 if (failing_engine) {
1193                         id = (ctx_status ==
1194                                 fifo_engine_status_ctx_status_ctxsw_load_v()) ?
1195                                 fifo_engine_status_next_id_v(status) :
1196                                 fifo_engine_status_id_v(status);
1197                         non_chid = fifo_pbdma_status_id_type_v(status) !=
1198                                 fifo_pbdma_status_id_type_chid_v();
1199                         break;
1200                 }
1201         }
1202
1203         /* could not find the engine - should never happen */
1204         if (unlikely(engine_id >= g->fifo.max_engines))
1205                 goto err;
1206
1207         if (fifo_intr_sched_error_code_f(sched_error) ==
1208                         fifo_intr_sched_error_code_ctxsw_timeout_v()) {
1209                 struct fifo_gk20a *f = &g->fifo;
1210                 struct channel_gk20a *ch = &f->channel[id];
1211                 struct nvhost_hwctx *hwctx = ch->hwctx;
1212
1213                 if (non_chid) {
1214                         gk20a_fifo_recover(g, BIT(engine_id), true);
1215                         goto err;
1216                 }
1217
1218                 if (gk20a_channel_update_and_check_timeout(ch,
1219                         GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000)) {
1220                         gk20a_set_error_notifier(hwctx,
1221                                 NVHOST_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT);
1222                         nvhost_err(dev_from_gk20a(g),
1223                                 "fifo sched ctxsw timeout error: "
1224                                 "engine = %u, ch = %d", engine_id, id);
1225                         gk20a_fifo_recover(g, BIT(engine_id),
1226                                 hwctx ? hwctx->timeout_debug_dump : true);
1227                 } else {
1228                         nvhost_warn(dev_from_gk20a(g),
1229                                 "fifo is waiting for ctx switch for %d ms, "
1230                                 "ch = %d\n",
1231                                 ch->timeout_accumulated_ms,
1232                                 id);
1233                 }
1234                 return hwctx ? hwctx->timeout_debug_dump : true;
1235         }
1236 err:
1237         nvhost_err(dev_from_gk20a(g), "fifo sched error : 0x%08x, engine=%u, %s=%d",
1238                    sched_error, engine_id, non_chid ? "non-ch" : "ch", id);
1239
1240         return true;
1241 }
1242
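/*
 * Dispatch the stalling error interrupts (pio, bind, sched, chsw, MMU
 * fault, dropped MMU fault).  Returns the mask of interrupt bits that were
 * handled; optionally dumps per-engine PBDMA and engine exception status
 * when a channel reset was initiated and no reset is being deferred.
 */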
1243 static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
1244 {
1245         bool print_channel_reset_log = false, reset_engine = false;
1246         struct device *dev = dev_from_gk20a(g);
1247         u32 handled = 0;
1248
1249         nvhost_dbg_fn("");
1250
1251         if (fifo_intr & fifo_intr_0_pio_error_pending_f()) {
1252                 /* pio mode is unused.  this shouldn't happen, ever. */
1253                 /* should we clear it or just leave it pending? */
1254                 nvhost_err(dev, "fifo pio error!\n");
1255                 BUG_ON(1);
1256         }
1257
1258         if (fifo_intr & fifo_intr_0_bind_error_pending_f()) {
1259                 u32 bind_error = gk20a_readl(g, fifo_intr_bind_error_r());
1260                 nvhost_err(dev, "fifo bind error: 0x%08x", bind_error);
1261                 print_channel_reset_log = true;
1262                 handled |= fifo_intr_0_bind_error_pending_f();
1263         }
1264
1265         if (fifo_intr & fifo_intr_0_sched_error_pending_f()) {
1266                 print_channel_reset_log = gk20a_fifo_handle_sched_error(g);
1267                 handled |= fifo_intr_0_sched_error_pending_f();
1268         }
1269
1270         if (fifo_intr & fifo_intr_0_chsw_error_pending_f()) {
1271                 gk20a_fifo_handle_chsw_fault(g);
1272                 handled |= fifo_intr_0_chsw_error_pending_f();
1273         }
1274
1275         if (fifo_intr & fifo_intr_0_mmu_fault_pending_f()) {
1276                 print_channel_reset_log = gk20a_fifo_handle_mmu_fault(g);
1277                 reset_engine  = true;
1278                 handled |= fifo_intr_0_mmu_fault_pending_f();
1279         }
1280
1281         if (fifo_intr & fifo_intr_0_dropped_mmu_fault_pending_f()) {
1282                 gk20a_fifo_handle_dropped_mmu_fault(g);
1283                 handled |= fifo_intr_0_dropped_mmu_fault_pending_f();
1284         }
1285
1286         print_channel_reset_log = !g->fifo.deferred_reset_pending
1287                         && print_channel_reset_log;
1288
1289         if (print_channel_reset_log) {
1290                 int engine_id;
1291                 nvhost_err(dev_from_gk20a(g),
1292                            "channel reset initiated from %s", __func__);
1293                 for (engine_id = 0;
1294                      engine_id < g->fifo.max_engines;
1295                      engine_id++) {
1296                         nvhost_dbg_fn("enum:%d -> engine_id:%d", engine_id,
1297                                 g->fifo.engine_info[engine_id].engine_id);
1298                         fifo_pbdma_exception_status(g,
1299                                         &g->fifo.engine_info[engine_id]);
1300                         fifo_engine_exception_status(g,
1301                                         &g->fifo.engine_info[engine_id]);
1302                 }
1303         }
1304
1305         return handled;
1306 }
1307
1308
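/*
 * Handle one PBDMA's pending interrupts using the classification built in
 * gk20a_init_fifo_pbdma_intr_descs().  PBDMA_INTR_1 bits are host copy
 * engine errors that gk20a does not have, so they are treated as channel
 * fatal.  Returns the PBDMA_INTR_0 bits that were recognised.
 */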
1309 static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev,
1310                                         struct gk20a *g,
1311                                         struct fifo_gk20a *f,
1312                                         u32 pbdma_id)
1313 {
1314         u32 pbdma_intr_0 = gk20a_readl(g, pbdma_intr_0_r(pbdma_id));
1315         u32 pbdma_intr_1 = gk20a_readl(g, pbdma_intr_1_r(pbdma_id));
1316         u32 handled = 0;
1317         bool reset_device = false;
1318         bool reset_channel = false;
1319
1320         nvhost_dbg_fn("");
1321
1322         nvhost_dbg(dbg_intr, "pbdma id intr pending %d %08x %08x", pbdma_id,
1323                         pbdma_intr_0, pbdma_intr_1);
1324         if (pbdma_intr_0) {
1325                 if (f->intr.pbdma.device_fatal_0 & pbdma_intr_0) {
1326                         dev_err(dev, "unrecoverable device error: "
1327                                 "pbdma_intr_0(%d):0x%08x", pbdma_id, pbdma_intr_0);
1328                         reset_device = true;
1329                         /* TODO: disable pbdma intrs */
1330                         handled |= f->intr.pbdma.device_fatal_0 & pbdma_intr_0;
1331                 }
1332                 if (f->intr.pbdma.channel_fatal_0 & pbdma_intr_0) {
1333                         dev_warn(dev, "channel error: "
1334                                  "pbdma_intr_0(%d):0x%08x", pbdma_id, pbdma_intr_0);
1335                         reset_channel = true;
1336                         /* TODO: clear pbdma channel errors */
1337                         handled |= f->intr.pbdma.channel_fatal_0 & pbdma_intr_0;
1338                 }
1339                 gk20a_writel(g, pbdma_intr_0_r(pbdma_id), pbdma_intr_0);
1340         }
1341
1342         /* all intrs in _intr_1 are "host copy engine" related,
1343          * which gk20a doesn't have. for now just make them channel fatal. */
1344         if (pbdma_intr_1) {
1345                 dev_err(dev, "channel hce error: pbdma_intr_1(%d): 0x%08x",
1346                         pbdma_id, pbdma_intr_1);
1347                 reset_channel = true;
1348                 gk20a_writel(g, pbdma_intr_1_r(pbdma_id), pbdma_intr_1);
1349         }
1350
1351
1352
1353         return handled;
1354 }
1355
1356 static u32 fifo_channel_isr(struct gk20a *g, u32 fifo_intr)
1357 {
1358         gk20a_channel_semaphore_wakeup(g);
1359         return fifo_intr_0_channel_intr_pending_f();
1360 }
1361
1362
1363 static u32 fifo_pbdma_isr(struct gk20a *g, u32 fifo_intr)
1364 {
1365         struct device *dev = dev_from_gk20a(g);
1366         struct fifo_gk20a *f = &g->fifo;
1367         u32 clear_intr = 0, i;
1368         u32 pbdma_pending = gk20a_readl(g, fifo_intr_pbdma_id_r());
1369
1370         for (i = 0; i < fifo_intr_pbdma_id_status__size_1_v(); i++) {
1371                 if (fifo_intr_pbdma_id_status_f(pbdma_pending, i)) {
1372                         nvhost_dbg(dbg_intr, "pbdma id %d intr pending", i);
1373                         clear_intr |=
1374                                 gk20a_fifo_handle_pbdma_intr(dev, g, f, i);
1375                 }
1376         }
1377         return fifo_intr_0_pbdma_intr_pending_f();
1378 }
1379
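/*
 * Stalling FIFO interrupt handler (called from threaded interrupt context).
 * Runlist events and PBDMA interrupts are dispatched here; anything covered
 * by error_intr_mask is passed on to fifo_error_isr(). Handled bits are
 * written back to fifo_intr_0_r() to acknowledge them.
 */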
1380 void gk20a_fifo_isr(struct gk20a *g)
1381 {
1382         u32 error_intr_mask =
1383                 fifo_intr_0_bind_error_pending_f() |
1384                 fifo_intr_0_sched_error_pending_f() |
1385                 fifo_intr_0_chsw_error_pending_f() |
1386                 fifo_intr_0_fb_flush_timeout_pending_f() |
1387                 fifo_intr_0_dropped_mmu_fault_pending_f() |
1388                 fifo_intr_0_mmu_fault_pending_f() |
1389                 fifo_intr_0_lb_error_pending_f() |
1390                 fifo_intr_0_pio_error_pending_f();
1391
1392         u32 fifo_intr = gk20a_readl(g, fifo_intr_0_r());
1393         u32 clear_intr = 0;
1394
1395         /* note we're not actually in an "isr", but rather
1396          * in a threaded interrupt context... */
1397         mutex_lock(&g->fifo.intr.isr.mutex);
1398
1399         nvhost_dbg(dbg_intr, "fifo isr %08x\n", fifo_intr);
1400
1401         /* handle runlist update */
1402         if (fifo_intr & fifo_intr_0_runlist_event_pending_f()) {
1403                 gk20a_fifo_handle_runlist_event(g);
1404                 clear_intr |= fifo_intr_0_runlist_event_pending_f();
1405         }
1406         if (fifo_intr & fifo_intr_0_pbdma_intr_pending_f())
1407                 clear_intr |= fifo_pbdma_isr(g, fifo_intr);
1408
1409         if (unlikely(fifo_intr & error_intr_mask))
1410                 clear_intr |= fifo_error_isr(g, fifo_intr);
1411
1412         gk20a_writel(g, fifo_intr_0_r(), clear_intr);
1413
1414         mutex_unlock(&g->fifo.intr.isr.mutex);
1415
1416         return;
1417 }
1418
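/*
 * Non-stalling FIFO interrupt handler: only the channel (semaphore wakeup)
 * interrupt is expected and cleared here.
 */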
1419 void gk20a_fifo_nonstall_isr(struct gk20a *g)
1420 {
1421         u32 fifo_intr = gk20a_readl(g, fifo_intr_0_r());
1422         u32 clear_intr = 0;
1423
1424         nvhost_dbg(dbg_intr, "fifo nonstall isr %08x\n", fifo_intr);
1425
1426         if (fifo_intr & fifo_intr_0_channel_intr_pending_f())
1427                 clear_intr |= fifo_channel_isr(g, fifo_intr);
1428
1429         gk20a_writel(g, fifo_intr_0_r(), clear_intr);
1430
1431         return;
1432 }
1433
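/*
 * Preempt a single channel off the host. All runlist mutexes are taken since
 * the channel's runlist is not known here. If the preempt does not complete
 * within the gr idle timeout, the channel's error notifier is set and every
 * busy engine still bound to the channel is forcefully recovered.
 */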
1434 int gk20a_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
1435 {
1436         struct fifo_gk20a *f = &g->fifo;
1437         unsigned long end_jiffies = jiffies
1438                 + msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
1439         u32 delay = GR_IDLE_CHECK_DEFAULT;
1440         int ret = 0;
1441         u32 token = PMU_INVALID_MUTEX_OWNER_ID;
1442         u32 elpg_off = 0;
1443         u32 i;
1444
1445         nvhost_dbg_fn("%d", hw_chid);
1446
1447         /* we have no idea which runlist we are using. lock all */
1448         for (i = 0; i < g->fifo.max_runlists; i++)
1449                 mutex_lock(&f->runlist_info[i].mutex);
1450
1451         /* disable elpg if failed to acquire pmu mutex */
1452         elpg_off = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
1453         if (elpg_off)
1454                 gk20a_pmu_disable_elpg(g);
1455
1456         /* issue preempt */
1457         gk20a_writel(g, fifo_preempt_r(),
1458                 fifo_preempt_chid_f(hw_chid) |
1459                 fifo_preempt_type_channel_f());
1460
1461         /* wait for preempt */
1462         ret = -EBUSY;
1463         do {
1464                 if (!(gk20a_readl(g, fifo_preempt_r()) &
1465                         fifo_preempt_pending_true_f())) {
1466                         ret = 0;
1467                         break;
1468                 }
1469
1470                 usleep_range(delay, delay * 2);
1471                 delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
1472         } while (time_before(jiffies, end_jiffies) ||
1473                         !tegra_platform_is_silicon());
1474
1475         if (ret) {
1476                 int i;
1477                 u32 engines = 0;
1478                 struct fifo_gk20a *f = &g->fifo;
1479                 struct channel_gk20a *ch = &f->channel[hw_chid];
1480
1481                 nvhost_err(dev_from_gk20a(g), "preempt channel %d timeout\n",
1482                             hw_chid);
1483
1484                 /* forcefully reset all busy engines using this channel */
1485                 for (i = 0; i < g->fifo.max_engines; i++) {
1486                         u32 status = gk20a_readl(g, fifo_engine_status_r(i));
1487                         u32 ctx_status =
1488                                 fifo_engine_status_ctx_status_v(status);
1489                         bool type_ch = fifo_pbdma_status_id_type_v(status) ==
1490                                 fifo_pbdma_status_id_type_chid_v();
1491                         bool busy = fifo_engine_status_engine_v(status) ==
1492                                 fifo_engine_status_engine_busy_v();
1493                         u32 id = (ctx_status ==
1494                                 fifo_engine_status_ctx_status_ctxsw_load_v()) ?
1495                                 fifo_engine_status_next_id_v(status) :
1496                                 fifo_engine_status_id_v(status);
1497
1498                         if (type_ch && busy && id == hw_chid)
1499                                 engines |= BIT(i);
1500                 }
1501                 gk20a_set_error_notifier(ch->hwctx,
1502                                 NVHOST_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT);
1503                 gk20a_fifo_recover(g, engines, true);
1504         }
1505
1506         /* re-enable elpg or release pmu mutex */
1507         if (elpg_off)
1508                 gk20a_pmu_enable_elpg(g);
1509         else
1510                 pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
1511
1512         for (i = 0; i < g->fifo.max_runlists; i++)
1513                 mutex_unlock(&f->runlist_info[i].mutex);
1514
1515         return ret;
1516 }
1517
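/*
 * Re-enable scheduling for the runlist serving this engine by clearing its
 * bit in fifo_sched_disable_r(). If the PMU FIFO mutex cannot be taken,
 * ELPG is disabled around the register update instead.
 */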
1518 int gk20a_fifo_enable_engine_activity(struct gk20a *g,
1519                                 struct fifo_engine_info_gk20a *eng_info)
1520 {
1521         u32 token = PMU_INVALID_MUTEX_OWNER_ID;
1522         u32 elpg_off;
1523         u32 enable;
1524
1525         nvhost_dbg_fn("");
1526
1527         /* disable elpg if failed to acquire pmu mutex */
1528         elpg_off = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
1529         if (elpg_off)
1530                 gk20a_pmu_disable_elpg(g);
1531
1532         enable = gk20a_readl(g, fifo_sched_disable_r());
1533         enable &= ~(fifo_sched_disable_true_v() >> eng_info->runlist_id);
1534         gk20a_writel(g, fifo_sched_disable_r(), enable);
1535
1536         /* re-enable elpg or release pmu mutex */
1537         if (elpg_off)
1538                 gk20a_pmu_enable_elpg(g);
1539         else
1540                 pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
1541
1542         nvhost_dbg_fn("done");
1543         return 0;
1544 }
1545
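/*
 * Disable scheduling for the runlist serving this engine, then preempt
 * whichever channel is resident on its PBDMA and on the engine itself.
 * Returns -EBUSY if the engine is busy and wait_for_idle is not set; on
 * any later failure, engine activity is re-enabled before returning.
 */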
1546 int gk20a_fifo_disable_engine_activity(struct gk20a *g,
1547                                 struct fifo_engine_info_gk20a *eng_info,
1548                                 bool wait_for_idle)
1549 {
1550         u32 gr_stat, pbdma_stat, chan_stat, eng_stat, ctx_stat;
1551         u32 pbdma_chid = ~0, engine_chid = ~0, disable;
1552         u32 token = PMU_INVALID_MUTEX_OWNER_ID;
1553         u32 elpg_off;
1554         int err = 0;
1555
1556         nvhost_dbg_fn("");
1557
1558         gr_stat =
1559                 gk20a_readl(g, fifo_engine_status_r(eng_info->engine_id));
1560         if (fifo_engine_status_engine_v(gr_stat) ==
1561             fifo_engine_status_engine_busy_v() && !wait_for_idle)
1562                 return -EBUSY;
1563
1564         /* disable elpg if failed to acquire pmu mutex */
1565         elpg_off = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
1566         if (elpg_off)
1567                 gk20a_pmu_disable_elpg(g);
1568
1569         disable = gk20a_readl(g, fifo_sched_disable_r());
1570         disable = set_field(disable,
1571                         fifo_sched_disable_runlist_m(eng_info->runlist_id),
1572                         fifo_sched_disable_runlist_f(fifo_sched_disable_true_v(),
1573                                 eng_info->runlist_id));
1574         gk20a_writel(g, fifo_sched_disable_r(), disable);
1575
1576         /* chid from pbdma status */
1577         pbdma_stat = gk20a_readl(g, fifo_pbdma_status_r(eng_info->pbdma_id));
1578         chan_stat  = fifo_pbdma_status_chan_status_v(pbdma_stat);
1579         if (chan_stat == fifo_pbdma_status_chan_status_valid_v() ||
1580             chan_stat == fifo_pbdma_status_chan_status_chsw_save_v())
1581                 pbdma_chid = fifo_pbdma_status_id_v(pbdma_stat);
1582         else if (chan_stat == fifo_pbdma_status_chan_status_chsw_load_v() ||
1583                  chan_stat == fifo_pbdma_status_chan_status_chsw_switch_v())
1584                 pbdma_chid = fifo_pbdma_status_next_id_v(pbdma_stat);
1585
1586         if (pbdma_chid != ~0) {
1587                 err = gk20a_fifo_preempt_channel(g, pbdma_chid);
1588                 if (err)
1589                         goto clean_up;
1590         }
1591
1592         /* chid from engine status */
1593         eng_stat = gk20a_readl(g, fifo_engine_status_r(eng_info->engine_id));
1594         ctx_stat  = fifo_engine_status_ctx_status_v(eng_stat);
1595         if (ctx_stat == fifo_engine_status_ctx_status_valid_v() ||
1596             ctx_stat == fifo_engine_status_ctx_status_ctxsw_save_v())
1597                 engine_chid = fifo_engine_status_id_v(eng_stat);
1598         else if (ctx_stat == fifo_engine_status_ctx_status_ctxsw_load_v() ||
1599                  ctx_stat == fifo_engine_status_ctx_status_ctxsw_switch_v())
1600                 engine_chid = fifo_engine_status_next_id_v(eng_stat);
1601
1602         if (engine_chid != ~0 && engine_chid != pbdma_chid) {
1603                 err = gk20a_fifo_preempt_channel(g, engine_chid);
1604                 if (err)
1605                         goto clean_up;
1606         }
1607
1608 clean_up:
1609         /* re-enable elpg or release pmu mutex */
1610         if (elpg_off)
1611                 gk20a_pmu_enable_elpg(g);
1612         else
1613                 pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
1614
1615         if (err) {
1616                 nvhost_dbg_fn("failed");
1617                 if (gk20a_fifo_enable_engine_activity(g, eng_info))
1618                         nvhost_err(dev_from_gk20a(g),
1619                                 "failed to enable gr engine activity\n");
1620         } else {
1621                 nvhost_dbg_fn("done");
1622         }
1623         return err;
1624 }
1625
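/*
 * Force-recover every engine that is currently busy and scheduled from the
 * given runlist; used when a runlist update times out.
 */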
1626 static void gk20a_fifo_runlist_reset_engines(struct gk20a *g, u32 runlist_id)
1627 {
1628         struct fifo_gk20a *f = &g->fifo;
1629         u32 engines = 0;
1630         int i;
1631
1632         for (i = 0; i < f->max_engines; i++) {
1633                 u32 status = gk20a_readl(g, fifo_engine_status_r(i));
1634                 bool engine_busy = fifo_engine_status_engine_v(status) ==
1635                         fifo_engine_status_engine_busy_v();
1636
1637                 if (engine_busy &&
1638                     (f->engine_info[i].runlist_id == runlist_id))
1639                         engines |= BIT(i);
1640         }
1641         gk20a_fifo_recover(g, engines, true);
1642 }
1643
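/*
 * Wait, up to the gr idle timeout, for the runlist's "pending" bit to clear,
 * i.e. for the hardware to finish fetching the newly submitted runlist.
 */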
1644 static int gk20a_fifo_runlist_wait_pending(struct gk20a *g, u32 runlist_id)
1645 {
1646         struct fifo_runlist_info_gk20a *runlist;
1647         u32 remain;
1648         bool pending;
1649
1650         runlist = &g->fifo.runlist_info[runlist_id];
1651         remain = wait_event_timeout(runlist->runlist_wq,
1652                 ((pending = gk20a_readl(g, fifo_eng_runlist_r(runlist_id)) &
1653                         fifo_eng_runlist_pending_true_f()) == 0),
1654                 msecs_to_jiffies(gk20a_get_gr_idle_timeout(g)));
1655
1656         if (remain == 0 && pending != 0)
1657                 return -ETIMEDOUT;
1658
1659         return 0;
1660 }
1661
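/*
 * Rebuild the runlist in the inactive buffer and hand it to hardware.
 * Every active channel contributes one two-word entry (chid, 0). The caller
 * must hold runlist->mutex; see gk20a_fifo_update_runlist() below for the
 * locking wrapper and the hw_chid == ~0 special cases.
 */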
1662 static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
1663                                             u32 hw_chid, bool add,
1664                                             bool wait_for_finish)
1665 {
1666         int ret = 0;
1667         struct device *d = dev_from_gk20a(g);
1668         struct fifo_gk20a *f = &g->fifo;
1669         struct fifo_runlist_info_gk20a *runlist = NULL;
1670         u32 *runlist_entry_base = NULL;
1671         u32 *runlist_entry = NULL;
1672         phys_addr_t runlist_pa;
1673         u32 old_buf, new_buf;
1674         u32 chid;
1675         u32 count = 0;
1676         runlist = &f->runlist_info[runlist_id];
1677
1678         /* If this is a valid channel, add/remove it from the active list.
1679            Otherwise, keep the active list untouched for suspend/resume. */
1680         if (hw_chid != ~0) {
1681                 if (add) {
1682                         if (test_and_set_bit(hw_chid,
1683                                 runlist->active_channels) == 1)
1684                                 return 0;
1685                 } else {
1686                         if (test_and_clear_bit(hw_chid,
1687                                 runlist->active_channels) == 0)
1688                                 return 0;
1689                 }
1690         }
1691
1692         old_buf = runlist->cur_buffer;
1693         new_buf = !runlist->cur_buffer;
1694
1695         nvhost_dbg_info("runlist_id : %d, switch to new buffer 0x%016llx",
1696                 runlist_id, runlist->mem[new_buf].iova);
1697
1698         runlist_pa = gk20a_get_phys_from_iova(d, runlist->mem[new_buf].iova);
1699         if (!runlist_pa) {
1700                 ret = -EINVAL;
1701                 goto clean_up;
1702         }
1703
1704         runlist_entry_base = runlist->mem[new_buf].cpuva;
1705         if (!runlist_entry_base) {
1706                 ret = -ENOMEM;
1707                 goto clean_up;
1708         }
1709
1710         if (hw_chid != ~0 || /* add/remove a valid channel */
1711             add /* resume to add all channels back */) {
1712                 runlist_entry = runlist_entry_base;
1713                 for_each_set_bit(chid,
1714                         runlist->active_channels, f->num_channels) {
1715                         nvhost_dbg_info("add channel %d to runlist", chid);
1716                         runlist_entry[0] = chid;
1717                         runlist_entry[1] = 0;
1718                         runlist_entry += 2;
1719                         count++;
1720                 }
1721         } else  /* suspend to remove all channels */
1722                 count = 0;
1723
1724         if (count != 0) {
1725                 gk20a_writel(g, fifo_runlist_base_r(),
1726                         fifo_runlist_base_ptr_f(u64_lo32(runlist_pa >> 12)) |
1727                         fifo_runlist_base_target_vid_mem_f());
1728         }
1729
1730         gk20a_writel(g, fifo_runlist_r(),
1731                 fifo_runlist_engine_f(runlist_id) |
1732                 fifo_eng_runlist_length_f(count));
1733
1734         if (wait_for_finish) {
1735                 ret = gk20a_fifo_runlist_wait_pending(g, runlist_id);
1736
1737                 if (ret == -ETIMEDOUT) {
1738                         nvhost_err(dev_from_gk20a(g),
1739                                    "runlist update timeout");
1740
1741                         gk20a_fifo_runlist_reset_engines(g, runlist_id);
1742
1743                         /* engine reset needs the lock. drop it */
1744                         mutex_unlock(&runlist->mutex);
1745                         /* wait until the runlist is active again */
1746                         ret = gk20a_fifo_runlist_wait_pending(g, runlist_id);
1747                         /* get the lock back. at this point everything
1748                          * should be fine */
1749                         mutex_lock(&runlist->mutex);
1750
1751                         if (ret)
1752                                 nvhost_err(dev_from_gk20a(g),
1753                                            "runlist update failed: %d", ret);
1754                 } else if (ret == -EINTR)
1755                         nvhost_err(dev_from_gk20a(g),
1756                                    "runlist update interrupted");
1757         }
1758
1759         runlist->cur_buffer = new_buf;
1760
1761 clean_up:
1762         return ret;
1763 }
1764
1765 /* Add or remove a channel from the runlist.
1766    Special cases below (runlist->active_channels will NOT be changed):
1767    (hw_chid == ~0 && !add) means remove all active channels from the runlist.
1768    (hw_chid == ~0 &&  add) means restore all active channels on the runlist. */
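/*
 * A hypothetical caller, e.g. making a newly bound channel schedulable
 * (assumes a struct channel_gk20a *ch that lives on runlist 0; names and
 * runlist id are illustrative, not taken from this file):
 *
 *	int err = gk20a_fifo_update_runlist(ch->g, 0, ch->hw_chid, true, true);
 *	if (err)
 *		nvhost_err(dev_from_gk20a(ch->g), "runlist add failed: %d", err);
 */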
1769 int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 hw_chid,
1770                               bool add, bool wait_for_finish)
1771 {
1772         struct fifo_runlist_info_gk20a *runlist = NULL;
1773         struct fifo_gk20a *f = &g->fifo;
1774         u32 token = PMU_INVALID_MUTEX_OWNER_ID;
1775         u32 elpg_off;
1776         int ret = 0;
1777
1778         runlist = &f->runlist_info[runlist_id];
1779
1780         mutex_lock(&runlist->mutex);
1781
1782         /* disable elpg if failed to acquire pmu mutex */
1783         elpg_off = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
1784         if (elpg_off)
1785                 gk20a_pmu_disable_elpg(g);
1786
1787         ret = gk20a_fifo_update_runlist_locked(g, runlist_id, hw_chid, add,
1788                                                wait_for_finish);
1789
1790         /* re-enable elpg or release pmu mutex */
1791         if (elpg_off)
1792                 gk20a_pmu_enable_elpg(g);
1793         else
1794                 pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
1795
1796         mutex_unlock(&runlist->mutex);
1797         return ret;
1798 }
1799
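/* Quiesce the FIFO for suspend: stop BAR1 snooping and mask all FIFO interrupts. */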
1800 int gk20a_fifo_suspend(struct gk20a *g)
1801 {
1802         nvhost_dbg_fn("");
1803
1804         /* stop bar1 snooping */
1805         gk20a_writel(g, fifo_bar1_base_r(),
1806                         fifo_bar1_base_valid_false_f());
1807
1808         /* disable fifo intr */
1809         gk20a_writel(g, fifo_intr_en_0_r(), 0);
1810         gk20a_writel(g, fifo_intr_en_1_r(), 0);
1811
1812         nvhost_dbg_fn("done");
1813         return 0;
1814 }
1815
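/* Report whether an MMU fault interrupt is currently pending in fifo_intr_0. */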
1816 bool gk20a_fifo_mmu_fault_pending(struct gk20a *g)
1817 {
1818         if (gk20a_readl(g, fifo_intr_0_r()) &
1819                         fifo_intr_0_mmu_fault_pending_f())
1820                 return true;
1821         else
1822                 return false;
1823 }