drivers: video: tegra: change buffer gpu cacheable and add flush
/*
 * drivers/video/tegra/host/gk20a/fifo_gk20a.c
 *
 * GK20A Graphics FIFO (gr host)
 *
 * Copyright (c) 2011, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/nvmap.h>

#include "../dev.h"
#include "../nvhost_as.h"

#include "gk20a.h"
#include "hw_fifo_gk20a.h"
#include "hw_pbdma_gk20a.h"
#include "hw_ccsr_gk20a.h"
#include "hw_ram_gk20a.h"
#include "hw_proj_gk20a.h"
#include "hw_top_gk20a.h"
#include "hw_mc_gk20a.h"

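/*
 * Locate the graphics engine in the top device info table and record its
 * runlist id, engine id and the pbdma that services that runlist. Only the
 * graphics entry is looked up here; other engines are ignored.
 */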
static int init_engine_info_gk20a(struct fifo_gk20a *f)
{
        struct fifo_engine_info_gk20a *gr_info;
        const u32 gr_sw_id = ENGINE_GR_GK20A;
        u32 i;
        u32 max_info_entries = top_device_info__size_1_v();

        nvhost_dbg_fn("");

        /* all we really care about finding is the graphics entry    */
        /* especially early on in sim it probably thinks it has more */
        f->num_engines = 1;

        gr_info = f->engine_info + gr_sw_id;

        gr_info->sw_id = gr_sw_id;
        gr_info->name = "gr";
        gr_info->dev_info_id = top_device_info_type_enum_graphics_v();
        gr_info->mmu_fault_id = fifo_intr_mmu_fault_eng_id_graphics_v();
        gr_info->runlist_id = ~0;
        gr_info->pbdma_id   = ~0;
        gr_info->engine_id  = ~0;

        for (i = 0; i < max_info_entries; i++) {
                u32 table_entry = gk20a_readl(f->g, top_device_info_r(i));
                u32 entry = top_device_info_entry_v(table_entry);
                u32 engine_enum = top_device_info_type_enum_v(table_entry);
                u32 table_entry2 = 0;

                if (entry == top_device_info_entry_not_valid_v())
                        continue;

                if (top_device_info_chain_v(table_entry) ==
                    top_device_info_chain_enable_v()) {

                        table_entry2 = gk20a_readl(f->g,
                                                   top_device_info_r(++i));

                        engine_enum = top_device_info_type_enum_v(table_entry2);
                }

                if (entry == top_device_info_entry_enum_v() &&
                    engine_enum == gr_info->dev_info_id) {
                        int pbdma_id;
                        u32 runlist_bit;

                        gr_info->runlist_id =
                                top_device_info_runlist_enum_v(table_entry);
                        nvhost_dbg_info("gr info: runlist_id %d", gr_info->runlist_id);

                        gr_info->engine_id =
                                top_device_info_engine_enum_v(table_entry);
                        nvhost_dbg_info("gr info: engine_id %d", gr_info->engine_id);

                        runlist_bit = 1 << gr_info->runlist_id;

                        for (pbdma_id = 0; pbdma_id < f->num_pbdma; pbdma_id++) {
                                nvhost_dbg_info("gr info: pbdma_map[%d]=%d",
                                        pbdma_id, f->pbdma_map[pbdma_id]);
                                if (f->pbdma_map[pbdma_id] & runlist_bit)
                                        break;
                        }

                        if (pbdma_id == f->num_pbdma) {
                                nvhost_dbg(dbg_err, "busted pbdma map");
                                return -EINVAL;
                        }
                        gr_info->pbdma_id = pbdma_id;

                        break;
                }
        }

        if (gr_info->runlist_id == ~0) {
                nvhost_dbg(dbg_err, "busted device info");
                return -EINVAL;
        }

        return 0;
}

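/*
 * Tear down fifo state: per-channel support, the userd region and the
 * pbdma map / engine info tables allocated by gk20a_init_fifo_setup_sw().
 */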
static void gk20a_remove_fifo_support(struct fifo_gk20a *f)
{
        struct mem_mgr *memmgr = mem_mgr_from_g(f->g);

        nvhost_dbg_fn("");

        if (f->channel) {
                int c;
                for (c = 0; c < f->num_channels; c++) {
                        if (f->channel[c].remove_support)
                                f->channel[c].remove_support(f->channel+c);
                }
                kfree(f->channel);
                f->channel = NULL;
        }

        mem_op().munmap(f->userd.mem.ref, f->userd.cpu_va);
        mem_op().unpin(memmgr, f->userd.mem.ref);
        mem_op().put(memmgr, f->userd.mem.ref);
        memset(&f->userd, 0, sizeof(struct userd_desc));

        kfree(f->pbdma_map);
        f->pbdma_map = NULL;

        kfree(f->engine_info);
        f->engine_info = NULL;
}

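/*
 * Allocate runlist state for each engine: an active-channel bitmap plus
 * MAX_RUNLIST_BUFFERS runlist buffers sized for every possible channel.
 */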
static int fifo_gk20a_init_runlist(struct gk20a *g, struct fifo_gk20a *f)
{
        struct mem_mgr *memmgr = mem_mgr_from_g(g);
        struct fifo_engine_info_gk20a *engine_info;
        struct fifo_runlist_info_gk20a *runlist;
        u32 engine_id;
        u32 runlist_id;
        u32 i;
        u64 runlist_size;

        nvhost_dbg_fn("");

        f->max_runlists = fifo_eng_runlist_base__size_1_v();
        f->runlist_info = kzalloc(sizeof(struct fifo_runlist_info_gk20a) *
                                  f->max_runlists, GFP_KERNEL);
        if (!f->runlist_info)
                goto clean_up;

        for (engine_id = 0; engine_id < ENGINE_INVAL_GK20A; engine_id++) {
                engine_info = f->engine_info + engine_id;
                runlist_id = engine_info->runlist_id;
                runlist = &f->runlist_info[runlist_id];

                runlist->active_channels =
                        kzalloc((f->num_channels /
                                (sizeof(unsigned long) * BITS_PER_BYTE)) + 1,
                                GFP_KERNEL);
                if (!runlist->active_channels)
                        goto clean_up;

                runlist_size  = ram_rl_entry_size_v() * f->num_channels;
                for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
                        runlist->mem[i].ref =
                                mem_op().alloc(memmgr, runlist_size,
                                            DEFAULT_NVMAP_ALLOC_ALIGNMENT,
                                            DEFAULT_NVMAP_ALLOC_FLAGS,
                                            NVMAP_HEAP_CARVEOUT_GENERIC);
                        if (!runlist->mem[i].ref)
                                goto clean_up;
                        runlist->mem[i].size = runlist_size;
                }
                mutex_init(&runlist->mutex);
                init_waitqueue_head(&runlist->runlist_wq);
        }

        return 0;

clean_up:
        nvhost_dbg_fn("fail");
        if (!f->runlist_info)
                return -ENOMEM;

        for (engine_id = 0; engine_id < ENGINE_INVAL_GK20A; engine_id++) {
                engine_info = f->engine_info + engine_id;
                runlist_id = engine_info->runlist_id;
                runlist = &f->runlist_info[runlist_id];

                for (i = 0; i < MAX_RUNLIST_BUFFERS; i++)
                        mem_op().put(memmgr,
                                   runlist->mem[i].ref);

                kfree(runlist->active_channels);
        }

        kfree(f->runlist_info);
        f->runlist_info = NULL;

        return -ENOMEM;
}

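/*
 * Reset pfifo, ce2 and the priv ring via pmc, re-enable them, then enable
 * the pbdmas, program the fifo/pbdma interrupt enables and the fb/pb
 * timeouts, and reset the priv ring.
 */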
static int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
{
        u32 pmc_enable;
        u32 intr_stall;
        u32 mask;
        u32 timeout;
        int i;

        nvhost_dbg_fn("");

        /* reset pfifo, ce2 and the priv ring by toggling their pmc enables */
        pmc_enable = gk20a_readl(g, mc_enable_r());
        pmc_enable &= ~mc_enable_pfifo_enabled_f();
        pmc_enable &= ~mc_enable_ce2_enabled_f();
        pmc_enable &= ~mc_enable_priv_ring_enabled_f();
        gk20a_writel(g, mc_enable_r(), pmc_enable);

        pmc_enable = gk20a_readl(g, mc_enable_r());
        pmc_enable |= mc_enable_pfifo_enabled_f();
        pmc_enable |= mc_enable_ce2_enabled_f();
        pmc_enable |= mc_enable_priv_ring_enabled_f();
        gk20a_writel(g, mc_enable_r(), pmc_enable);
        gk20a_readl(g, mc_enable_r());

        /* enable pbdma */
        mask = 0;
        for (i = 0; i < proj_host_num_pbdma_v(); ++i)
                mask |= mc_enable_pb_sel_f(mc_enable_pb_0_enabled_v(), i);
        gk20a_writel(g, mc_enable_pb_r(), mask);

        /* enable pfifo interrupt */
        gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFF);
        gk20a_writel(g, fifo_intr_en_0_r(), 0xFFFFFFFF); /* TBD: alternative intr tree */
        gk20a_writel(g, fifo_intr_en_1_r(), 0xFFFFFFFF); /* TBD: alternative intr tree */

        /* enable pbdma interrupt */
        mask = 0;
        for (i = 0; i < proj_host_num_pbdma_v(); i++) {
                intr_stall = gk20a_readl(g, pbdma_intr_stall_r(i));
                intr_stall &= ~pbdma_intr_stall_lbreq_enabled_f();
                gk20a_writel(g, pbdma_intr_stall_r(i), intr_stall);
                gk20a_writel(g, pbdma_intr_0_r(i), 0xFFFFFFFF);
                gk20a_writel(g, pbdma_intr_en_0_r(i),
                        (~0) & ~pbdma_intr_en_0_lbreq_enabled_f());
                gk20a_writel(g, pbdma_intr_1_r(i), 0xFFFFFFFF);
                gk20a_writel(g, pbdma_intr_en_1_r(i), 0xFFFFFFFF);
        }

        /* TBD: apply overrides */

        /* TBD: BLCG prod */

        /* reset runlist interrupts */
        gk20a_writel(g, fifo_intr_runlist_r(), ~0);

        /* TBD: do we need those? */
        timeout = gk20a_readl(g, fifo_fb_timeout_r());
        timeout = set_field(timeout, fifo_fb_timeout_period_m(),
                        fifo_fb_timeout_period_max_f());
        gk20a_writel(g, fifo_fb_timeout_r(), timeout);

        timeout = gk20a_readl(g, fifo_pb_timeout_r());
        timeout &= ~fifo_pb_timeout_detection_enabled_f();
        gk20a_writel(g, fifo_pb_timeout_r(), timeout);

        gk20a_reset_priv_ring(g);

        nvhost_dbg_fn("done");

        return 0;
}

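/*
 * One-time software setup: allocate and map the userd region, allocate the
 * channel, pbdma map and engine info tables, build the per-engine runlists,
 * then wire up per-channel userd addresses and channel support.
 */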
static int gk20a_init_fifo_setup_sw(struct gk20a *g, bool reinit)
{
        struct mem_mgr *memmgr = mem_mgr_from_g(g);
        struct fifo_gk20a *f = &g->fifo;
        int chid, i, err;

        nvhost_dbg_fn("");

        if (reinit) {
                nvhost_dbg_fn("skip init");
                return 0;
        }

        f->g = g;

        f->num_channels = ccsr_channel__size_1_v();
        f->num_pbdma = proj_host_num_pbdma_v();
        f->max_engines = ENGINE_INVAL_GK20A;

        f->userd_entry_size = 1 << ram_userd_base_shift_v();
        f->userd_total_size = f->userd_entry_size * f->num_channels;

        f->userd.mem.ref = mem_op().alloc(memmgr, f->userd_total_size,
                                       DEFAULT_NVMAP_ALLOC_ALIGNMENT,
                                       DEFAULT_NVMAP_ALLOC_FLAGS,
                                       NVMAP_HEAP_CARVEOUT_GENERIC);
        if (IS_ERR_OR_NULL(f->userd.mem.ref)) {
                err = -ENOMEM;
                goto clean_up;
        }

        f->userd.cpu_va = mem_op().mmap(f->userd.mem.ref);
        /* f->userd.cpu_va = g->bar1; */
        if (IS_ERR_OR_NULL(f->userd.cpu_va)) {
                f->userd.cpu_va = NULL;
                err = -ENOMEM;
                goto clean_up;
        }

        f->userd.cpu_pa = mem_op().pin(memmgr, f->userd.mem.ref);
        nvhost_dbg_info("userd physical address : 0x%08x",
                   (u32)f->userd.cpu_pa);

        if (f->userd.cpu_pa == -EINVAL ||
            f->userd.cpu_pa == -EINTR) {
                f->userd.cpu_pa = 0;
                err = -ENOMEM;
                goto clean_up;
        }

        /* bar1 va */
        f->userd.gpu_va = g->mm.bar1.vm.map(&g->mm.bar1.vm,
                                            memmgr,
                                            f->userd.mem.ref,
                                            /* offset_align, flags, kind */
                                            4096, 0, 0);
        nvhost_dbg_info("userd bar1 va = 0x%llx", f->userd.gpu_va);

        f->userd.mem.size = f->userd_total_size;

        f->channel = kzalloc(f->num_channels * sizeof(*f->channel),
                                GFP_KERNEL);
        f->pbdma_map = kzalloc(f->num_pbdma * sizeof(*f->pbdma_map),
                                GFP_KERNEL);
        f->engine_info = kzalloc(f->max_engines * sizeof(*f->engine_info),
                                GFP_KERNEL);

        if (!(f->channel && f->pbdma_map && f->engine_info)) {
                err = -ENOMEM;
                goto clean_up;
        }

        /* pbdma map needs to be in place before calling engine info init */
        for (i = 0; i < f->num_pbdma; ++i)
                f->pbdma_map[i] = gk20a_readl(g, fifo_pbdma_map_r(i));

        err = init_engine_info_gk20a(f);
        if (err)
                goto clean_up;

        err = fifo_gk20a_init_runlist(g, f);
        if (err)
                goto clean_up;

        for (chid = 0; chid < f->num_channels; chid++) {
                f->channel[chid].userd_cpu_va =
                        f->userd.cpu_va + chid * f->userd_entry_size;
                f->channel[chid].userd_cpu_pa =
                        f->userd.cpu_pa + chid * f->userd_entry_size;
                f->channel[chid].userd_gpu_va =
                        f->userd.gpu_va + chid * f->userd_entry_size;

                gk20a_init_channel_support(g, chid);
        }
        mutex_init(&f->ch_inuse_mutex);

        f->remove_support = gk20a_remove_fifo_support;

        nvhost_dbg_fn("done");
        return 0;

clean_up:
        nvhost_dbg_fn("fail");
        mem_op().munmap(f->userd.mem.ref, f->userd.cpu_va);
        mem_op().unpin(memmgr, f->userd.mem.ref);
        mem_op().put(memmgr, f->userd.mem.ref);
        memset(&f->userd, 0, sizeof(struct userd_desc));

        kfree(f->channel);
        f->channel = NULL;
        kfree(f->pbdma_map);
        f->pbdma_map = NULL;
        kfree(f->engine_info);
        f->engine_info = NULL;

        return err;
}

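/*
 * Ack pending runlist interrupts and wake anyone waiting on the affected
 * runlists.
 */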
static void gk20a_fifo_handle_runlist_event(struct gk20a *g)
{
        struct fifo_gk20a *f = &g->fifo;
        struct fifo_runlist_info_gk20a *runlist;
        unsigned long runlist_event;
        u32 runlist_id;

        runlist_event = gk20a_readl(g, fifo_intr_runlist_r());
        gk20a_writel(g, fifo_intr_runlist_r(), runlist_event);

        for_each_set_bit(runlist_id, &runlist_event, f->max_runlists) {
                runlist = &f->runlist_info[runlist_id];
                wake_up(&runlist->runlist_wq);
        }
}

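/*
 * Hardware setup: sanity-check userd accesses through bar1 against the cpu
 * mapping, then program the bar1 base register with the userd gpu va.
 */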
static int gk20a_init_fifo_setup_hw(struct gk20a *g)
{
        struct fifo_gk20a *f = &g->fifo;

        nvhost_dbg_fn("");

        /* test write, read through bar1 @ userd region before
         * turning on the snooping */
        {
                struct fifo_gk20a *f = &g->fifo;
                u32 v, v1 = 0x33, v2 = 0x55;

                u32 bar1_vaddr = f->userd.gpu_va;
                volatile u32 *cpu_vaddr = f->userd.cpu_va;

                nvhost_dbg_info("test bar1 @ vaddr 0x%x",
                           bar1_vaddr);

                v = gk20a_bar1_readl(g, bar1_vaddr);

                *cpu_vaddr = v1;
                smp_mb();

                if (v1 != gk20a_bar1_readl(g, bar1_vaddr)) {
                        nvhost_err(dev_from_gk20a(g), "bar1 broken @ gk20a!");
                        return -EINVAL;
                }

                gk20a_bar1_writel(g, bar1_vaddr, v2);

                if (v2 != gk20a_bar1_readl(g, bar1_vaddr)) {
                        nvhost_err(dev_from_gk20a(g), "bar1 broken @ gk20a!");
                        return -EINVAL;
                }

                /* is it visible to the cpu? */
                if (*cpu_vaddr != v2) {
                        nvhost_err(dev_from_gk20a(g),
                                "cpu didn't see bar1 write @ %p!",
                                cpu_vaddr);
                        return -EINVAL;
                }

                /* put it back */
                gk20a_bar1_writel(g, bar1_vaddr, v);
        }

        /* XXX all manner of flushes and caching worries, etc */

        /* set the base for the userd region now */
        gk20a_writel(g, fifo_bar1_base_r(),
                        fifo_bar1_base_ptr_f(f->userd.gpu_va >> 12) |
                        fifo_bar1_base_valid_true_f());

        nvhost_dbg_fn("done");

        return 0;
}

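/*
 * Full fifo bring-up: reset and enable the hardware, set up the software
 * state, then program the hardware.
 */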
int gk20a_init_fifo_support(struct gk20a *g, bool reinit)
{
        u32 err;

        err = gk20a_init_fifo_reset_enable_hw(g);
        if (err)
                return err;

        err = gk20a_init_fifo_setup_sw(g, reinit);
        if (err)
                return err;

        err = gk20a_init_fifo_setup_hw(g);
        if (err)
                return err;

        return err;
}

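/*
 * Log mmu fault info for every engine with a pending fault. The faults are
 * not cleared here yet.
 */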
static void gk20a_fifo_handle_mmu_fault(struct gk20a *g)
{
        u32 fault_id = gk20a_readl(g, fifo_intr_mmu_fault_id_r());
        u32 fault_info;
        u32 engine_id;

        for (engine_id = 0;
             engine_id < fifo_intr_mmu_fault_id_field__size_1_v();
             engine_id++) {
                if ((fault_id & (1 << engine_id)) ==
                    fifo_intr_mmu_fault_id_field_not_pending_v())
                        continue;

                fault_info = gk20a_readl(g,
                        fifo_intr_mmu_fault_info_r(engine_id));

                nvhost_err(dev_from_gk20a(g), "mmu fault on engine %d, "
                        "engine_subid %d, client %d, "
                        "addr 0x%08x:0x%08x, type %d, info 0x%08x\n",
                        engine_id,
                        fifo_intr_mmu_fault_info_engine_subid_v(fault_info),
                        fifo_intr_mmu_fault_info_client_v(fault_info),
                        gk20a_readl(g, fifo_intr_mmu_fault_hi_r(engine_id)),
                        gk20a_readl(g, fifo_intr_mmu_fault_lo_r(engine_id)),
                        fifo_intr_mmu_fault_info_type_v(fault_info),
                        fault_info);

                /* don't clear it yet */
                /* gk20a_writel(g, fifo_intr_mmu_fault_id_r(), fault_id); */
        }
}

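/*
 * Top-level fifo interrupt handler: services runlist events and reports
 * scheduler errors and mmu faults, which are left pending for debugging.
 */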
void gk20a_fifo_isr(struct gk20a *g)
{
        u32 fifo_intr = gk20a_readl(g, fifo_intr_0_r());

        /* handle runlist update */
        if (fifo_intr & fifo_intr_0_runlist_event_pending_f()) {
                gk20a_fifo_handle_runlist_event(g);
                fifo_intr &= ~fifo_intr_0_runlist_event_pending_f();
        }

        /* don't clear this for now
         * print more info for debugging */
        if (fifo_intr & fifo_intr_0_sched_error_pending_f()) {
                nvhost_err(dev_from_gk20a(g),
                        "fifo sched error : 0x%08x",
                        gk20a_readl(g, fifo_intr_sched_error_r()));
        }

        /* don't clear this for now
         * print more info for debugging */
        if (fifo_intr & fifo_intr_0_mmu_fault_pending_f())
                gk20a_fifo_handle_mmu_fault(g);

        if (fifo_intr)
                nvhost_err(dev_from_gk20a(g),
                           "unhandled fifo interrupt 0x%08x\n",
                           fifo_intr);
}

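/*
 * Preempt a single channel: issue the preempt request and poll for
 * completion (up to roughly two seconds) while holding the runlist mutex.
 */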
int gk20a_fifo_preempt_channel(struct gk20a *g, u32 runlist_id, u32 hw_chid)
{
        struct fifo_gk20a *f = &g->fifo;
        struct fifo_runlist_info_gk20a *runlist = &f->runlist_info[runlist_id];
        u32 timeout = 2000; /* 2 sec */
        u32 ret = 0;

        mutex_lock(&runlist->mutex);

        /* issue preempt */
        gk20a_writel(g, fifo_preempt_r(),
                fifo_preempt_chid_f(hw_chid) |
                fifo_preempt_type_channel_f());

        /* wait for preempt */
        do {
                if (!(gk20a_readl(g, fifo_preempt_r()) &
                        fifo_preempt_pending_true_f()))
                        break;

                if (--timeout == 0) {
                        nvhost_err(dev_from_gk20a(g),
                                    "preempt channel %d timeout\n",
                                    hw_chid);
                        ret = -EBUSY;
                        break;
                }
                mdelay(1);
        } while (1);

        mutex_unlock(&runlist->mutex);

        return ret;
}

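/* Re-enable scheduling of the engine's runlist. */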
int gk20a_fifo_enable_engine_activity(struct gk20a *g,
                                struct fifo_engine_info_gk20a *eng_info)
{
        u32 enable = gk20a_readl(g, fifo_sched_disable_r());
        enable &= ~(fifo_sched_disable_runlist_m(eng_info->runlist_id));
        gk20a_writel(g, fifo_sched_disable_r(), enable);

        /* no buffered-mode ? */

        return 0;
}

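/*
 * Disable scheduling of the engine's runlist and preempt whatever channel
 * is resident on its pbdma and on the engine. Returns -EBUSY if the engine
 * is busy and wait_for_idle is not set; on failure scheduling is re-enabled.
 */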
int gk20a_fifo_disable_engine_activity(struct gk20a *g,
                                struct fifo_engine_info_gk20a *eng_info,
                                bool wait_for_idle)
{
        u32 gr_stat, pbdma_stat, chan_stat, eng_stat, ctx_stat;
        u32 pbdma_chid = ~0, engine_chid = ~0, disable;
        u32 err;

        gr_stat =
                gk20a_readl(g, fifo_engine_status_r(eng_info->engine_id));
        if (fifo_engine_status_engine_v(gr_stat) ==
            fifo_engine_status_engine_busy_v() && !wait_for_idle)
                return -EBUSY;

        disable = gk20a_readl(g, fifo_sched_disable_r());
        disable = set_field(disable,
                        fifo_sched_disable_runlist_m(eng_info->runlist_id),
                        fifo_sched_disable_runlist_f(fifo_sched_disable_true_v(),
                                eng_info->runlist_id));
        gk20a_writel(g, fifo_sched_disable_r(), disable);

        /* no buffered-mode ? */

        /* chid from pbdma status */
        pbdma_stat = gk20a_readl(g, fifo_pbdma_status_r(eng_info->pbdma_id));
        chan_stat  = fifo_pbdma_status_chan_status_v(pbdma_stat);
        if (chan_stat == fifo_pbdma_status_chan_status_valid_v() ||
            chan_stat == fifo_pbdma_status_chan_status_chsw_save_v())
                pbdma_chid = fifo_pbdma_status_id_v(pbdma_stat);
        else if (chan_stat == fifo_pbdma_status_chan_status_chsw_load_v() ||
                 chan_stat == fifo_pbdma_status_chan_status_chsw_switch_v())
                pbdma_chid = fifo_pbdma_status_next_id_v(pbdma_stat);

        if (pbdma_chid != ~0) {
                err = gk20a_fifo_preempt_channel(g,
                                eng_info->runlist_id, pbdma_chid);
                if (err)
                        goto clean_up;
        }

        /* chid from engine status */
        eng_stat = gk20a_readl(g, fifo_engine_status_r(eng_info->engine_id));
        ctx_stat  = fifo_engine_status_ctx_status_v(eng_stat);
        if (ctx_stat == fifo_engine_status_ctx_status_valid_v() ||
            ctx_stat == fifo_engine_status_ctx_status_ctxsw_save_v())
                engine_chid = fifo_engine_status_id_v(eng_stat);
        else if (ctx_stat == fifo_engine_status_ctx_status_ctxsw_load_v() ||
                 ctx_stat == fifo_engine_status_ctx_status_ctxsw_switch_v())
                engine_chid = fifo_engine_status_next_id_v(eng_stat);

        if (engine_chid != ~0 && engine_chid != pbdma_chid) {
                err = gk20a_fifo_preempt_channel(g,
                                eng_info->runlist_id, engine_chid);
                if (err)
                        goto clean_up;
        }

        return 0;

clean_up:
        gk20a_fifo_enable_engine_activity(g, eng_info);
        return err;
}