gpu: nvgpu: Fix invalid GPFIFO entries
[linux-3.10.git] drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
/*
 * drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
 *
 * GK20A Channel Synchronization Abstraction
 *
 * Copyright (c) 2014-2015, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/gk20a.h>

#include "channel_sync_gk20a.h"
#include "gk20a.h"
#include "fence_gk20a.h"
#include "semaphore_gk20a.h"
#include "sync_gk20a.h"
#include "mm_gk20a.h"

#ifdef CONFIG_SYNC
#include "../../../staging/android/sync.h"
#endif

#ifdef CONFIG_TEGRA_GK20A
#include <linux/nvhost.h>
#endif

#ifdef CONFIG_TEGRA_GK20A

struct gk20a_channel_syncpt {
        struct gk20a_channel_sync ops;
        struct channel_gk20a *c;
        struct platform_device *host1x_pdev;
        u32 id;
};

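/*
 * Emit a four-word syncpoint wait into @ptr: SYNCPOINT_A carries the
 * threshold payload, SYNCPOINT_B carries the syncpoint id together with
 * the switch_en and wait operation bits. The 0x2001001C/0x2001001D words
 * are the method headers this file uses for those two methods.
 */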
static void add_wait_cmd(u32 *ptr, u32 id, u32 thresh)
{
        /* syncpoint_a */
        ptr[0] = 0x2001001C;
        /* payload */
        ptr[1] = thresh;
        /* syncpoint_b */
        ptr[2] = 0x2001001D;
        /* syncpt_id, switch_en, wait */
        ptr[3] = (id << 8) | 0x10;
}

static int gk20a_channel_syncpt_wait_syncpt(struct gk20a_channel_sync *s,
                u32 id, u32 thresh, struct priv_cmd_entry **entry,
                struct gk20a_fence **fence)
{
        struct gk20a_channel_syncpt *sp =
                container_of(s, struct gk20a_channel_syncpt, ops);
        struct priv_cmd_entry *wait_cmd = NULL;

        if (!nvhost_syncpt_is_valid_pt_ext(sp->host1x_pdev, id)) {
                dev_warn(dev_from_gk20a(sp->c->g),
                                "invalid wait id in gpfifo submit, elided");
                return 0;
        }

        if (nvhost_syncpt_is_expired_ext(sp->host1x_pdev, id, thresh))
                return 0;

        gk20a_channel_alloc_priv_cmdbuf(sp->c, 4, &wait_cmd);
        if (wait_cmd == NULL) {
                gk20a_err(dev_from_gk20a(sp->c->g),
                                "not enough priv cmd buffer space");
                return -EAGAIN;
        }

        add_wait_cmd(&wait_cmd->ptr[0], id, thresh);

        *entry = wait_cmd;
        *fence = NULL;
        return 0;
}

static int gk20a_channel_syncpt_wait_fd(struct gk20a_channel_sync *s, int fd,
                       struct priv_cmd_entry **entry,
                       struct gk20a_fence **fence)
{
#ifdef CONFIG_SYNC
        int i;
        int num_wait_cmds;
        struct sync_pt *pt;
        struct sync_fence *sync_fence;
        struct priv_cmd_entry *wait_cmd = NULL;
        struct gk20a_channel_syncpt *sp =
                container_of(s, struct gk20a_channel_syncpt, ops);
        struct channel_gk20a *c = sp->c;

        sync_fence = nvhost_sync_fdget(fd);
        if (!sync_fence)
                return -EINVAL;

        /* validate syncpt ids */
        list_for_each_entry(pt, &sync_fence->pt_list_head, pt_list) {
                u32 wait_id = nvhost_sync_pt_id(pt);
                if (!wait_id || !nvhost_syncpt_is_valid_pt_ext(sp->host1x_pdev,
                                        wait_id)) {
                        sync_fence_put(sync_fence);
                        return -EINVAL;
                }
        }

        num_wait_cmds = nvhost_sync_num_pts(sync_fence);
        if (num_wait_cmds == 0) {
                sync_fence_put(sync_fence);
                return 0;
        }

        gk20a_channel_alloc_priv_cmdbuf(c, 4 * num_wait_cmds, &wait_cmd);
        if (wait_cmd == NULL) {
                gk20a_err(dev_from_gk20a(c->g),
                                "not enough priv cmd buffer space");
                sync_fence_put(sync_fence);
                return -EAGAIN;
        }

        i = 0;
        list_for_each_entry(pt, &sync_fence->pt_list_head, pt_list) {
                u32 wait_id = nvhost_sync_pt_id(pt);
                u32 wait_value = nvhost_sync_pt_thresh(pt);

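                /*
                 * If this point has already expired, pad the slot with
                 * zero words (no-op filler) instead of shrinking the
                 * buffer, so the entry keeps exactly the size that was
                 * allocated and the resulting GPFIFO entry stays valid.
                 */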
                if (nvhost_syncpt_is_expired_ext(sp->host1x_pdev,
                                wait_id, wait_value)) {
                        wait_cmd->ptr[i * 4 + 0] = 0;
                        wait_cmd->ptr[i * 4 + 1] = 0;
                        wait_cmd->ptr[i * 4 + 2] = 0;
                        wait_cmd->ptr[i * 4 + 3] = 0;
                } else
                        add_wait_cmd(&wait_cmd->ptr[i * 4], wait_id,
                                        wait_value);
                i++;
        }
        WARN_ON(i != num_wait_cmds);
        sync_fence_put(sync_fence);

        *entry = wait_cmd;
        *fence = NULL;
        return 0;
#else
        return -ENODEV;
#endif
}

static void gk20a_channel_syncpt_update(void *priv, int nr_completed)
{
        struct channel_gk20a *ch20a = priv;
        gk20a_channel_update(ch20a, nr_completed);
}

static int __gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
                                       bool wfi_cmd,
                                       bool register_irq,
                                       struct priv_cmd_entry **entry,
                                       struct gk20a_fence **fence)
{
        u32 thresh;
        int incr_cmd_size;
        int j = 0;
        int err;
        struct priv_cmd_entry *incr_cmd = NULL;
        struct gk20a_channel_syncpt *sp =
                container_of(s, struct gk20a_channel_syncpt, ops);
        struct channel_gk20a *c = sp->c;

        incr_cmd_size = 6;
        if (wfi_cmd)
                incr_cmd_size += 2;

        gk20a_channel_alloc_priv_cmdbuf(c, incr_cmd_size, &incr_cmd);
        if (incr_cmd == NULL) {
                gk20a_err(dev_from_gk20a(c->g),
                                "not enough priv cmd buffer space");
                return -EAGAIN;
        }

        /* WAR for hw bug 1491360: syncpt needs to be incremented twice */

        if (wfi_cmd) {
                /* wfi */
                incr_cmd->ptr[j++] = 0x2001001E;
                /* handle, ignored */
                incr_cmd->ptr[j++] = 0x00000000;
        }
        /* syncpoint_a */
        incr_cmd->ptr[j++] = 0x2001001C;
        /* payload, ignored */
        incr_cmd->ptr[j++] = 0;
        /* syncpoint_b */
        incr_cmd->ptr[j++] = 0x2001001D;
        /* syncpt_id, incr */
        incr_cmd->ptr[j++] = (sp->id << 8) | 0x1;
        /* syncpoint_b */
        incr_cmd->ptr[j++] = 0x2001001D;
        /* syncpt_id, incr */
        incr_cmd->ptr[j++] = (sp->id << 8) | 0x1;
        WARN_ON(j != incr_cmd_size);

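        /* The pushbuffer above increments the syncpoint twice, so the
         * syncpoint max must advance by exactly 2 to match. */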
        thresh = nvhost_syncpt_incr_max_ext(sp->host1x_pdev, sp->id, 2);

        if (register_irq) {
                err = nvhost_intr_register_notifier(sp->host1x_pdev,
                                sp->id, thresh,
                                gk20a_channel_syncpt_update, c);

                /* Adding the interrupt action should never fail. Proper
                 * error handling here would require us to decrement the
                 * syncpt max back to its original value. */
                WARN(err, "failed to set submit complete interrupt");
        }

        *fence = gk20a_fence_from_syncpt(sp->host1x_pdev, sp->id, thresh,
                                         wfi_cmd);
        *entry = incr_cmd;
        return 0;
}

static int gk20a_channel_syncpt_incr_wfi(struct gk20a_channel_sync *s,
                                  struct priv_cmd_entry **entry,
                                  struct gk20a_fence **fence)
{
        return __gk20a_channel_syncpt_incr(s,
                        true /* wfi */,
                        false /* no irq handler */,
                        entry, fence);
}

static int gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
                              struct priv_cmd_entry **entry,
                              struct gk20a_fence **fence)
{
        /* Don't add a wfi cmd here, since we're not returning
         * a fence to user space. */
        return __gk20a_channel_syncpt_incr(s,
                        false /* no wfi */,
                        true /* register irq */,
                        entry, fence);
}

static int gk20a_channel_syncpt_incr_user(struct gk20a_channel_sync *s,
                                   int wait_fence_fd,
                                   struct priv_cmd_entry **entry,
                                   struct gk20a_fence **fence,
                                   bool wfi)
{
        /* Need to do 'wfi + host incr' since we return the fence
         * to user space. */
        return __gk20a_channel_syncpt_incr(s,
                        wfi,
                        true /* register irq */,
                        entry, fence);
}

static void gk20a_channel_syncpt_set_min_eq_max(struct gk20a_channel_sync *s)
{
        struct gk20a_channel_syncpt *sp =
                container_of(s, struct gk20a_channel_syncpt, ops);
        nvhost_syncpt_set_min_eq_max_ext(sp->host1x_pdev, sp->id);
}

static void gk20a_channel_syncpt_signal_timeline(
                struct gk20a_channel_sync *s)
{
        /* Nothing to do. */
}

static int gk20a_channel_syncpt_id(struct gk20a_channel_sync *s)
{
        struct gk20a_channel_syncpt *sp =
                container_of(s, struct gk20a_channel_syncpt, ops);
        return sp->id;
}

static void gk20a_channel_syncpt_destroy(struct gk20a_channel_sync *s)
{
        struct gk20a_channel_syncpt *sp =
                container_of(s, struct gk20a_channel_syncpt, ops);
        nvhost_syncpt_set_min_eq_max_ext(sp->host1x_pdev, sp->id);
        nvhost_syncpt_put_ref_ext(sp->id);
        kfree(sp);
}

static struct gk20a_channel_sync *
gk20a_channel_syncpt_create(struct channel_gk20a *c)
{
        struct gk20a_channel_syncpt *sp;

        sp = kzalloc(sizeof(*sp), GFP_KERNEL);
        if (!sp)
                return NULL;

        sp->c = c;
        sp->host1x_pdev = c->g->host1x_dev;
        sp->id = nvhost_get_syncpt_host_managed(c->g->dev, c->hw_chid);
        if (!sp->id) {
                gk20a_err(&c->g->dev->dev, "failed to get free syncpt");
                kfree(sp);
                return NULL;
        }
        nvhost_syncpt_set_min_eq_max_ext(sp->host1x_pdev, sp->id);

        sp->ops.wait_syncpt             = gk20a_channel_syncpt_wait_syncpt;
        sp->ops.wait_fd                 = gk20a_channel_syncpt_wait_fd;
        sp->ops.incr                    = gk20a_channel_syncpt_incr;
        sp->ops.incr_wfi                = gk20a_channel_syncpt_incr_wfi;
        sp->ops.incr_user               = gk20a_channel_syncpt_incr_user;
        sp->ops.set_min_eq_max          = gk20a_channel_syncpt_set_min_eq_max;
        sp->ops.signal_timeline         = gk20a_channel_syncpt_signal_timeline;
        sp->ops.syncpt_id               = gk20a_channel_syncpt_id;
        sp->ops.destroy                 = gk20a_channel_syncpt_destroy;

        sp->ops.aggressive_destroy      = true;

        return &sp->ops;
}
#endif /* CONFIG_TEGRA_GK20A */

struct gk20a_channel_semaphore {
        struct gk20a_channel_sync ops;
        struct channel_gk20a *c;

        /* A semaphore pool owned by this channel. */
        struct gk20a_semaphore_pool *pool;

        /* A sync timeline that advances when gpu completes work. */
        struct sync_timeline *timeline;
};

#ifdef CONFIG_SYNC
struct wait_fence_work {
        struct sync_fence_waiter waiter;
        struct channel_gk20a *ch;
        struct gk20a_semaphore *sema;
};

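/*
 * Fence waiter callback: runs on the CPU once the pre-fence the channel
 * depends on has signalled, and releases the semaphore that the GPU job
 * was left waiting on.
 */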
static void gk20a_channel_semaphore_launcher(
                struct sync_fence *fence,
                struct sync_fence_waiter *waiter)
{
        int err;
        struct wait_fence_work *w =
                container_of(waiter, struct wait_fence_work, waiter);
        struct gk20a *g = w->ch->g;

        gk20a_dbg_info("waiting for pre fence %p '%s'",
                        fence, fence->name);
        err = sync_fence_wait(fence, -1);
        if (err < 0)
                dev_err(&g->dev->dev, "error waiting pre-fence: %d\n", err);

        gk20a_dbg_info(
                  "wait completed (%d) for fence %p '%s', triggering gpu work",
                  err, fence, fence->name);
        sync_fence_put(fence);
        gk20a_semaphore_release(w->sema);
        gk20a_semaphore_put(w->sema);
        kfree(w);
}
#endif

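/*
 * Emit a semaphore operation into @ptr: SEMAPHORE_A/B carry the 40-bit
 * GPU virtual address of the semaphore, SEMAPHORE_C the payload, and
 * SEMAPHORE_D the operation (acquire-greater-or-equal with switch_en, or
 * release with an optional WFI plus a non-stall interrupt). Returns the
 * number of words written.
 */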
static int add_sema_cmd(u32 *ptr, u64 sema, u32 payload,
                        bool acquire, bool wfi)
{
        int i = 0;
        /* semaphore_a */
        ptr[i++] = 0x20010004;
        /* offset_upper */
        ptr[i++] = (sema >> 32) & 0xff;
        /* semaphore_b */
        ptr[i++] = 0x20010005;
        /* offset */
        ptr[i++] = sema & 0xffffffff;
        /* semaphore_c */
        ptr[i++] = 0x20010006;
        /* payload */
        ptr[i++] = payload;
        if (acquire) {
                /* semaphore_d */
                ptr[i++] = 0x20010007;
                /* operation: acq_geq, switch_en */
                ptr[i++] = 0x4 | (0x1 << 12);
        } else {
                /* semaphore_d */
                ptr[i++] = 0x20010007;
                /* operation: release, wfi */
                ptr[i++] = 0x2 | ((wfi ? 0x0 : 0x1) << 20);
                /* non_stall_int */
                ptr[i++] = 0x20010008;
                /* ignored */
                ptr[i++] = 0;
        }
        return i;
}

static int gk20a_channel_semaphore_wait_syncpt(
                struct gk20a_channel_sync *s, u32 id,
                u32 thresh, struct priv_cmd_entry **entry,
                struct gk20a_fence **fence)
{
        struct gk20a_channel_semaphore *sema =
                container_of(s, struct gk20a_channel_semaphore, ops);
        struct device *dev = dev_from_gk20a(sema->c->g);
        gk20a_err(dev, "trying to use syncpoint synchronization");
        return -ENODEV;
}

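/*
 * Convert an external sync fd into something the GPU can wait on: the
 * pushbuffer acquires a fresh semaphore, and a CPU-side fence waiter
 * (gk20a_channel_semaphore_launcher above) releases that semaphore once
 * every point in the fd has signalled.
 */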
static int gk20a_channel_semaphore_wait_fd(
                struct gk20a_channel_sync *s, int fd,
                struct priv_cmd_entry **entry,
                struct gk20a_fence **fence)
{
        struct gk20a_channel_semaphore *sema =
                container_of(s, struct gk20a_channel_semaphore, ops);
        struct channel_gk20a *c = sema->c;
#ifdef CONFIG_SYNC
        struct sync_fence *sync_fence;
        struct priv_cmd_entry *wait_cmd = NULL;
        struct wait_fence_work *w;
        int written;
        int err;
        u64 va;

        sync_fence = gk20a_sync_fence_fdget(fd);
        if (!sync_fence)
                return -EINVAL;

        w = kzalloc(sizeof(*w), GFP_KERNEL);
        if (!w) {
                err = -ENOMEM;
                goto fail;
        }
        sync_fence_waiter_init(&w->waiter, gk20a_channel_semaphore_launcher);
        w->ch = c;
        w->sema = gk20a_semaphore_alloc(sema->pool);
        if (!w->sema) {
                gk20a_err(dev_from_gk20a(c->g), "ran out of semaphores");
                err = -EAGAIN;
                goto fail;
        }

        /* worker takes one reference */
        gk20a_semaphore_get(w->sema);

        gk20a_channel_alloc_priv_cmdbuf(c, 8, &wait_cmd);
        if (wait_cmd == NULL) {
                gk20a_err(dev_from_gk20a(c->g),
                                "not enough priv cmd buffer space");
                err = -EAGAIN;
                goto fail;
        }

        va = gk20a_semaphore_gpu_va(w->sema, c->vm);
        /* GPU unblocked when the semaphore value becomes 1. */
        written = add_sema_cmd(wait_cmd->ptr, va, 1, true, false);
        WARN_ON(written != wait_cmd->size);
        sync_fence_wait_async(sync_fence, &w->waiter);

        /* XXX - this fixes an actual bug, we need to hold a ref to this
           semaphore while the job is in flight. */
        *fence = gk20a_fence_from_semaphore(sema->timeline, w->sema,
                                            &c->semaphore_wq,
                                            NULL, false);
        *entry = wait_cmd;
        return 0;
fail:
        if (w && w->sema) {
                /* drop both the allocation ref and the worker's ref; the
                 * waiter was never registered on any path that gets here */
                gk20a_semaphore_put(w->sema);
                gk20a_semaphore_put(w->sema);
        }
        kfree(w);
        sync_fence_put(sync_fence);
        return err;
#else
        gk20a_err(dev_from_gk20a(c->g),
                  "trying to use sync fds with CONFIG_SYNC disabled");
        return -ENODEV;
#endif
}

static int __gk20a_channel_semaphore_incr(
                struct gk20a_channel_sync *s, bool wfi_cmd,
                struct sync_fence *dependency,
                struct priv_cmd_entry **entry,
                struct gk20a_fence **fence)
{
        u64 va;
        int incr_cmd_size;
        int written;
        struct priv_cmd_entry *incr_cmd = NULL;
        struct gk20a_channel_semaphore *sp =
                container_of(s, struct gk20a_channel_semaphore, ops);
        struct channel_gk20a *c = sp->c;
        struct gk20a_semaphore *semaphore;

        semaphore = gk20a_semaphore_alloc(sp->pool);
        if (!semaphore) {
                gk20a_err(dev_from_gk20a(c->g),
                                "ran out of semaphores");
                return -EAGAIN;
        }

        incr_cmd_size = 10;
        gk20a_channel_alloc_priv_cmdbuf(c, incr_cmd_size, &incr_cmd);
        if (incr_cmd == NULL) {
                gk20a_err(dev_from_gk20a(c->g),
                                "not enough priv cmd buffer space");
                gk20a_semaphore_put(semaphore);
                return -EAGAIN;
        }

        /* Release the completion semaphore. */
        va = gk20a_semaphore_gpu_va(semaphore, c->vm);
        written = add_sema_cmd(incr_cmd->ptr, va, 1, false, wfi_cmd);
        WARN_ON(written != incr_cmd_size);

        *fence = gk20a_fence_from_semaphore(sp->timeline, semaphore,
                                            &c->semaphore_wq,
                                            dependency, wfi_cmd);
        *entry = incr_cmd;
        return 0;
}

static int gk20a_channel_semaphore_incr_wfi(
                struct gk20a_channel_sync *s,
                struct priv_cmd_entry **entry,
                struct gk20a_fence **fence)
{
        return __gk20a_channel_semaphore_incr(s,
                        true /* wfi */,
                        NULL,
                        entry, fence);
}

static int gk20a_channel_semaphore_incr(
                struct gk20a_channel_sync *s,
                struct priv_cmd_entry **entry,
                struct gk20a_fence **fence)
{
        /* Don't add a wfi cmd here, since we're not returning
         * a fence to user space. */
        return __gk20a_channel_semaphore_incr(s, false /* no wfi */,
                                              NULL, entry, fence);
}

static int gk20a_channel_semaphore_incr_user(
                struct gk20a_channel_sync *s,
                int wait_fence_fd,
                struct priv_cmd_entry **entry,
                struct gk20a_fence **fence,
                bool wfi)
{
#ifdef CONFIG_SYNC
        struct sync_fence *dependency = NULL;
        int err;

        if (wait_fence_fd >= 0) {
                dependency = gk20a_sync_fence_fdget(wait_fence_fd);
                if (!dependency)
                        return -EINVAL;
        }

        err = __gk20a_channel_semaphore_incr(s, wfi, dependency,
                                             entry, fence);
        if (err) {
                if (dependency)
                        sync_fence_put(dependency);
                return err;
        }

        return 0;
#else
        struct gk20a_channel_semaphore *sema =
                container_of(s, struct gk20a_channel_semaphore, ops);
        gk20a_err(dev_from_gk20a(sema->c->g),
                  "trying to use sync fds with CONFIG_SYNC disabled");
        return -ENODEV;
#endif
}

static void gk20a_channel_semaphore_set_min_eq_max(struct gk20a_channel_sync *s)
{
        /* Nothing to do. */
}

static void gk20a_channel_semaphore_signal_timeline(
                struct gk20a_channel_sync *s)
{
        struct gk20a_channel_semaphore *sp =
                container_of(s, struct gk20a_channel_semaphore, ops);
        gk20a_sync_timeline_signal(sp->timeline);
}

static int gk20a_channel_semaphore_syncpt_id(struct gk20a_channel_sync *s)
{
        return -EINVAL;
}

static void gk20a_channel_semaphore_destroy(struct gk20a_channel_sync *s)
{
        struct gk20a_channel_semaphore *sema =
                container_of(s, struct gk20a_channel_semaphore, ops);
        if (sema->timeline)
                gk20a_sync_timeline_destroy(sema->timeline);
        if (sema->pool) {
                gk20a_semaphore_pool_unmap(sema->pool, sema->c->vm);
                gk20a_semaphore_pool_put(sema->pool);
        }
        kfree(sema);
}

static struct gk20a_channel_sync *
gk20a_channel_semaphore_create(struct channel_gk20a *c)
{
        int err;
        int asid = -1;
        struct gk20a_channel_semaphore *sema;
        char pool_name[20];

        if (WARN_ON(!c->vm))
                return NULL;

        sema = kzalloc(sizeof(*sema), GFP_KERNEL);
        if (!sema)
                return NULL;
        sema->c = c;

        if (c->vm->as_share)
                asid = c->vm->as_share->id;

        /* A pool of 256 semaphores fits into one 4k page. */
        snprintf(pool_name, sizeof(pool_name),
                 "semaphore_pool-%d", c->hw_chid);
        sema->pool = gk20a_semaphore_pool_alloc(dev_from_gk20a(c->g),
                                                pool_name, 256);
        if (!sema->pool)
                goto clean_up;

        /* Map the semaphore pool to the channel vm. Map as read-write to the
         * owner channel (all other channels should map as read only!). */
        err = gk20a_semaphore_pool_map(sema->pool, c->vm, gk20a_mem_flag_none);
        if (err)
                goto clean_up;

#ifdef CONFIG_SYNC
        sema->timeline = gk20a_sync_timeline_create(
                        "gk20a_ch%d_as%d", c->hw_chid, asid);
        if (!sema->timeline)
                goto clean_up;
#endif
        sema->ops.wait_syncpt   = gk20a_channel_semaphore_wait_syncpt;
        sema->ops.wait_fd       = gk20a_channel_semaphore_wait_fd;
        sema->ops.incr          = gk20a_channel_semaphore_incr;
        sema->ops.incr_wfi      = gk20a_channel_semaphore_incr_wfi;
        sema->ops.incr_user     = gk20a_channel_semaphore_incr_user;
        sema->ops.set_min_eq_max = gk20a_channel_semaphore_set_min_eq_max;
        sema->ops.signal_timeline = gk20a_channel_semaphore_signal_timeline;
        sema->ops.syncpt_id     = gk20a_channel_semaphore_syncpt_id;
        sema->ops.destroy       = gk20a_channel_semaphore_destroy;

        /* Aggressively destroying the semaphore sync would cause overhead
         * since the pool needs to be mapped to GMMU. */
        sema->ops.aggressive_destroy = false;

        return &sema->ops;
clean_up:
        gk20a_channel_semaphore_destroy(&sema->ops);
        return NULL;
}

struct gk20a_channel_sync *gk20a_channel_sync_create(struct channel_gk20a *c)
{
#ifdef CONFIG_TEGRA_GK20A
        if (gk20a_platform_has_syncpoints(c->g->dev))
                return gk20a_channel_syncpt_create(c);
#endif
        return gk20a_channel_semaphore_create(c);
}
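
/*
 * Usage sketch (illustrative, not code from this file): a submit path
 * would typically bracket the user's GPFIFO entries with the wait and
 * increment commands produced by these ops. Variable names below are
 * hypothetical.
 *
 *      struct priv_cmd_entry *wait_cmd = NULL, *incr_cmd = NULL;
 *      struct gk20a_fence *pre_fence = NULL, *post_fence = NULL;
 *
 *      c->sync->wait_fd(c->sync, fd, &wait_cmd, &pre_fence);
 *      ...append wait_cmd ahead of the user's GPFIFO entries...
 *      c->sync->incr_user(c->sync, fd, &incr_cmd, &post_fence, wfi);
 *      ...append incr_cmd after them; post_fence tracks job completion...
 */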