/*
 * drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
 *
 * GK20A Channel Synchronization Abstraction
 *
 * Copyright (c) 2014-2015, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/gk20a.h>

#include "channel_sync_gk20a.h"
#include "gk20a.h"
#include "fence_gk20a.h"
#include "semaphore_gk20a.h"
#include "sync_gk20a.h"
#include "mm_gk20a.h"

#ifdef CONFIG_SYNC
#include "../../../staging/android/sync.h"
#endif

#ifdef CONFIG_TEGRA_GK20A
#include <linux/nvhost.h>
#endif

#ifdef CONFIG_TEGRA_GK20A

struct gk20a_channel_syncpt {
        struct gk20a_channel_sync ops;
        struct channel_gk20a *c;
        struct platform_device *host1x_pdev;
        u32 id;
};

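/*
 * Emit a host1x syncpoint wait as GPU pushbuffer methods. The 0x2001xxxx
 * words below appear to be Fermi+ "incrementing" method headers,
 * (1 << 29) | (count << 16) | (subchannel << 13) | (method >> 2): here
 * count = 1, subchannel = 0, and the methods at byte offsets 0x70/0x74
 * are SYNCPOINTA (payload, i.e. the wait threshold) and SYNCPOINTB
 * (syncpoint index in bits 15:8 plus the wait operation, with 0x10
 * enabling a channel switch while waiting). This decoding is inferred
 * from the inline comments, not taken from the hardware headers.
 */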
static void add_wait_cmd(u32 *ptr, u32 id, u32 thresh)
{
        /* syncpoint_a */
        ptr[0] = 0x2001001C;
        /* payload */
        ptr[1] = thresh;
        /* syncpoint_b */
        ptr[2] = 0x2001001D;
        /* syncpt_id, switch_en, wait */
        ptr[3] = (id << 8) | 0x10;
}

static int gk20a_channel_syncpt_wait_syncpt(struct gk20a_channel_sync *s,
                u32 id, u32 thresh, struct priv_cmd_entry **entry,
                struct gk20a_fence **fence)
{
        struct gk20a_channel_syncpt *sp =
                container_of(s, struct gk20a_channel_syncpt, ops);
        struct priv_cmd_entry *wait_cmd = NULL;

        if (!nvhost_syncpt_is_valid_pt_ext(sp->host1x_pdev, id)) {
                dev_warn(dev_from_gk20a(sp->c->g),
                                "invalid wait id in gpfifo submit, elided");
                return 0;
        }

        if (nvhost_syncpt_is_expired_ext(sp->host1x_pdev, id, thresh))
                return 0;

        gk20a_channel_alloc_priv_cmdbuf(sp->c, 4, &wait_cmd);
        if (wait_cmd == NULL) {
                gk20a_err(dev_from_gk20a(sp->c->g),
                                "not enough priv cmd buffer space");
                return -EAGAIN;
        }

        add_wait_cmd(&wait_cmd->ptr[0], id, thresh);

        *entry = wait_cmd;
        *fence = NULL;
        return 0;
}

static int gk20a_channel_syncpt_wait_fd(struct gk20a_channel_sync *s, int fd,
                       struct priv_cmd_entry **entry,
                       struct gk20a_fence **fence)
{
#ifdef CONFIG_SYNC
        int i;
        int num_wait_cmds;
        struct sync_pt *pt;
        struct sync_fence *sync_fence;
        struct priv_cmd_entry *wait_cmd = NULL;
        struct gk20a_channel_syncpt *sp =
                container_of(s, struct gk20a_channel_syncpt, ops);
        struct channel_gk20a *c = sp->c;

        sync_fence = nvhost_sync_fdget(fd);
        if (!sync_fence)
                return -EINVAL;

        /* validate syncpt ids */
        list_for_each_entry(pt, &sync_fence->pt_list_head, pt_list) {
                u32 wait_id = nvhost_sync_pt_id(pt);
                if (!wait_id || !nvhost_syncpt_is_valid_pt_ext(sp->host1x_pdev,
                                        wait_id)) {
                        sync_fence_put(sync_fence);
                        return -EINVAL;
                }
        }

        num_wait_cmds = nvhost_sync_num_pts(sync_fence);
        if (num_wait_cmds == 0) {
                sync_fence_put(sync_fence);
                return 0;
        }

        gk20a_channel_alloc_priv_cmdbuf(c, 4 * num_wait_cmds, &wait_cmd);
        if (wait_cmd == NULL) {
                gk20a_err(dev_from_gk20a(c->g),
                                "not enough priv cmd buffer space");
                sync_fence_put(sync_fence);
                return -EAGAIN;
        }

        i = 0;
        list_for_each_entry(pt, &sync_fence->pt_list_head, pt_list) {
                u32 wait_id = nvhost_sync_pt_id(pt);
                u32 wait_value = nvhost_sync_pt_thresh(pt);

                if (nvhost_syncpt_is_expired_ext(sp->host1x_pdev,
                                wait_id, wait_value)) {
                        /* Already expired: skip the wait; the zeroed
                         * words serve as padding in the already-allocated
                         * command buffer. */
                        wait_cmd->ptr[i * 4 + 0] = 0;
                        wait_cmd->ptr[i * 4 + 1] = 0;
                        wait_cmd->ptr[i * 4 + 2] = 0;
                        wait_cmd->ptr[i * 4 + 3] = 0;
                } else {
                        add_wait_cmd(&wait_cmd->ptr[i * 4], wait_id,
                                        wait_value);
                }
                i++;
        }
        WARN_ON(i != num_wait_cmds);
        sync_fence_put(sync_fence);

        *entry = wait_cmd;
        *fence = NULL;
        return 0;
#else
        return -ENODEV;
#endif
}

static void gk20a_channel_syncpt_update(void *priv, int nr_completed)
{
        struct channel_gk20a *ch20a = priv;
        gk20a_channel_update(ch20a, nr_completed);
}

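/*
 * Emit a job-completion syncpoint increment: an optional wait-for-idle
 * followed by two syncpoint increments (see the hardware-bug workaround
 * note below). The host1x max value is advanced by the same two steps, so
 * the returned threshold matches the value the syncpoint reaches when the
 * job completes; optionally a host1x notifier is registered so channel
 * bookkeeping runs once that threshold is hit.
 */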
static int __gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
                                       bool wfi_cmd,
                                       bool register_irq,
                                       struct priv_cmd_entry **entry,
                                       struct gk20a_fence **fence)
{
        u32 thresh;
        int incr_cmd_size;
        int j = 0;
        int err;
        struct priv_cmd_entry *incr_cmd = NULL;
        struct gk20a_channel_syncpt *sp =
                container_of(s, struct gk20a_channel_syncpt, ops);
        struct channel_gk20a *c = sp->c;

        incr_cmd_size = 6;
        if (wfi_cmd)
                incr_cmd_size += 2;

        gk20a_channel_alloc_priv_cmdbuf(c, incr_cmd_size, &incr_cmd);
        if (incr_cmd == NULL) {
                gk20a_err(dev_from_gk20a(c->g),
                                "not enough priv cmd buffer space");
                return -EAGAIN;
        }

        /* WAR for hw bug 1491360: the syncpoint needs to be incremented
         * twice, which is why two syncpoint_b/incr pairs are emitted below
         * and the max value is advanced by 2 afterwards. */

        if (wfi_cmd) {
                /* wfi */
                incr_cmd->ptr[j++] = 0x2001001E;
                /* handle, ignored */
                incr_cmd->ptr[j++] = 0x00000000;
        }
        /* syncpoint_a */
        incr_cmd->ptr[j++] = 0x2001001C;
        /* payload, ignored */
        incr_cmd->ptr[j++] = 0;
        /* syncpoint_b */
        incr_cmd->ptr[j++] = 0x2001001D;
        /* syncpt_id, incr */
        incr_cmd->ptr[j++] = (sp->id << 8) | 0x1;
        /* syncpoint_b */
        incr_cmd->ptr[j++] = 0x2001001D;
        /* syncpt_id, incr */
        incr_cmd->ptr[j++] = (sp->id << 8) | 0x1;
        WARN_ON(j != incr_cmd_size);

        thresh = nvhost_syncpt_incr_max_ext(sp->host1x_pdev, sp->id, 2);

        if (register_irq) {
                err = nvhost_intr_register_notifier(sp->host1x_pdev,
                                sp->id, thresh,
                                gk20a_channel_syncpt_update, c);

                /* Adding the interrupt action should never fail; proper
                 * error handling here would require decrementing the
                 * syncpt max back to its original value. */
                WARN(err, "failed to set submit complete interrupt");
        }

        *fence = gk20a_fence_from_syncpt(sp->host1x_pdev, sp->id, thresh,
                                         wfi_cmd);
        *entry = incr_cmd;
        return 0;
}

static int gk20a_channel_syncpt_incr_wfi(struct gk20a_channel_sync *s,
                                  struct priv_cmd_entry **entry,
                                  struct gk20a_fence **fence)
{
        return __gk20a_channel_syncpt_incr(s,
                        true /* wfi */,
                        false /* no irq handler */,
                        entry, fence);
}

static int gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
                              struct priv_cmd_entry **entry,
                              struct gk20a_fence **fence)
{
        /* Don't add a wfi cmd to this one, since we're not returning
         * a fence to user space. */
        return __gk20a_channel_syncpt_incr(s,
                        false /* no wfi */,
                        true /* register irq */,
                        entry, fence);
}

static int gk20a_channel_syncpt_incr_user(struct gk20a_channel_sync *s,
                                   int wait_fence_fd,
                                   struct priv_cmd_entry **entry,
                                   struct gk20a_fence **fence,
                                   bool wfi)
{
        /* Need to do 'wfi + host incr' since we return the fence
         * to user space. */
        return __gk20a_channel_syncpt_incr(s,
                        wfi,
                        true /* register irq */,
                        entry, fence);
}

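/*
 * Fast-forward the syncpoint so its min value catches up with max;
 * presumably used during channel teardown or error recovery so that
 * anything still waiting on this channel's syncpoint is unblocked.
 */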
static void gk20a_channel_syncpt_set_min_eq_max(struct gk20a_channel_sync *s)
{
        struct gk20a_channel_syncpt *sp =
                container_of(s, struct gk20a_channel_syncpt, ops);
        nvhost_syncpt_set_min_eq_max_ext(sp->host1x_pdev, sp->id);
}

static void gk20a_channel_syncpt_signal_timeline(
                struct gk20a_channel_sync *s)
{
        /* Nothing to do. */
}

static int gk20a_channel_syncpt_id(struct gk20a_channel_sync *s)
{
        struct gk20a_channel_syncpt *sp =
                container_of(s, struct gk20a_channel_syncpt, ops);
        return sp->id;
}

static void gk20a_channel_syncpt_destroy(struct gk20a_channel_sync *s)
{
        struct gk20a_channel_syncpt *sp =
                container_of(s, struct gk20a_channel_syncpt, ops);
        nvhost_syncpt_set_min_eq_max_ext(sp->host1x_pdev, sp->id);
        nvhost_syncpt_put_ref_ext(sp->host1x_pdev, sp->id);
        kfree(sp);
}

static struct gk20a_channel_sync *
gk20a_channel_syncpt_create(struct channel_gk20a *c)
{
        struct gk20a_channel_syncpt *sp;
        char syncpt_name[16];

        sp = kzalloc(sizeof(*sp), GFP_KERNEL);
        if (!sp)
                return NULL;

        sp->c = c;
        sp->host1x_pdev = c->g->host1x_dev;

        snprintf(syncpt_name, sizeof(syncpt_name), "%s_%d",
                 dev_name(&c->g->dev->dev), c->hw_chid);

        sp->id = nvhost_get_syncpt_host_managed(sp->host1x_pdev,
                                                c->hw_chid, syncpt_name);
        if (!sp->id) {
                kfree(sp);
                gk20a_err(&c->g->dev->dev, "failed to get free syncpt");
                return NULL;
        }

        nvhost_syncpt_set_min_eq_max_ext(sp->host1x_pdev, sp->id);

        sp->ops.wait_syncpt             = gk20a_channel_syncpt_wait_syncpt;
        sp->ops.wait_fd                 = gk20a_channel_syncpt_wait_fd;
        sp->ops.incr                    = gk20a_channel_syncpt_incr;
        sp->ops.incr_wfi                = gk20a_channel_syncpt_incr_wfi;
        sp->ops.incr_user               = gk20a_channel_syncpt_incr_user;
        sp->ops.set_min_eq_max          = gk20a_channel_syncpt_set_min_eq_max;
        sp->ops.signal_timeline         = gk20a_channel_syncpt_signal_timeline;
        sp->ops.syncpt_id               = gk20a_channel_syncpt_id;
        sp->ops.destroy                 = gk20a_channel_syncpt_destroy;

        sp->ops.aggressive_destroy      = true;

        return &sp->ops;
}
#endif /* CONFIG_TEGRA_GK20A */

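/*
 * Semaphore-backed implementation, used when host1x syncpoints are not
 * available: synchronization is built on GPU semaphores allocated from a
 * per-channel pool, and (with CONFIG_SYNC) an Android sync timeline backs
 * the fences handed to user space.
 */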
struct gk20a_channel_semaphore {
        struct gk20a_channel_sync ops;
        struct channel_gk20a *c;

        /* A semaphore pool owned by this channel. */
        struct gk20a_semaphore_pool *pool;

        /* A sync timeline that advances when gpu completes work. */
        struct sync_timeline *timeline;
};

#ifdef CONFIG_SYNC
struct wait_fence_work {
        struct sync_fence_waiter waiter;
        struct channel_gk20a *ch;
        struct gk20a_semaphore *sema;
};

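/*
 * Runs in the sync framework's waiter context once the pre-fence signals:
 * the CPU releases the backing semaphore, which unblocks the semaphore
 * acquire that gk20a_channel_semaphore_wait_fd() inserted into the
 * channel's pushbuffer.
 */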
static void gk20a_channel_semaphore_launcher(
                struct sync_fence *fence,
                struct sync_fence_waiter *waiter)
{
        int err;
        struct wait_fence_work *w =
                container_of(waiter, struct wait_fence_work, waiter);
        struct gk20a *g = w->ch->g;

        gk20a_dbg_info("waiting for pre fence %p '%s'",
                        fence, fence->name);
        err = sync_fence_wait(fence, -1);
        if (err < 0)
                dev_err(&g->dev->dev, "error waiting pre-fence: %d\n", err);

        gk20a_dbg_info(
                  "wait completed (%d) for fence %p '%s', triggering gpu work",
                  err, fence, fence->name);
        sync_fence_put(fence);
        gk20a_semaphore_release(w->sema);
        gk20a_semaphore_put(w->sema);
        kfree(w);
}
#endif

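/*
 * Emit a GPU semaphore operation into the pushbuffer. The 0x2001000x
 * words appear to be incrementing method headers for the SEMAPHOREA..D
 * methods (byte offsets 0x10..0x1c): A/B carry the upper/lower bits of
 * the semaphore GPU VA, C the payload, and D the operation (0x4 is an
 * acquire-greater-or-equal with the channel-switch bit 12 set; 0x2 is a
 * release, with bit 20 disabling the implicit wait-for-idle when wfi is
 * not requested). Releases are followed by a non-stall interrupt method
 * so the CPU is notified of completion. Returns the number of words
 * written.
 */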
static int add_sema_cmd(u32 *ptr, u64 sema, u32 payload,
                        bool acquire, bool wfi)
{
        int i = 0;
        /* semaphore_a */
        ptr[i++] = 0x20010004;
        /* offset_upper */
        ptr[i++] = (sema >> 32) & 0xff;
        /* semaphore_b */
        ptr[i++] = 0x20010005;
        /* offset */
        ptr[i++] = sema & 0xffffffff;
        /* semaphore_c */
        ptr[i++] = 0x20010006;
        /* payload */
        ptr[i++] = payload;
        if (acquire) {
                /* semaphore_d */
                ptr[i++] = 0x20010007;
                /* operation: acq_geq, switch_en */
                ptr[i++] = 0x4 | (0x1 << 12);
        } else {
                /* semaphore_d */
                ptr[i++] = 0x20010007;
                /* operation: release, wfi only when requested */
                ptr[i++] = 0x2 | ((wfi ? 0x0 : 0x1) << 20);
                /* non_stall_int */
                ptr[i++] = 0x20010008;
                /* ignored */
                ptr[i++] = 0;
        }
        return i;
}

static int gk20a_channel_semaphore_wait_syncpt(
                struct gk20a_channel_sync *s, u32 id,
                u32 thresh, struct priv_cmd_entry **entry,
                struct gk20a_fence **fence)
{
        struct gk20a_channel_semaphore *sema =
                container_of(s, struct gk20a_channel_semaphore, ops);
        struct device *dev = dev_from_gk20a(sema->c->g);
        gk20a_err(dev, "trying to use syncpoint synchronization");
        return -ENODEV;
}

static int gk20a_channel_semaphore_wait_fd(
                struct gk20a_channel_sync *s, int fd,
                struct priv_cmd_entry **entry,
                struct gk20a_fence **fence)
{
        struct gk20a_channel_semaphore *sema =
                container_of(s, struct gk20a_channel_semaphore, ops);
        struct channel_gk20a *c = sema->c;
#ifdef CONFIG_SYNC
        struct sync_fence *sync_fence;
        struct priv_cmd_entry *wait_cmd = NULL;
        struct wait_fence_work *w;
        int written;
        int err;
        u64 va;

        sync_fence = gk20a_sync_fence_fdget(fd);
        if (!sync_fence)
                return -EINVAL;

        w = kzalloc(sizeof(*w), GFP_KERNEL);
        if (!w) {
                err = -ENOMEM;
                goto fail;
        }
        sync_fence_waiter_init(&w->waiter, gk20a_channel_semaphore_launcher);
        w->ch = c;
        w->sema = gk20a_semaphore_alloc(sema->pool);
        if (!w->sema) {
                gk20a_err(dev_from_gk20a(c->g), "ran out of semaphores");
                err = -EAGAIN;
                goto fail;
        }

        gk20a_channel_alloc_priv_cmdbuf(c, 8, &wait_cmd);
        if (wait_cmd == NULL) {
                gk20a_err(dev_from_gk20a(c->g),
                                "not enough priv cmd buffer space");
                err = -EAGAIN;
                goto fail;
        }

        /* The worker takes its own reference; taken only after the command
         * buffer allocation has succeeded, so that the single put in the
         * fail path balances the allocation's reference. */
        gk20a_semaphore_get(w->sema);

        va = gk20a_semaphore_gpu_va(w->sema, c->vm);
        /* GPU is unblocked when the semaphore value becomes 1. */
        written = add_sema_cmd(wait_cmd->ptr, va, 1, true, false);
        WARN_ON(written != wait_cmd->size);

        /* XXX: this fixes an actual bug; a reference to this semaphore
         * must be held while the job is in flight, and the fence created
         * here is what holds it. */
        *fence = gk20a_fence_from_semaphore(sema->timeline, w->sema,
                                            &c->semaphore_wq,
                                            NULL, false);
        *entry = wait_cmd;

        err = sync_fence_wait_async(sync_fence, &w->waiter);
        if (err) {
                /* The fence has already signalled (or carries an error),
                 * so the waiter callback will never run; do its work here
                 * instead: release the semaphore to unblock the GPU-side
                 * acquire, and drop the worker's reference and allocation. */
                sync_fence_put(sync_fence);
                gk20a_semaphore_release(w->sema);
                gk20a_semaphore_put(w->sema);
                kfree(w);
        }
        return 0;
fail:
        if (w && w->sema)
                gk20a_semaphore_put(w->sema);
        kfree(w);
        sync_fence_put(sync_fence);
        return err;
#else
        gk20a_err(dev_from_gk20a(c->g),
                  "trying to use sync fds with CONFIG_SYNC disabled");
        return -ENODEV;
#endif
}

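/*
 * Job-completion increment: allocate a fresh semaphore from the channel's
 * pool and emit a release with payload 1, matching the value the wait
 * side acquires on. The returned gk20a_fence wraps the semaphore (plus an
 * optional sync_fence dependency), so the CPU and user space can wait for
 * the job.
 */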
static int __gk20a_channel_semaphore_incr(
                struct gk20a_channel_sync *s, bool wfi_cmd,
                struct sync_fence *dependency,
                struct priv_cmd_entry **entry,
                struct gk20a_fence **fence)
{
        u64 va;
        int incr_cmd_size;
        int written;
        struct priv_cmd_entry *incr_cmd = NULL;
        struct gk20a_channel_semaphore *sp =
                container_of(s, struct gk20a_channel_semaphore, ops);
        struct channel_gk20a *c = sp->c;
        struct gk20a_semaphore *semaphore;

        semaphore = gk20a_semaphore_alloc(sp->pool);
        if (!semaphore) {
                gk20a_err(dev_from_gk20a(c->g),
                                "ran out of semaphores");
                return -EAGAIN;
        }

        incr_cmd_size = 10;
        gk20a_channel_alloc_priv_cmdbuf(c, incr_cmd_size, &incr_cmd);
        if (incr_cmd == NULL) {
                gk20a_err(dev_from_gk20a(c->g),
                                "not enough priv cmd buffer space");
                gk20a_semaphore_put(semaphore);
                return -EAGAIN;
        }

        /* Release the completion semaphore. */
        va = gk20a_semaphore_gpu_va(semaphore, c->vm);
        written = add_sema_cmd(incr_cmd->ptr, va, 1, false, wfi_cmd);
        WARN_ON(written != incr_cmd_size);

        *fence = gk20a_fence_from_semaphore(sp->timeline, semaphore,
                                            &c->semaphore_wq,
                                            dependency, wfi_cmd);
        *entry = incr_cmd;
        return 0;
}

static int gk20a_channel_semaphore_incr_wfi(
                struct gk20a_channel_sync *s,
                struct priv_cmd_entry **entry,
                struct gk20a_fence **fence)
{
        return __gk20a_channel_semaphore_incr(s,
                        true /* wfi */,
                        NULL,
                        entry, fence);
}

static int gk20a_channel_semaphore_incr(
                struct gk20a_channel_sync *s,
                struct priv_cmd_entry **entry,
                struct gk20a_fence **fence)
{
        /* Don't add a wfi cmd to this one, since we're not returning
         * a fence to user space. */
        return __gk20a_channel_semaphore_incr(s, false /* no wfi */,
                                              NULL, entry, fence);
}

static int gk20a_channel_semaphore_incr_user(
                struct gk20a_channel_sync *s,
                int wait_fence_fd,
                struct priv_cmd_entry **entry,
                struct gk20a_fence **fence,
                bool wfi)
{
#ifdef CONFIG_SYNC
        struct sync_fence *dependency = NULL;
        int err;

        if (wait_fence_fd >= 0) {
                dependency = gk20a_sync_fence_fdget(wait_fence_fd);
                if (!dependency)
                        return -EINVAL;
        }

        err = __gk20a_channel_semaphore_incr(s, wfi, dependency,
                                             entry, fence);
        if (err) {
                if (dependency)
                        sync_fence_put(dependency);
                return err;
        }

        return 0;
#else
        struct gk20a_channel_semaphore *sema =
                container_of(s, struct gk20a_channel_semaphore, ops);
        gk20a_err(dev_from_gk20a(sema->c->g),
                  "trying to use sync fds with CONFIG_SYNC disabled");
        return -ENODEV;
#endif
}

static void gk20a_channel_semaphore_set_min_eq_max(struct gk20a_channel_sync *s)
{
        /* Nothing to do. */
}

static void gk20a_channel_semaphore_signal_timeline(
                struct gk20a_channel_sync *s)
{
        struct gk20a_channel_semaphore *sp =
                container_of(s, struct gk20a_channel_semaphore, ops);
        gk20a_sync_timeline_signal(sp->timeline);
}

static int gk20a_channel_semaphore_syncpt_id(struct gk20a_channel_sync *s)
{
        return -EINVAL;
}

static void gk20a_channel_semaphore_destroy(struct gk20a_channel_sync *s)
{
        struct gk20a_channel_semaphore *sema =
                container_of(s, struct gk20a_channel_semaphore, ops);
        if (sema->timeline)
                gk20a_sync_timeline_destroy(sema->timeline);
        if (sema->pool) {
                gk20a_semaphore_pool_unmap(sema->pool, sema->c->vm);
                gk20a_semaphore_pool_put(sema->pool);
        }
        kfree(sema);
}

static struct gk20a_channel_sync *
gk20a_channel_semaphore_create(struct channel_gk20a *c)
{
        int err;
        int asid = -1;
        struct gk20a_channel_semaphore *sema;
        char pool_name[20];

        if (WARN_ON(!c->vm))
                return NULL;

        sema = kzalloc(sizeof(*sema), GFP_KERNEL);
        if (!sema)
                return NULL;
        sema->c = c;

        if (c->vm->as_share)
                asid = c->vm->as_share->id;

        /* A pool of 256 semaphores fits into one 4k page. */
        snprintf(pool_name, sizeof(pool_name), "semaphore_pool-%d",
                 c->hw_chid);
        sema->pool = gk20a_semaphore_pool_alloc(dev_from_gk20a(c->g),
                                                pool_name, 256);
        if (!sema->pool)
                goto clean_up;

        /* Map the semaphore pool to the channel vm. Map as read-write to the
         * owner channel (all other channels should map as read only!). */
        err = gk20a_semaphore_pool_map(sema->pool, c->vm, gk20a_mem_flag_none);
        if (err)
                goto clean_up;

#ifdef CONFIG_SYNC
        sema->timeline = gk20a_sync_timeline_create(
                        "gk20a_ch%d_as%d", c->hw_chid, asid);
        if (!sema->timeline)
                goto clean_up;
#endif
        sema->ops.wait_syncpt   = gk20a_channel_semaphore_wait_syncpt;
        sema->ops.wait_fd       = gk20a_channel_semaphore_wait_fd;
        sema->ops.incr          = gk20a_channel_semaphore_incr;
        sema->ops.incr_wfi      = gk20a_channel_semaphore_incr_wfi;
        sema->ops.incr_user     = gk20a_channel_semaphore_incr_user;
        sema->ops.set_min_eq_max = gk20a_channel_semaphore_set_min_eq_max;
        sema->ops.signal_timeline = gk20a_channel_semaphore_signal_timeline;
        sema->ops.syncpt_id     = gk20a_channel_semaphore_syncpt_id;
        sema->ops.destroy       = gk20a_channel_semaphore_destroy;

        /* Aggressively destroying the semaphore sync would add overhead,
         * since the pool would have to be remapped into the GMMU each
         * time the sync is recreated. */
        sema->ops.aggressive_destroy = false;

        return &sema->ops;
clean_up:
        gk20a_channel_semaphore_destroy(&sema->ops);
        return NULL;
}

struct gk20a_channel_sync *gk20a_channel_sync_create(struct channel_gk20a *c)
{
#ifdef CONFIG_TEGRA_GK20A
        if (gk20a_platform_has_syncpoints(c->g->dev))
                return gk20a_channel_syncpt_create(c);
#endif
        return gk20a_channel_semaphore_create(c);
}
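
/*
 * Example (hypothetical) submit-path usage of this abstraction. The names
 * below are illustrative only; the real call sites live in the channel
 * gpfifo submit path:
 *
 *      struct gk20a_channel_sync *sync = gk20a_channel_sync_create(c);
 *      struct priv_cmd_entry *wait_cmd = NULL, *incr_cmd = NULL;
 *      struct gk20a_fence *pre_fence = NULL, *post_fence = NULL;
 *
 *      // Turn a user-supplied pre-fence fd into wait commands.
 *      sync->wait_fd(sync, fd, &wait_cmd, &pre_fence);
 *      // Emit a completion increment (with wfi) whose fence is returned
 *      // to user space.
 *      sync->incr_user(sync, -1, &incr_cmd, &post_fence, true);
 *      // ... place wait_cmd before and incr_cmd after the job's gpfifo
 *      // entries, then submit; put the fences when no longer needed ...
 *      sync->destroy(sync);
 */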