6c82653423e9fef756247df53af461c0ec4962a9
[linux-3.10.git] / drivers / gpu / nvgpu / gk20a / channel_sync_gk20a.c
1 /*
2  * drivers/video/tegra/host/gk20a/channel_sync_gk20a.c
3  *
4  * GK20A Channel Synchronization Abstraction
5  *
6  * Copyright (c) 2014-2015, NVIDIA CORPORATION.  All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms and conditions of the GNU General Public License,
10  * version 2, as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  * more details.
16  */
17
18 #include <linux/gk20a.h>
19
20 #include "channel_sync_gk20a.h"
21 #include "gk20a.h"
22 #include "fence_gk20a.h"
23 #include "semaphore_gk20a.h"
24 #include "sync_gk20a.h"
25 #include "mm_gk20a.h"
26
27 #ifdef CONFIG_SYNC
28 #include "../../../staging/android/sync.h"
29 #endif
30
31 #ifdef CONFIG_TEGRA_GK20A
32 #include <linux/nvhost.h>
33 #endif
34
35 #ifdef CONFIG_TEGRA_GK20A
36
/* Syncpoint-backed implementation of the channel sync abstraction. */
struct gk20a_channel_syncpt {
        struct gk20a_channel_sync ops;          /* interface; container_of anchor */
        struct channel_gk20a *c;                /* owning channel */
        struct platform_device *host1x_pdev;    /* host1x device managing the syncpoint */
        u32 id;                                 /* host-managed syncpoint id */
};
43
44 static void add_wait_cmd(u32 *ptr, u32 id, u32 thresh)
45 {
46         /* syncpoint_a */
47         ptr[0] = 0x2001001C;
48         /* payload */
49         ptr[1] = thresh;
50         /* syncpoint_b */
51         ptr[2] = 0x2001001D;
52         /* syncpt_id, switch_en, wait */
53         ptr[3] = (id << 8) | 0x10;
54 }
55
56 static int gk20a_channel_syncpt_wait_syncpt(struct gk20a_channel_sync *s,
57                 u32 id, u32 thresh, struct priv_cmd_entry **entry,
58                 struct gk20a_fence **fence)
59 {
60         struct gk20a_channel_syncpt *sp =
61                 container_of(s, struct gk20a_channel_syncpt, ops);
62         struct priv_cmd_entry *wait_cmd = NULL;
63
64         if (!nvhost_syncpt_is_valid_pt_ext(sp->host1x_pdev, id)) {
65                 dev_warn(dev_from_gk20a(sp->c->g),
66                                 "invalid wait id in gpfifo submit, elided");
67                 return 0;
68         }
69
70         if (nvhost_syncpt_is_expired_ext(sp->host1x_pdev, id, thresh))
71                 return 0;
72
73         gk20a_channel_alloc_priv_cmdbuf(sp->c, 4, &wait_cmd);
74         if (wait_cmd == NULL) {
75                 gk20a_err(dev_from_gk20a(sp->c->g),
76                                 "not enough priv cmd buffer space");
77                 return -EAGAIN;
78         }
79
80         add_wait_cmd(&wait_cmd->ptr[0], id, thresh);
81
82         *entry = wait_cmd;
83         *fence = NULL;
84         return 0;
85 }
86
87 static int gk20a_channel_syncpt_wait_fd(struct gk20a_channel_sync *s, int fd,
88                        struct priv_cmd_entry **entry,
89                        struct gk20a_fence **fence)
90 {
91 #ifdef CONFIG_SYNC
92         int i;
93         int num_wait_cmds;
94         struct sync_pt *pt;
95         struct sync_fence *sync_fence;
96         struct priv_cmd_entry *wait_cmd = NULL;
97         struct gk20a_channel_syncpt *sp =
98                 container_of(s, struct gk20a_channel_syncpt, ops);
99         struct channel_gk20a *c = sp->c;
100
101         sync_fence = nvhost_sync_fdget(fd);
102         if (!sync_fence)
103                 return -EINVAL;
104
105         /* validate syncpt ids */
106         list_for_each_entry(pt, &sync_fence->pt_list_head, pt_list) {
107                 u32 wait_id = nvhost_sync_pt_id(pt);
108                 if (!wait_id || !nvhost_syncpt_is_valid_pt_ext(sp->host1x_pdev,
109                                         wait_id)) {
110                         sync_fence_put(sync_fence);
111                         return -EINVAL;
112                 }
113         }
114
115         num_wait_cmds = nvhost_sync_num_pts(sync_fence);
116         if (num_wait_cmds == 0)
117                 return 0;
118
119         gk20a_channel_alloc_priv_cmdbuf(c, 4 * num_wait_cmds, &wait_cmd);
120         if (wait_cmd == NULL) {
121                 gk20a_err(dev_from_gk20a(c->g),
122                                 "not enough priv cmd buffer space");
123                 sync_fence_put(sync_fence);
124                 return -EAGAIN;
125         }
126
127         i = 0;
128         list_for_each_entry(pt, &sync_fence->pt_list_head, pt_list) {
129                 u32 wait_id = nvhost_sync_pt_id(pt);
130                 u32 wait_value = nvhost_sync_pt_thresh(pt);
131
132                 if (nvhost_syncpt_is_expired_ext(sp->host1x_pdev,
133                                 wait_id, wait_value)) {
134                         wait_cmd->ptr[i * 4 + 0] = 0;
135                         wait_cmd->ptr[i * 4 + 1] = 0;
136                         wait_cmd->ptr[i * 4 + 2] = 0;
137                         wait_cmd->ptr[i * 4 + 3] = 0;
138                 } else
139                         add_wait_cmd(&wait_cmd->ptr[i * 4], wait_id,
140                                         wait_value);
141                 i++;
142         }
143         WARN_ON(i != num_wait_cmds);
144         sync_fence_put(sync_fence);
145
146         *entry = wait_cmd;
147         *fence = NULL;
148         return 0;
149 #else
150         return -ENODEV;
151 #endif
152 }
153
/*
 * host1x interrupt notifier, scheduled by __gk20a_channel_syncpt_incr()
 * to run when the channel's syncpoint reaches its threshold.
 */
static void gk20a_channel_syncpt_update(void *priv, int nr_completed)
{
	struct channel_gk20a *chan = priv;

	gk20a_channel_update(chan, nr_completed);

	/* drop the reference taken in __gk20a_channel_syncpt_incr() */
	gk20a_channel_put(chan);
}
163
/*
 * Emit a syncpoint increment pair (doubled as a HW-bug WAR, see below),
 * optionally preceded by a wait-for-idle, and produce a gk20a_fence for
 * the resulting threshold.
 *
 * @wfi_cmd:      prepend a WFI method so the increments land only after
 *                preceding work has drained
 * @register_irq: take a channel reference and arrange for
 *                gk20a_channel_syncpt_update() to run at the threshold
 *                (the callback releases the reference)
 * @entry:        out: priv cmdbuf entry holding the methods
 * @fence:        out: fence at the new syncpoint threshold
 * @need_sync_fence: forwarded to gk20a_fence_from_syncpt
 *
 * Returns 0 on success, -EAGAIN when the priv cmd buffer is full.
 */
static int __gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
				       bool wfi_cmd,
				       bool register_irq,
				       struct priv_cmd_entry **entry,
				       struct gk20a_fence **fence,
				       bool need_sync_fence)
{
	u32 thresh;
	int incr_cmd_size;
	int j = 0;
	int err;
	struct priv_cmd_entry *incr_cmd = NULL;
	struct gk20a_channel_syncpt *sp =
		container_of(s, struct gk20a_channel_syncpt, ops);
	struct channel_gk20a *c = sp->c;

	/* 6 words for the double increment, +2 for the optional WFI */
	incr_cmd_size = 6;
	if (wfi_cmd)
		incr_cmd_size += 2;

	gk20a_channel_alloc_priv_cmdbuf(c, incr_cmd_size, &incr_cmd);
	if (incr_cmd == NULL) {
		gk20a_err(dev_from_gk20a(c->g),
				"not enough priv cmd buffer space");
		return -EAGAIN;
	}

	/* WAR for hw bug 1491360: syncpt needs to be incremented twice */

	if (wfi_cmd) {
		/* wfi */
		incr_cmd->ptr[j++] = 0x2001001E;
		/* handle, ignored */
		incr_cmd->ptr[j++] = 0x00000000;
	}
	/* syncpoint_a */
	incr_cmd->ptr[j++] = 0x2001001C;
	/* payload, ignored */
	incr_cmd->ptr[j++] = 0;
	/* syncpoint_b */
	incr_cmd->ptr[j++] = 0x2001001D;
	/* syncpt_id, incr */
	incr_cmd->ptr[j++] = (sp->id << 8) | 0x1;
	/* syncpoint_b */
	incr_cmd->ptr[j++] = 0x2001001D;
	/* syncpt_id, incr */
	incr_cmd->ptr[j++] = (sp->id << 8) | 0x1;
	WARN_ON(j != incr_cmd_size);

	/* reserve 2 increments on the host side, matching the pair above */
	thresh = nvhost_syncpt_incr_max_ext(sp->host1x_pdev, sp->id, 2);

	if (register_irq) {
		struct channel_gk20a *referenced = gk20a_channel_get(c);

		WARN_ON(!referenced);

		if (referenced) {
			/* note: channel_put() is in
			 * gk20a_channel_syncpt_update() */

			err = nvhost_intr_register_notifier(
				sp->host1x_pdev,
				sp->id, thresh,
				gk20a_channel_syncpt_update, c);
			if (err)
				gk20a_channel_put(referenced);

			/* Adding interrupt action should
			 * never fail. A proper error handling
			 * here would require us to decrement
			 * the syncpt max back to its original
			 * value. */
			WARN(err,
			     "failed to set submit complete interrupt");
		}
	}

	*fence = gk20a_fence_from_syncpt(sp->host1x_pdev, sp->id, thresh,
					 wfi_cmd, need_sync_fence);
	*entry = incr_cmd;
	return 0;
}
246
247 static int gk20a_channel_syncpt_incr_wfi(struct gk20a_channel_sync *s,
248                                   struct priv_cmd_entry **entry,
249                                   struct gk20a_fence **fence)
250 {
251         return __gk20a_channel_syncpt_incr(s,
252                         true /* wfi */,
253                         false /* no irq handler */,
254                         entry, fence, true);
255 }
256
257 static int gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
258                               struct priv_cmd_entry **entry,
259                               struct gk20a_fence **fence,
260                               bool need_sync_fence)
261 {
262         /* Don't put wfi cmd to this one since we're not returning
263          * a fence to user space. */
264         return __gk20a_channel_syncpt_incr(s,
265                         false /* no wfi */,
266                         true /* register irq */,
267                         entry, fence, need_sync_fence);
268 }
269
270 static int gk20a_channel_syncpt_incr_user(struct gk20a_channel_sync *s,
271                                    int wait_fence_fd,
272                                    struct priv_cmd_entry **entry,
273                                    struct gk20a_fence **fence,
274                                    bool wfi,
275                                    bool need_sync_fence)
276 {
277         /* Need to do 'wfi + host incr' since we return the fence
278          * to user space. */
279         return __gk20a_channel_syncpt_incr(s,
280                         wfi,
281                         true /* register irq */,
282                         entry, fence, need_sync_fence);
283 }
284
285 static void gk20a_channel_syncpt_set_min_eq_max(struct gk20a_channel_sync *s)
286 {
287         struct gk20a_channel_syncpt *sp =
288                 container_of(s, struct gk20a_channel_syncpt, ops);
289         nvhost_syncpt_set_min_eq_max_ext(sp->host1x_pdev, sp->id);
290 }
291
/*
 * No-op for the syncpoint backend: a sync timeline is only maintained
 * by the semaphore implementation (see
 * gk20a_channel_semaphore_signal_timeline()).
 */
static void gk20a_channel_syncpt_signal_timeline(
		struct gk20a_channel_sync *s)
{
	/* Nothing to do. */
}
297
298 static int gk20a_channel_syncpt_id(struct gk20a_channel_sync *s)
299 {
300         struct gk20a_channel_syncpt *sp =
301                 container_of(s, struct gk20a_channel_syncpt, ops);
302         return sp->id;
303 }
304
305 static void gk20a_channel_syncpt_destroy(struct gk20a_channel_sync *s)
306 {
307         struct gk20a_channel_syncpt *sp =
308                 container_of(s, struct gk20a_channel_syncpt, ops);
309         nvhost_syncpt_set_min_eq_max_ext(sp->host1x_pdev, sp->id);
310         nvhost_syncpt_put_ref_ext(sp->host1x_pdev, sp->id);
311         kfree(sp);
312 }
313
314 static struct gk20a_channel_sync *
315 gk20a_channel_syncpt_create(struct channel_gk20a *c)
316 {
317         struct gk20a_channel_syncpt *sp;
318         char syncpt_name[32];
319
320         sp = kzalloc(sizeof(*sp), GFP_KERNEL);
321         if (!sp)
322                 return NULL;
323
324         sp->c = c;
325         sp->host1x_pdev = c->g->host1x_dev;
326
327         snprintf(syncpt_name, sizeof(syncpt_name),
328                 "%s_%d", dev_name(&c->g->dev->dev), c->hw_chid);
329
330         sp->id = nvhost_get_syncpt_host_managed(sp->host1x_pdev,
331                                                 c->hw_chid, syncpt_name);
332         if (!sp->id) {
333                 kfree(sp);
334                 gk20a_err(&c->g->dev->dev, "failed to get free syncpt");
335                 return NULL;
336         }
337
338         nvhost_syncpt_set_min_eq_max_ext(sp->host1x_pdev, sp->id);
339
340         sp->ops.wait_syncpt             = gk20a_channel_syncpt_wait_syncpt;
341         sp->ops.wait_fd                 = gk20a_channel_syncpt_wait_fd;
342         sp->ops.incr                    = gk20a_channel_syncpt_incr;
343         sp->ops.incr_wfi                = gk20a_channel_syncpt_incr_wfi;
344         sp->ops.incr_user               = gk20a_channel_syncpt_incr_user;
345         sp->ops.set_min_eq_max          = gk20a_channel_syncpt_set_min_eq_max;
346         sp->ops.signal_timeline         = gk20a_channel_syncpt_signal_timeline;
347         sp->ops.syncpt_id               = gk20a_channel_syncpt_id;
348         sp->ops.destroy                 = gk20a_channel_syncpt_destroy;
349
350         return &sp->ops;
351 }
352 #endif /* CONFIG_TEGRA_GK20A */
353
/* Semaphore-backed implementation of the channel sync abstraction. */
struct gk20a_channel_semaphore {
        struct gk20a_channel_sync ops;  /* interface; container_of anchor */
        struct channel_gk20a *c;        /* owning channel */

        /* A semaphore pool owned by this channel. */
        struct gk20a_semaphore_pool *pool;

        /* A sync timeline that advances when gpu completes work. */
        struct sync_timeline *timeline;
};
364
365 #ifdef CONFIG_SYNC
/*
 * Context for waiting on an external sync_fence: once the fence
 * signals, gk20a_channel_semaphore_launcher() releases 'sema' to
 * unblock the GPU-side acquire, then frees this struct.
 */
struct wait_fence_work {
        struct sync_fence_waiter waiter;        /* embedded; container_of anchor */
        struct channel_gk20a *ch;               /* channel the wait belongs to */
        struct gk20a_semaphore *sema;           /* released when the fence signals */
};
371
/*
 * sync_fence waiter callback, invoked after the awaited pre-fence
 * signals. Releases the backing semaphore so the GPU-side acquire
 * emitted in gk20a_channel_semaphore_wait_fd() can proceed, then drops
 * the worker's fence and semaphore references and frees the work item.
 *
 * NOTE(review): the sync_fence_wait(fence, -1) below should return
 * immediately, since this callback only runs once the fence has
 * signalled — presumably kept as a defensive error check; confirm
 * against the Android staging sync driver semantics.
 */
static void gk20a_channel_semaphore_launcher(
		struct sync_fence *fence,
		struct sync_fence_waiter *waiter)
{
	int err;
	struct wait_fence_work *w =
		container_of(waiter, struct wait_fence_work, waiter);
	struct gk20a *g = w->ch->g;

	gk20a_dbg_info("waiting for pre fence %p '%s'",
			fence, fence->name);
	err = sync_fence_wait(fence, -1);
	if (err < 0)
		dev_err(&g->dev->dev, "error waiting pre-fence: %d\n", err);

	gk20a_dbg_info(
		  "wait completed (%d) for fence %p '%s', triggering gpu work",
		  err, fence, fence->name);
	sync_fence_put(fence);
	/* unblock the GPU: the acquire waits for payload 1 */
	gk20a_semaphore_release(w->sema);
	/* drop the worker's reference taken at submit time */
	gk20a_semaphore_put(w->sema);
	kfree(w);
}
395 #endif
396
397 static int add_sema_cmd(u32 *ptr, u64 sema, u32 payload,
398                         bool acquire, bool wfi)
399 {
400         int i = 0;
401         /* semaphore_a */
402         ptr[i++] = 0x20010004;
403         /* offset_upper */
404         ptr[i++] = (sema >> 32) & 0xff;
405         /* semaphore_b */
406         ptr[i++] = 0x20010005;
407         /* offset */
408         ptr[i++] = sema & 0xffffffff;
409         /* semaphore_c */
410         ptr[i++] = 0x20010006;
411         /* payload */
412         ptr[i++] = payload;
413         if (acquire) {
414                 /* semaphore_d */
415                 ptr[i++] = 0x20010007;
416                 /* operation: acq_geq, switch_en */
417                 ptr[i++] = 0x4 | (0x1 << 12);
418         } else {
419                 /* semaphore_d */
420                 ptr[i++] = 0x20010007;
421                 /* operation: release, wfi */
422                 ptr[i++] = 0x2 | ((wfi ? 0x0 : 0x1) << 20);
423                 /* non_stall_int */
424                 ptr[i++] = 0x20010008;
425                 /* ignored */
426                 ptr[i++] = 0;
427         }
428         return i;
429 }
430
431 static int gk20a_channel_semaphore_wait_syncpt(
432                 struct gk20a_channel_sync *s, u32 id,
433                 u32 thresh, struct priv_cmd_entry **entry,
434                 struct gk20a_fence **fence)
435 {
436         struct gk20a_channel_semaphore *sema =
437                 container_of(s, struct gk20a_channel_semaphore, ops);
438         struct device *dev = dev_from_gk20a(sema->c->g);
439         gk20a_err(dev, "trying to use syncpoint synchronization");
440         return -ENODEV;
441 }
442
443 static int gk20a_channel_semaphore_wait_fd(
444                 struct gk20a_channel_sync *s, int fd,
445                 struct priv_cmd_entry **entry,
446                 struct gk20a_fence **fence)
447 {
448         struct gk20a_channel_semaphore *sema =
449                 container_of(s, struct gk20a_channel_semaphore, ops);
450         struct channel_gk20a *c = sema->c;
451 #ifdef CONFIG_SYNC
452         struct sync_fence *sync_fence;
453         struct priv_cmd_entry *wait_cmd = NULL;
454         struct wait_fence_work *w;
455         int written;
456         int err;
457         u64 va;
458
459         sync_fence = gk20a_sync_fence_fdget(fd);
460         if (!sync_fence)
461                 return -EINVAL;
462
463         w = kzalloc(sizeof(*w), GFP_KERNEL);
464         if (!w) {
465                 err = -ENOMEM;
466                 goto fail;
467         }
468         sync_fence_waiter_init(&w->waiter, gk20a_channel_semaphore_launcher);
469         w->ch = c;
470         w->sema = gk20a_semaphore_alloc(sema->pool);
471         if (!w->sema) {
472                 gk20a_err(dev_from_gk20a(c->g), "ran out of semaphores");
473                 err = -EAGAIN;
474                 goto fail;
475         }
476
477         /* worker takes one reference */
478         gk20a_semaphore_get(w->sema);
479
480         gk20a_channel_alloc_priv_cmdbuf(c, 8, &wait_cmd);
481         if (wait_cmd == NULL) {
482                 gk20a_err(dev_from_gk20a(c->g),
483                                 "not enough priv cmd buffer space");
484                 err = -EAGAIN;
485                 goto fail;
486         }
487
488         va = gk20a_semaphore_gpu_va(w->sema, c->vm);
489         /* GPU unblocked when when the semaphore value becomes 1. */
490         written = add_sema_cmd(wait_cmd->ptr, va, 1, true, false);
491         WARN_ON(written != wait_cmd->size);
492         sync_fence_wait_async(sync_fence, &w->waiter);
493
494         /* XXX - this fixes an actual bug, we need to hold a ref to this
495            semaphore while the job is in flight. */
496         *fence = gk20a_fence_from_semaphore(sema->timeline, w->sema,
497                                             &c->semaphore_wq,
498                                             NULL, false);
499         *entry = wait_cmd;
500         return 0;
501 fail:
502         if (w && w->sema)
503                 gk20a_semaphore_put(w->sema);
504         kfree(w);
505         sync_fence_put(sync_fence);
506         return err;
507 #else
508         gk20a_err(dev_from_gk20a(c->g),
509                   "trying to use sync fds with CONFIG_SYNC disabled");
510         return -ENODEV;
511 #endif
512 }
513
/*
 * Allocate a completion semaphore and emit the pushbuffer methods that
 * release it with payload 1, optionally preceded by a WFI. Produces a
 * gk20a_fence wrapping the semaphore; 'dependency' (may be NULL) is an
 * external sync_fence folded into that fence.
 *
 * Returns 0 on success, -EAGAIN when out of semaphores or priv cmd
 * buffer space.
 */
static int __gk20a_channel_semaphore_incr(
		struct gk20a_channel_sync *s, bool wfi_cmd,
		struct sync_fence *dependency,
		struct priv_cmd_entry **entry,
		struct gk20a_fence **fence,
		bool need_sync_fence)
{
	u64 va;
	int incr_cmd_size;
	int written;
	struct priv_cmd_entry *incr_cmd = NULL;
	struct gk20a_channel_semaphore *sp =
		container_of(s, struct gk20a_channel_semaphore, ops);
	struct channel_gk20a *c = sp->c;
	struct gk20a_semaphore *semaphore;

	semaphore = gk20a_semaphore_alloc(sp->pool);
	if (!semaphore) {
		gk20a_err(dev_from_gk20a(c->g),
				"ran out of semaphores");
		return -EAGAIN;
	}

	/* 10 words: the release sequence emitted by add_sema_cmd()
	 * (size is fixed — wfi only toggles a bit in the operation) */
	incr_cmd_size = 10;
	gk20a_channel_alloc_priv_cmdbuf(c, incr_cmd_size, &incr_cmd);
	if (incr_cmd == NULL) {
		gk20a_err(dev_from_gk20a(c->g),
				"not enough priv cmd buffer space");
		gk20a_semaphore_put(semaphore);
		return -EAGAIN;
	}

	/* Release the completion semaphore. */
	va = gk20a_semaphore_gpu_va(semaphore, c->vm);
	written = add_sema_cmd(incr_cmd->ptr, va, 1, false, wfi_cmd);
	WARN_ON(written != incr_cmd_size);

	*fence = gk20a_fence_from_semaphore(sp->timeline, semaphore,
					    &c->semaphore_wq,
					    dependency, wfi_cmd);
	*entry = incr_cmd;
	return 0;
}
557
558 static int gk20a_channel_semaphore_incr_wfi(
559                 struct gk20a_channel_sync *s,
560                 struct priv_cmd_entry **entry,
561                 struct gk20a_fence **fence)
562 {
563         return __gk20a_channel_semaphore_incr(s,
564                         true /* wfi */,
565                         NULL,
566                         entry, fence, true);
567 }
568
569 static int gk20a_channel_semaphore_incr(
570                 struct gk20a_channel_sync *s,
571                 struct priv_cmd_entry **entry,
572                 struct gk20a_fence **fence,
573                 bool need_sync_fence)
574 {
575         /* Don't put wfi cmd to this one since we're not returning
576          * a fence to user space. */
577         return __gk20a_channel_semaphore_incr(s, false /* no wfi */,
578                                       NULL, entry, fence, need_sync_fence);
579 }
580
581 static int gk20a_channel_semaphore_incr_user(
582                 struct gk20a_channel_sync *s,
583                 int wait_fence_fd,
584                 struct priv_cmd_entry **entry,
585                 struct gk20a_fence **fence,
586                 bool wfi,
587                 bool need_sync_fence)
588 {
589 #ifdef CONFIG_SYNC
590         struct sync_fence *dependency = NULL;
591         int err;
592
593         if (wait_fence_fd >= 0) {
594                 dependency = gk20a_sync_fence_fdget(wait_fence_fd);
595                 if (!dependency)
596                         return -EINVAL;
597         }
598
599         err = __gk20a_channel_semaphore_incr(s, wfi, dependency,
600                                              entry, fence, need_sync_fence);
601         if (err) {
602                 if (dependency)
603                         sync_fence_put(dependency);
604                 return err;
605         }
606
607         return 0;
608 #else
609         struct gk20a_channel_semaphore *sema =
610                 container_of(s, struct gk20a_channel_semaphore, ops);
611         gk20a_err(dev_from_gk20a(sema->c->g),
612                   "trying to use sync fds with CONFIG_SYNC disabled");
613         return -ENODEV;
614 #endif
615 }
616
/*
 * No-op on the semaphore backend; the syncpoint implementation uses
 * this hook to fast-forward the host1x syncpoint.
 */
static void gk20a_channel_semaphore_set_min_eq_max(struct gk20a_channel_sync *s)
{
	/* Nothing to do. */
}
621
622 static void gk20a_channel_semaphore_signal_timeline(
623                 struct gk20a_channel_sync *s)
624 {
625         struct gk20a_channel_semaphore *sp =
626                 container_of(s, struct gk20a_channel_semaphore, ops);
627         gk20a_sync_timeline_signal(sp->timeline);
628 }
629
/* Semaphore-backed channels have no host1x syncpoint to report. */
static int gk20a_channel_semaphore_syncpt_id(struct gk20a_channel_sync *s)
{
	return -EINVAL;
}
634
635 static void gk20a_channel_semaphore_destroy(struct gk20a_channel_sync *s)
636 {
637         struct gk20a_channel_semaphore *sema =
638                 container_of(s, struct gk20a_channel_semaphore, ops);
639         if (sema->timeline)
640                 gk20a_sync_timeline_destroy(sema->timeline);
641         if (sema->pool) {
642                 gk20a_semaphore_pool_unmap(sema->pool, sema->c->vm);
643                 gk20a_semaphore_pool_put(sema->pool);
644         }
645         kfree(sema);
646 }
647
648 static struct gk20a_channel_sync *
649 gk20a_channel_semaphore_create(struct channel_gk20a *c)
650 {
651         int err;
652         int asid = -1;
653         struct gk20a_channel_semaphore *sema;
654         char pool_name[20];
655
656         if (WARN_ON(!c->vm))
657                 return NULL;
658
659         sema = kzalloc(sizeof(*sema), GFP_KERNEL);
660         if (!sema)
661                 return NULL;
662         sema->c = c;
663
664         if (c->vm->as_share)
665                 asid = c->vm->as_share->id;
666
667         /* A pool of 256 semaphores fits into one 4k page. */
668         sprintf(pool_name, "semaphore_pool-%d", c->hw_chid);
669         sema->pool = gk20a_semaphore_pool_alloc(dev_from_gk20a(c->g),
670                                                 pool_name, 256);
671         if (!sema->pool)
672                 goto clean_up;
673
674         /* Map the semaphore pool to the channel vm. Map as read-write to the
675          * owner channel (all other channels should map as read only!). */
676         err = gk20a_semaphore_pool_map(sema->pool, c->vm, gk20a_mem_flag_none);
677         if (err)
678                 goto clean_up;
679
680 #ifdef CONFIG_SYNC
681         sema->timeline = gk20a_sync_timeline_create(
682                         "gk20a_ch%d_as%d", c->hw_chid, asid);
683         if (!sema->timeline)
684                 goto clean_up;
685 #endif
686         sema->ops.wait_syncpt   = gk20a_channel_semaphore_wait_syncpt;
687         sema->ops.wait_fd       = gk20a_channel_semaphore_wait_fd;
688         sema->ops.incr          = gk20a_channel_semaphore_incr;
689         sema->ops.incr_wfi      = gk20a_channel_semaphore_incr_wfi;
690         sema->ops.incr_user     = gk20a_channel_semaphore_incr_user;
691         sema->ops.set_min_eq_max = gk20a_channel_semaphore_set_min_eq_max;
692         sema->ops.signal_timeline = gk20a_channel_semaphore_signal_timeline;
693         sema->ops.syncpt_id     = gk20a_channel_semaphore_syncpt_id;
694         sema->ops.destroy       = gk20a_channel_semaphore_destroy;
695
696         return &sema->ops;
697 clean_up:
698         gk20a_channel_semaphore_destroy(&sema->ops);
699         return NULL;
700 }
701
/*
 * Pick the sync backend for a channel: host1x syncpoints when the
 * platform provides them, GPU semaphores otherwise.
 */
struct gk20a_channel_sync *gk20a_channel_sync_create(struct channel_gk20a *c)
{
#ifdef CONFIG_TEGRA_GK20A
	if (gk20a_platform_has_syncpoints(c->g->dev))
		return gk20a_channel_syncpt_create(c);
#endif
	return gk20a_channel_semaphore_create(c);
}