4a5501a53edc22701a7d6103a9138e91fcd331cf
[linux-3.10.git] / drivers / video / tegra / host / nvhost_syncpt.c
1 /*
2  * drivers/video/tegra/host/nvhost_syncpt.c
3  *
4  * Tegra Graphics Host Syncpoints
5  *
6  * Copyright (c) 2010-2013, NVIDIA Corporation.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms and conditions of the GNU General Public License,
10  * version 2, as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  * more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
19  */
20
21 #include <linux/nvhost_ioctl.h>
22 #include <linux/platform_device.h>
23 #include <linux/slab.h>
24 #include <linux/stat.h>
25 #include <trace/events/nvhost.h>
26 #include "nvhost_syncpt.h"
27 #include "nvhost_acm.h"
28 #include "dev.h"
29 #include "chip_support.h"
30
31 #define MAX_SYNCPT_LENGTH       5
32
33 /* Name of sysfs node for min and max value */
34 static const char *min_name = "min";
35 static const char *max_name = "max";
36
37 static const char *num_syncpts_name = "num_pts";
38 static const char *num_mutexes_name = "num_mlocks";
39 static const char *num_waitbases_name = "num_bases";
40
41 /**
42  * Resets syncpoint and waitbase values to sw shadows
43  */
44 void nvhost_syncpt_reset(struct nvhost_syncpt *sp)
45 {
46         u32 i;
47
48         for (i = 0; i < nvhost_syncpt_nb_pts(sp); i++)
49                 syncpt_op().reset(sp, i);
50         for (i = 0; i < nvhost_syncpt_nb_bases(sp); i++)
51                 syncpt_op().reset_wait_base(sp, i);
52         wmb();
53 }
54
55 /**
56  * Resets syncpoint and waitbase values of a
57  * single client to sw shadows
58  */
59 void nvhost_syncpt_reset_client(struct platform_device *pdev)
60 {
61         struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
62         struct nvhost_master *nvhost_master = nvhost_get_host(pdev);
63         u32 id;
64
65         BUG_ON(!(syncpt_op().reset && syncpt_op().reset_wait_base));
66
67         for (id = 0; pdata->syncpts[id] &&
68                 (id < NVHOST_MODULE_MAX_SYNCPTS); ++id)
69                 syncpt_op().reset(&nvhost_master->syncpt, pdata->syncpts[id]);
70
71         for (id = 0; pdata->waitbases[id] &&
72                 (id < NVHOST_MODULE_MAX_WAITBASES); ++id)
73                 syncpt_op().reset_wait_base(&nvhost_master->syncpt,
74                         pdata->waitbases[id]);
75         wmb();
76 }
77
78
79 /**
80  * Updates sw shadow state for client managed registers
81  */
82 void nvhost_syncpt_save(struct nvhost_syncpt *sp)
83 {
84         u32 i;
85
86         for (i = 0; i < nvhost_syncpt_nb_pts(sp); i++) {
87                 if (nvhost_syncpt_client_managed(sp, i))
88                         syncpt_op().update_min(sp, i);
89                 else
90                         WARN_ON(!nvhost_syncpt_min_eq_max(sp, i));
91         }
92
93         for (i = 0; i < nvhost_syncpt_nb_bases(sp); i++)
94                 syncpt_op().read_wait_base(sp, i);
95 }
96
97 /**
98  * Updates the last value read from hardware.
99  */
100 u32 nvhost_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
101 {
102         u32 val;
103
104         val = syncpt_op().update_min(sp, id);
105         trace_nvhost_syncpt_update_min(id, val);
106
107         return val;
108 }
109
110 /**
111  * Get the current syncpoint value
112  */
113 u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id)
114 {
115         u32 val;
116         nvhost_module_busy(syncpt_to_dev(sp)->dev);
117         val = syncpt_op().update_min(sp, id);
118         nvhost_module_idle(syncpt_to_dev(sp)->dev);
119         return val;
120 }
121
122 /**
123  * Get the current syncpoint base
124  */
125 u32 nvhost_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id)
126 {
127         u32 val;
128         nvhost_module_busy(syncpt_to_dev(sp)->dev);
129         syncpt_op().read_wait_base(sp, id);
130         val = sp->base_val[id];
131         nvhost_module_idle(syncpt_to_dev(sp)->dev);
132         return val;
133 }
134
/**
 * Write a cpu syncpoint increment to the hardware, without touching
 * the cache. Caller is responsible for host being powered.
 */
void nvhost_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id)
{
	/* chip-specific raw increment; min/max shadows are NOT updated here */
	syncpt_op().cpu_incr(sp, id);
}
143
144 /**
145  * Increment syncpoint value from cpu, updating cache
146  */
147 void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id)
148 {
149         if (nvhost_syncpt_client_managed(sp, id))
150                 nvhost_syncpt_incr_max(sp, id, 1);
151         nvhost_module_busy(syncpt_to_dev(sp)->dev);
152         nvhost_syncpt_cpu_incr(sp, id);
153         nvhost_module_idle(syncpt_to_dev(sp)->dev);
154 }
155
156 /**
157  * Updated sync point form hardware, and returns true if syncpoint is expired,
158  * false if we may need to wait
159  */
160 static bool syncpt_update_min_is_expired(
161         struct nvhost_syncpt *sp,
162         u32 id,
163         u32 thresh)
164 {
165         syncpt_op().update_min(sp, id);
166         return nvhost_syncpt_is_expired(sp, id, thresh);
167 }
168
/**
 * Main entrypoint for syncpoint value waits.
 *
 * Blocks until syncpoint @id reaches @thresh, @timeout (jiffies)
 * expires, or a signal arrives. On success *value (if non-NULL)
 * receives the last syncpoint value read. Returns 0 on success,
 * -EAGAIN on timeout (or immediately when @timeout == 0 and the
 * threshold is not yet met), -ENOMEM if the waiter cannot be
 * allocated, or a negative value from the interrupted sleep.
 */
int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
			u32 thresh, u32 timeout, u32 *value)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	void *ref;
	void *waiter;
	int err = 0, check_count = 0, low_timeout = 0;
	u32 val;

	if (value)
		*value = 0;

	/* first check cache */
	if (nvhost_syncpt_is_expired(sp, id, thresh)) {
		if (value)
			*value = nvhost_syncpt_read_min(sp, id);
		return 0;
	}

	/* keep host alive */
	nvhost_module_busy(syncpt_to_dev(sp)->dev);

	/* try to read from register */
	val = syncpt_op().update_min(sp, id);
	if (nvhost_syncpt_is_expired(sp, id, thresh)) {
		if (value)
			*value = val;
		goto done;
	}

	/* zero timeout means the caller only wanted a poll */
	if (!timeout) {
		err = -EAGAIN;
		goto done;
	}

	/* schedule a wakeup when the syncpoint value is reached */
	waiter = nvhost_intr_alloc_waiter();
	if (!waiter) {
		err = -ENOMEM;
		goto done;
	}

	/* NOTE(review): on add_action failure 'waiter' is not freed on
	 * this path -- verify whether nvhost_intr_add_action consumes it
	 * even when it returns an error. */
	err = nvhost_intr_add_action(&(syncpt_to_dev(sp)->intr), id, thresh,
				NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE, &wq,
				waiter,
				&ref);
	if (err)
		goto done;

	err = -EAGAIN;
	/* Caller-specified timeout may be impractically low */
	if (timeout < SYNCPT_CHECK_PERIOD)
		low_timeout = timeout;

	/* wait for the syncpoint, or timeout, or signal; the wait is
	 * sliced into SYNCPT_CHECK_PERIOD chunks so a stuck syncpoint
	 * can be detected and reported below */
	while (timeout) {
		u32 check = min_t(u32, SYNCPT_CHECK_PERIOD, timeout);
		int remain = wait_event_interruptible_timeout(wq,
				syncpt_update_min_is_expired(sp, id, thresh),
				check);
		if (remain > 0 || nvhost_syncpt_is_expired(sp, id, thresh)) {
			if (value)
				*value = nvhost_syncpt_read_min(sp, id);
			err = 0;
			break;
		}
		if (remain < 0) {
			/* interrupted by a signal */
			err = remain;
			break;
		}
		/* NVHOST_NO_TIMEOUT waits forever; never decrement it */
		if (timeout != NVHOST_NO_TIMEOUT)
			timeout -= check;
		if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
			dev_warn(&syncpt_to_dev(sp)->dev->dev,
				"%s: syncpoint id %d (%s) stuck waiting %d, timeout=%d\n",
				 current->comm, id, syncpt_op().name(sp, id),
				 thresh, timeout);
			syncpt_op().debug(sp);
			/* after the final periodic warning, dump full state */
			if (check_count == MAX_STUCK_CHECK_COUNT) {
				if (low_timeout) {
					dev_warn(&syncpt_to_dev(sp)->dev->dev,
						"is timeout %d too low?\n",
						low_timeout);
				}
				nvhost_debug_dump(syncpt_to_dev(sp));
			}
			check_count++;
		}
	}
	nvhost_intr_put_ref(&(syncpt_to_dev(sp)->intr), id, ref);

done:
	nvhost_module_idle(syncpt_to_dev(sp)->dev);
	return err;
}
267
/**
 * Returns true if syncpoint is expired, false if we may need to wait.
 */
bool nvhost_syncpt_is_expired(
	struct nvhost_syncpt *sp,
	u32 id,
	u32 thresh)
{
	u32 current_val;
	u32 future_val;
	current_val = (u32)atomic_read(&sp->min_val[id]);
	future_val = (u32)atomic_read(&sp->max_val[id]);

	/* Note the use of unsigned arithmetic here (mod 1<<32).
	 *
	 * c = current_val = min_val    = the current value of the syncpoint.
	 * t = thresh                   = the value we are checking
	 * f = future_val  = max_val    = the value c will reach when all
	 *                                outstanding increments have completed.
	 *
	 * Note that c always chases f until it reaches f.
	 *
	 * Dtf = (f - t)
	 * Dtc = (c - t)
	 *
	 *  Consider all cases:
	 *
	 *      A) .....c..t..f.....    Dtf < Dtc       need to wait
	 *      B) .....c.....f..t..    Dtf > Dtc       expired
	 *      C) ..t..c.....f.....    Dtf > Dtc       expired    (Dct very large)
	 *
	 *  Any case where f==c: always expired (for any t).    Dtf == Dcf
	 *  Any case where t==c: always expired (for any f).    Dtf >= Dtc (because Dtc==0)
	 *  Any case where t==f!=c: always wait.                Dtf <  Dtc (because Dtf==0,
	 *                                                      Dtc!=0)
	 *
	 *  Other cases:
	 *
	 *      A) .....t..f..c.....    Dtf < Dtc       need to wait
	 *      A) .....f..c..t.....    Dtf < Dtc       need to wait
	 *      A) .....f..t..c.....    Dtf > Dtc       expired
	 *
	 *   So:
	 *         Dtf >= Dtc implies EXPIRED   (return true)
	 *         Dtf <  Dtc implies WAIT      (return false)
	 *
	 * Note: If t is expired then we *cannot* wait on it. We would wait
	 * forever (hang the system).
	 *
	 * Note: do NOT get clever and remove the -thresh from both sides. It
	 * is NOT the same.
	 *
	 * For a client managed syncpoint, max_val cannot be used as the
	 * future value, so we fall back to a direct signed comparison of
	 * the current value against the threshold.
	 */
	if (!nvhost_syncpt_client_managed(sp, id))
		return future_val - thresh >= current_val - thresh;
	else
		return (s32)(current_val - thresh) >= 0;
}
328
/* Dumps syncpoint state via the chip-specific debug hook. */
void nvhost_syncpt_debug(struct nvhost_syncpt *sp)
{
	syncpt_op().debug(sp);
}
333
334 int nvhost_mutex_try_lock(struct nvhost_syncpt *sp, int idx)
335 {
336         struct nvhost_master *host = syncpt_to_dev(sp);
337         u32 reg;
338
339         nvhost_module_busy(host->dev);
340         reg = syncpt_op().mutex_try_lock(sp, idx);
341         if (reg) {
342                 nvhost_module_idle(host->dev);
343                 return -EBUSY;
344         }
345         atomic_inc(&sp->lock_counts[idx]);
346         return 0;
347 }
348
349 void nvhost_mutex_unlock(struct nvhost_syncpt *sp, int idx)
350 {
351         syncpt_op().mutex_unlock(sp, idx);
352         nvhost_module_idle(syncpt_to_dev(sp)->dev);
353         atomic_dec(&sp->lock_counts[idx]);
354 }
355
/* remove a wait pointed to by patch_addr */
int nvhost_syncpt_patch_wait(struct nvhost_syncpt *sp, void *patch_addr)
{
	/* delegate to the chip-specific patch implementation */
	return syncpt_op().patch_wait(sp, patch_addr);
}
361
362 /* Displays the current value of the sync point via sysfs */
363 static ssize_t syncpt_min_show(struct kobject *kobj,
364                 struct kobj_attribute *attr, char *buf)
365 {
366         struct nvhost_syncpt_attr *syncpt_attr =
367                 container_of(attr, struct nvhost_syncpt_attr, attr);
368
369         return snprintf(buf, PAGE_SIZE, "%u\n",
370                         nvhost_syncpt_read(&syncpt_attr->host->syncpt,
371                                 syncpt_attr->id));
372 }
373
374 static ssize_t syncpt_max_show(struct kobject *kobj,
375                 struct kobj_attribute *attr, char *buf)
376 {
377         struct nvhost_syncpt_attr *syncpt_attr =
378                 container_of(attr, struct nvhost_syncpt_attr, attr);
379
380         return snprintf(buf, PAGE_SIZE, "%u\n",
381                         nvhost_syncpt_read_max(&syncpt_attr->host->syncpt,
382                                 syncpt_attr->id));
383 }
384
385
386 static ssize_t nvhost_capability_show(struct kobject *kobj,
387                 struct kobj_attribute *attr, char *buf)
388 {
389         struct nvhost_capability_node *node =
390                 container_of(attr, struct nvhost_capability_node, attr);
391
392         return snprintf(buf, PAGE_SIZE, "%u\n", node->func(node->sp));
393 }
394
395 static inline int nvhost_syncpt_set_sysfs_capability_node(
396                                 struct nvhost_syncpt *sp, const char *name,
397                                 struct nvhost_capability_node *node,
398                                 int (*func)(struct nvhost_syncpt *sp))
399 {
400         node->attr.attr.name = name;
401         node->attr.attr.mode = S_IRUGO;
402         node->attr.show = nvhost_capability_show;
403         node->func = func;
404         node->sp = sp;
405
406         return sysfs_create_file(sp->caps_kobj, &node->attr.attr);
407 }
408
409 int nvhost_syncpt_init(struct platform_device *dev,
410                 struct nvhost_syncpt *sp)
411 {
412         int i;
413         struct nvhost_master *host = syncpt_to_dev(sp);
414         int err = 0;
415
416         /* Allocate structs for min, max and base values */
417         sp->min_val = kzalloc(sizeof(atomic_t) * nvhost_syncpt_nb_pts(sp),
418                         GFP_KERNEL);
419         sp->max_val = kzalloc(sizeof(atomic_t) * nvhost_syncpt_nb_pts(sp),
420                         GFP_KERNEL);
421         sp->base_val = kzalloc(sizeof(u32) * nvhost_syncpt_nb_bases(sp),
422                         GFP_KERNEL);
423         sp->lock_counts =
424                 kzalloc(sizeof(atomic_t) * nvhost_syncpt_nb_mlocks(sp),
425                         GFP_KERNEL);
426         sp->caps_nodes = kzalloc(sizeof(struct nvhost_capability_node) * 3,
427                         GFP_KERNEL);
428
429         if (!(sp->min_val && sp->max_val && sp->base_val && sp->lock_counts &&
430                 sp->caps_nodes)) {
431                 /* frees happen in the deinit */
432                 err = -ENOMEM;
433                 goto fail;
434         }
435
436         sp->kobj = kobject_create_and_add("syncpt", &dev->dev.kobj);
437         if (!sp->kobj) {
438                 err = -EIO;
439                 goto fail;
440         }
441
442         sp->caps_kobj = kobject_create_and_add("capabilities", &dev->dev.kobj);
443         if (!sp->caps_kobj) {
444                 err = -EIO;
445                 goto fail;
446         }
447
448         if (nvhost_syncpt_set_sysfs_capability_node(sp, num_syncpts_name,
449                 sp->caps_nodes, &nvhost_syncpt_nb_pts)) {
450                 err = -EIO;
451                 goto fail;
452         }
453
454         if (nvhost_syncpt_set_sysfs_capability_node(sp, num_waitbases_name,
455                 sp->caps_nodes + 1, &nvhost_syncpt_nb_bases)) {
456                 err = -EIO;
457                 goto fail;
458         }
459
460         if (nvhost_syncpt_set_sysfs_capability_node(sp, num_mutexes_name,
461                 sp->caps_nodes + 2, &nvhost_syncpt_nb_mlocks)) {
462                 err = -EIO;
463                 goto fail;
464         }
465
466         /* Allocate two attributes for each sync point: min and max */
467         sp->syncpt_attrs = kzalloc(sizeof(*sp->syncpt_attrs)
468                         * nvhost_syncpt_nb_pts(sp) * 2, GFP_KERNEL);
469         if (!sp->syncpt_attrs) {
470                 err = -ENOMEM;
471                 goto fail;
472         }
473
474         /* Fill in the attributes */
475         for (i = 0; i < nvhost_syncpt_nb_pts(sp); i++) {
476                 char name[MAX_SYNCPT_LENGTH];
477                 struct kobject *kobj;
478                 struct nvhost_syncpt_attr *min = &sp->syncpt_attrs[i*2];
479                 struct nvhost_syncpt_attr *max = &sp->syncpt_attrs[i*2+1];
480
481                 /* Create one directory per sync point */
482                 snprintf(name, sizeof(name), "%d", i);
483                 kobj = kobject_create_and_add(name, sp->kobj);
484                 if (!kobj) {
485                         err = -EIO;
486                         goto fail;
487                 }
488
489                 min->id = i;
490                 min->host = host;
491                 min->attr.attr.name = min_name;
492                 min->attr.attr.mode = S_IRUGO;
493                 min->attr.show = syncpt_min_show;
494                 if (sysfs_create_file(kobj, &min->attr.attr)) {
495                         err = -EIO;
496                         goto fail;
497                 }
498
499                 max->id = i;
500                 max->host = host;
501                 max->attr.attr.name = max_name;
502                 max->attr.attr.mode = S_IRUGO;
503                 max->attr.show = syncpt_max_show;
504                 if (sysfs_create_file(kobj, &max->attr.attr)) {
505                         err = -EIO;
506                         goto fail;
507                 }
508         }
509
510         return err;
511
512 fail:
513         nvhost_syncpt_deinit(sp);
514         return err;
515 }
516
517 void nvhost_syncpt_deinit(struct nvhost_syncpt *sp)
518 {
519         kobject_put(sp->kobj);
520         kobject_put(sp->caps_kobj);
521
522         kfree(sp->min_val);
523         sp->min_val = NULL;
524
525         kfree(sp->max_val);
526         sp->max_val = NULL;
527
528         kfree(sp->base_val);
529         sp->base_val = NULL;
530
531         kfree(sp->lock_counts);
532         sp->lock_counts = 0;
533
534         kfree(sp->syncpt_attrs);
535         sp->syncpt_attrs = NULL;
536
537         kfree(sp->caps_nodes);
538         sp->caps_nodes = NULL;
539
540 }
541
542 int nvhost_syncpt_client_managed(struct nvhost_syncpt *sp, u32 id)
543 {
544         u64 mask = 1ULL << id;
545         return !!(syncpt_to_dev(sp)->info.client_managed & mask);
546 }
547
/* Number of syncpoints supported by this host, from the hw info. */
int nvhost_syncpt_nb_pts(struct nvhost_syncpt *sp)
{
	return syncpt_to_dev(sp)->info.nb_pts;
}
552
/* Number of wait bases supported by this host, from the hw info. */
int nvhost_syncpt_nb_bases(struct nvhost_syncpt *sp)
{
	return syncpt_to_dev(sp)->info.nb_bases;
}
557
/* Number of hardware mutexes (mlocks) on this host, from the hw info. */
int nvhost_syncpt_nb_mlocks(struct nvhost_syncpt *sp)
{
	return syncpt_to_dev(sp)->info.nb_mlocks;
}
562
563 void nvhost_syncpt_set_manager(struct nvhost_syncpt *sp, int id, bool client)
564 {
565         u64 mask = 1ULL << id;
566         syncpt_to_dev(sp)->info.client_managed &= ~mask;
567         syncpt_to_dev(sp)->info.client_managed |= client ? mask : 0;
568 }
569
570 /* public sync point API */
571 u32 nvhost_syncpt_incr_max_ext(struct platform_device *dev, u32 id, u32 incrs)
572 {
573         struct platform_device *pdev;
574         struct nvhost_syncpt *sp;
575
576         if (!nvhost_get_parent(dev)) {
577                 dev_err(&dev->dev, "Incr max called with wrong dev\n");
578                 return 0;
579         }
580
581         /* get the parent */
582         pdev = to_platform_device(dev->dev.parent);
583         sp = &(nvhost_get_host(pdev)->syncpt);
584
585         return nvhost_syncpt_incr_max(sp, id, incrs);
586 }
587
588 void nvhost_syncpt_cpu_incr_ext(struct platform_device *dev, u32 id)
589 {
590         struct platform_device *pdev;
591         struct nvhost_syncpt *sp;
592
593         if (!nvhost_get_parent(dev)) {
594                 dev_err(&dev->dev, "Incr called with wrong dev\n");
595                 return;
596         }
597
598         /* get the parent */
599         pdev = to_platform_device(dev->dev.parent);
600         sp = &(nvhost_get_host(pdev)->syncpt);
601
602         nvhost_syncpt_cpu_incr(sp, id);
603 }
604
/* Sets the sw shadow of wait base @id to @val and pushes it to hardware.
 * NOTE(review): no nvhost_module_busy here -- presumably the caller
 * guarantees the host is powered; confirm against call sites. */
void nvhost_syncpt_cpu_set_wait_base(struct platform_device *pdev, u32 id,
					u32 val)
{
	struct nvhost_syncpt *sp = &(nvhost_get_host(pdev)->syncpt);

	sp->base_val[id] = val;
	syncpt_op().reset_wait_base(sp, id);
	/* make sure the register write is posted before returning */
	wmb();
}
614
615 u32 nvhost_syncpt_read_ext(struct platform_device *dev, u32 id)
616 {
617         struct platform_device *pdev;
618         struct nvhost_syncpt *sp;
619
620         if (!nvhost_get_parent(dev)) {
621                 dev_err(&dev->dev, "Read called with wrong dev\n");
622                 return 0;
623         }
624
625         /* get the parent */
626         pdev = to_platform_device(dev->dev.parent);
627         sp = &(nvhost_get_host(pdev)->syncpt);
628
629         return nvhost_syncpt_read(sp, id);
630 }
631
632 int nvhost_syncpt_wait_timeout_ext(struct platform_device *dev, u32 id,
633         u32 thresh, u32 timeout, u32 *value)
634 {
635         struct platform_device *pdev;
636         struct nvhost_syncpt *sp;
637
638         if (!nvhost_get_parent(dev)) {
639                 dev_err(&dev->dev, "Wait called with wrong dev\n");
640                 return -EINVAL;
641         }
642
643         /* get the parent */
644         pdev = to_platform_device(dev->dev.parent);
645         sp = &(nvhost_get_host(pdev)->syncpt);
646
647         return nvhost_syncpt_wait_timeout(sp, id, thresh, timeout, value);
648 }