/*
 * drivers/video/tegra/host/nvhost_acm.c
 *
 * Tegra Graphics Host Automatic Clock Management
 *
 * Copyright (c) 2010-2014, NVIDIA Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "nvhost_acm.h"
#include "dev.h"
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <mach/powergate.h>
#include <mach/clk.h>
#include <mach/hardware.h>

#define ACM_SUSPEND_WAIT_FOR_IDLE_TIMEOUT       (2 * HZ)
#define POWERGATE_DELAY                         10
#define MAX_DEVID_LENGTH                        16

DEFINE_MUTEX(client_list_lock);

struct nvhost_module_client {
        struct list_head node;
        unsigned long rate[NVHOST_MODULE_MAX_CLOCKS];
        void *priv;
};

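/* Power gate helpers: a powergate id of -1 means the module has no
 * power-gating partition and the call is a no-op.
 */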
static void do_powergate_locked(int id)
{
        if (id != -1 && tegra_powergate_is_powered(id))
                tegra_powergate_partition(id);
}

static void do_unpowergate_locked(int id)
{
        if (id != -1)
                tegra_unpowergate_partition(id);
}

static void do_module_reset_locked(struct nvhost_device *dev)
{
        /* assert module and mc client reset */
        if (dev->powergate_ids[0] != -1) {
                tegra_powergate_mc_disable(dev->powergate_ids[0]);
                tegra_periph_reset_assert(dev->clk[0]);
                tegra_powergate_mc_flush(dev->powergate_ids[0]);
        }
        if (dev->powergate_ids[1] != -1) {
                tegra_powergate_mc_disable(dev->powergate_ids[1]);
                tegra_periph_reset_assert(dev->clk[1]);
                tegra_powergate_mc_flush(dev->powergate_ids[1]);
        }

        udelay(POWERGATE_DELAY);

        /* deassert reset */
        if (dev->powergate_ids[0] != -1) {
                tegra_powergate_mc_flush_done(dev->powergate_ids[0]);
                tegra_periph_reset_deassert(dev->clk[0]);
                tegra_powergate_mc_enable(dev->powergate_ids[0]);
        }
        if (dev->powergate_ids[1] != -1) {
                tegra_powergate_mc_flush_done(dev->powergate_ids[1]);
                tegra_periph_reset_deassert(dev->clk[1]);
                tegra_powergate_mc_enable(dev->powergate_ids[1]);
        }
}

void nvhost_module_reset(struct nvhost_device *dev)
{
        dev_dbg(&dev->dev,
                "%s: asserting %s module reset (id %d, id2 %d)\n",
                __func__, dev->name,
                dev->powergate_ids[0], dev->powergate_ids[1]);

        mutex_lock(&dev->lock);
        do_module_reset_locked(dev);
        mutex_unlock(&dev->lock);

        dev_dbg(&dev->dev, "%s: module %s out of reset\n",
                __func__, dev->name);
}

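/* Move the module to the clock-gated state. From RUNNING this disables the
 * module clocks and lets the parent host1x go idle; from POWERGATED it
 * un-gates the partitions (and optionally resets the module) so that only
 * the clocks remain off. Called with dev->lock held.
 */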
static void to_state_clockgated_locked(struct nvhost_device *dev)
{
        struct nvhost_driver *drv = to_nvhost_driver(dev->dev.driver);

        if (dev->powerstate == NVHOST_POWER_STATE_RUNNING) {
                int i, err;
                if (drv->prepare_clockoff) {
                        err = drv->prepare_clockoff(dev);
                        if (err) {
                                dev_err(&dev->dev, "error clock gating\n");
                                return;
                        }
                }
                for (i = 0; i < dev->num_clks; i++)
                        clk_disable(dev->clk[i]);
                if (dev->dev.parent)
                        nvhost_module_idle(to_nvhost_device(dev->dev.parent));
        } else if (dev->powerstate == NVHOST_POWER_STATE_POWERGATED
                        && dev->can_powergate) {
                do_unpowergate_locked(dev->powergate_ids[0]);
                do_unpowergate_locked(dev->powergate_ids[1]);

                if (dev->powerup_reset)
                        do_module_reset_locked(dev);
        }
        dev->powerstate = NVHOST_POWER_STATE_CLOCKGATED;
}

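/* Move the module to the running state: keep the parent host1x busy, enable
 * all module clocks and run the finalize_clockon/finalize_poweron callbacks
 * as needed. Called with dev->lock held.
 */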
static void to_state_running_locked(struct nvhost_device *dev)
{
        struct nvhost_driver *drv = to_nvhost_driver(dev->dev.driver);
        int prev_state = dev->powerstate;

        if (dev->powerstate == NVHOST_POWER_STATE_POWERGATED)
                to_state_clockgated_locked(dev);

        if (dev->powerstate == NVHOST_POWER_STATE_CLOCKGATED) {
                int i;

                if (dev->dev.parent)
                        nvhost_module_busy(to_nvhost_device(dev->dev.parent));

                for (i = 0; i < dev->num_clks; i++) {
                        int err = clk_enable(dev->clk[i]);
                        if (err) {
                                dev_err(&dev->dev, "Cannot turn on clock %s\n",
                                        dev->clocks[i].name);
                                return;
                        }
                }

                /* Invoke callback after enabling clock. This is used for
                 * re-enabling host1x interrupts. */
                if (prev_state == NVHOST_POWER_STATE_CLOCKGATED
                                && drv->finalize_clockon)
                        drv->finalize_clockon(dev);

                /* Invoke callback after power un-gating. This is used for
                 * restoring context. */
                if (prev_state == NVHOST_POWER_STATE_POWERGATED
                                && drv->finalize_poweron)
                        drv->finalize_poweron(dev);
        }
        dev->powerstate = NVHOST_POWER_STATE_RUNNING;
}

/* This gets called from powerstate_down_handler() and from module suspend.
 * Module suspend is done for all modules, runtime power gating only
 * for modules with can_powergate set.
 */
static int to_state_powergated_locked(struct nvhost_device *dev)
{
        int err = 0;
        struct nvhost_driver *drv = to_nvhost_driver(dev->dev.driver);

        if (drv->prepare_poweroff
                        && dev->powerstate != NVHOST_POWER_STATE_POWERGATED) {
                /* Clock needs to be on in prepare_poweroff */
                to_state_running_locked(dev);
                err = drv->prepare_poweroff(dev);
                if (err)
                        return err;
        }

        if (dev->powerstate == NVHOST_POWER_STATE_RUNNING)
                to_state_clockgated_locked(dev);

        if (dev->can_powergate) {
                do_powergate_locked(dev->powergate_ids[0]);
                do_powergate_locked(dev->powergate_ids[1]);
        }

        dev->powerstate = NVHOST_POWER_STATE_POWERGATED;
        return 0;
}

static void schedule_powergating_locked(struct nvhost_device *dev)
{
        if (dev->can_powergate)
                schedule_delayed_work(&dev->powerstate_down,
                                msecs_to_jiffies(dev->powergate_delay));
}

static void schedule_clockgating_locked(struct nvhost_device *dev)
{
        schedule_delayed_work(&dev->powerstate_down,
                        msecs_to_jiffies(dev->clockgate_delay));
}

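/* Take a reference on the module and make sure it is powered and clocked.
 * Any pending power-state-down work is cancelled first.
 */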
void nvhost_module_busy(struct nvhost_device *dev)
{
        struct nvhost_driver *drv = to_nvhost_driver(dev->dev.driver);

        if (drv->busy)
                drv->busy(dev);

        mutex_lock(&dev->lock);
        cancel_delayed_work(&dev->powerstate_down);

        dev->refcount++;
        if (unlikely(dev->refcount <= 0))
                pr_err("unbalanced refcount %d\n", dev->refcount);
        if (!nvhost_module_powered(dev))
                to_state_running_locked(dev);

        mutex_unlock(&dev->lock);
}

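/* Delayed work that steps an idle module down one power state at a time:
 * running -> clock gated, then clock gated -> power gated.
 */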
static void powerstate_down_handler(struct work_struct *work)
{
        struct nvhost_device *dev;

        dev = container_of(to_delayed_work(work),
                        struct nvhost_device,
                        powerstate_down);

        mutex_lock(&dev->lock);
        if (dev->refcount == 0) {
                switch (dev->powerstate) {
                case NVHOST_POWER_STATE_RUNNING:
                        to_state_clockgated_locked(dev);
                        schedule_powergating_locked(dev);
                        break;
                case NVHOST_POWER_STATE_CLOCKGATED:
                        if (to_state_powergated_locked(dev))
                                schedule_powergating_locked(dev);
                        break;
                default:
                        break;
                }
        }
        mutex_unlock(&dev->lock);
}

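/* Drop 'refs' references. When the last reference goes away, clock gating
 * is scheduled and anyone waiting on idle_wq is woken.
 */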
void nvhost_module_idle_mult(struct nvhost_device *dev, int refs)
{
        struct nvhost_driver *drv = to_nvhost_driver(dev->dev.driver);
        bool kick = false;

        mutex_lock(&dev->lock);
        dev->refcount -= refs;
        if (dev->refcount == 0) {
                if (nvhost_module_powered(dev))
                        schedule_clockgating_locked(dev);
                kick = true;
        }
        mutex_unlock(&dev->lock);

        if (kick) {
                wake_up(&dev->idle_wq);

                if (drv->idle)
                        drv->idle(dev);
        }
}

int nvhost_module_get_rate(struct nvhost_device *dev, unsigned long *rate,
                int index)
{
        struct clk *c;

        c = dev->clk[index];
        if (IS_ERR_OR_NULL(c))
                return -EINVAL;

        /* Need to enable client to get correct rate */
        nvhost_module_busy(dev);
        *rate = clk_get_rate(c);
        nvhost_module_idle(dev);
        return 0;
}

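/* Recompute the rate of clock 'index' as the maximum requested by the
 * registered clients, falling back to the rounded default rate when no
 * client has asked for anything. Called with client_list_lock held.
 */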
static int nvhost_module_update_rate(struct nvhost_device *dev, int index)
{
        unsigned long rate = 0;
        struct nvhost_module_client *m;

        if (!dev->clk[index])
                return -EINVAL;

        list_for_each_entry(m, &dev->client_list, node) {
                rate = max(m->rate[index], rate);
        }
        if (!rate)
                rate = clk_round_rate(dev->clk[index],
                                dev->clocks[index].default_rate);

        return clk_set_rate(dev->clk[index], rate);
}

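/* Record the rate a client asks for (rounded per clock) and re-apply the
 * aggregated rates to all module clocks.
 */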
int nvhost_module_set_rate(struct nvhost_device *dev, void *priv,
                unsigned long rate, int index)
{
        struct nvhost_module_client *m;
        int i, ret = 0;

        mutex_lock(&client_list_lock);
        list_for_each_entry(m, &dev->client_list, node) {
                if (m->priv == priv) {
                        for (i = 0; i < dev->num_clks; i++)
                                m->rate[i] = clk_round_rate(dev->clk[i], rate);
                        break;
                }
        }

        for (i = 0; i < dev->num_clks; i++) {
                ret = nvhost_module_update_rate(dev, i);
                if (ret < 0)
                        break;
        }
        mutex_unlock(&client_list_lock);
        return ret;
}

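/* Register a rate client for the module; its per-clock requests start out
 * at the rounded default rates.
 */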
int nvhost_module_add_client(struct nvhost_device *dev, void *priv)
{
        int i;
        unsigned long rate;
        struct nvhost_module_client *client;

        client = kzalloc(sizeof(*client), GFP_KERNEL);
        if (!client)
                return -ENOMEM;

        INIT_LIST_HEAD(&client->node);
        client->priv = priv;

        for (i = 0; i < dev->num_clks; i++) {
                rate = clk_round_rate(dev->clk[i],
                                dev->clocks[i].default_rate);
                client->rate[i] = rate;
        }
        mutex_lock(&client_list_lock);
        list_add_tail(&client->node, &dev->client_list);
        mutex_unlock(&client_list_lock);
        return 0;
}

void nvhost_module_remove_client(struct nvhost_device *dev, void *priv)
{
        int i;
        struct nvhost_module_client *m;
        int found = 0;

        mutex_lock(&client_list_lock);
        list_for_each_entry(m, &dev->client_list, node) {
                if (priv == m->priv) {
                        list_del(&m->node);
                        found = 1;
                        break;
                }
        }
        if (found) {
                kfree(m);
                for (i = 0; i < dev->num_clks; i++)
                        nvhost_module_update_rate(dev, i);
        }
        mutex_unlock(&client_list_lock);
}

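/* sysfs attributes exposed under the per-device "acm" kobject:
 * refcount, clockgate_delay and powergate_delay.
 */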
static ssize_t refcount_show(struct kobject *kobj,
        struct kobj_attribute *attr, char *buf)
{
        int ret;
        struct nvhost_device_power_attr *power_attribute =
                container_of(attr, struct nvhost_device_power_attr,
                        power_attr[NVHOST_POWER_SYSFS_ATTRIB_REFCOUNT]);
        struct nvhost_device *dev = power_attribute->ndev;

        mutex_lock(&dev->lock);
        ret = sprintf(buf, "%d\n", dev->refcount);
        mutex_unlock(&dev->lock);

        return ret;
}

static ssize_t powergate_delay_store(struct kobject *kobj,
        struct kobj_attribute *attr, const char *buf, size_t count)
{
        int powergate_delay = 0, ret = 0;
        struct nvhost_device_power_attr *power_attribute =
                container_of(attr, struct nvhost_device_power_attr,
                        power_attr[NVHOST_POWER_SYSFS_ATTRIB_POWERGATE_DELAY]);
        struct nvhost_device *dev = power_attribute->ndev;

        if (!dev->can_powergate) {
                dev_info(&dev->dev, "does not support power-gating\n");
                return count;
        }

        mutex_lock(&dev->lock);
        ret = sscanf(buf, "%d", &powergate_delay);
        if (ret == 1 && powergate_delay >= 0)
                dev->powergate_delay = powergate_delay;
        else
                dev_err(&dev->dev, "Invalid powergate delay\n");
        mutex_unlock(&dev->lock);

        return count;
}

static ssize_t powergate_delay_show(struct kobject *kobj,
        struct kobj_attribute *attr, char *buf)
{
        int ret;
        struct nvhost_device_power_attr *power_attribute =
                container_of(attr, struct nvhost_device_power_attr,
                        power_attr[NVHOST_POWER_SYSFS_ATTRIB_POWERGATE_DELAY]);
        struct nvhost_device *dev = power_attribute->ndev;

        mutex_lock(&dev->lock);
        ret = sprintf(buf, "%d\n", dev->powergate_delay);
        mutex_unlock(&dev->lock);

        return ret;
}

static ssize_t clockgate_delay_store(struct kobject *kobj,
        struct kobj_attribute *attr, const char *buf, size_t count)
{
        int clockgate_delay = 0, ret = 0;
        struct nvhost_device_power_attr *power_attribute =
                container_of(attr, struct nvhost_device_power_attr,
                        power_attr[NVHOST_POWER_SYSFS_ATTRIB_CLOCKGATE_DELAY]);
        struct nvhost_device *dev = power_attribute->ndev;

        mutex_lock(&dev->lock);
        ret = sscanf(buf, "%d", &clockgate_delay);
        if (ret == 1 && clockgate_delay >= 0)
                dev->clockgate_delay = clockgate_delay;
        else
                dev_err(&dev->dev, "Invalid clockgate delay\n");
        mutex_unlock(&dev->lock);

        return count;
}

static ssize_t clockgate_delay_show(struct kobject *kobj,
        struct kobj_attribute *attr, char *buf)
{
        int ret;
        struct nvhost_device_power_attr *power_attribute =
                container_of(attr, struct nvhost_device_power_attr,
                        power_attr[NVHOST_POWER_SYSFS_ATTRIB_CLOCKGATE_DELAY]);
        struct nvhost_device *dev = power_attribute->ndev;

        mutex_lock(&dev->lock);
        ret = sprintf(buf, "%d\n", dev->clockgate_delay);
        mutex_unlock(&dev->lock);

        return ret;
}

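/* One-time ACM setup for a module: look up and pre-set its clocks, bring it
 * to a known power state and create the "acm" sysfs attributes.
 */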
int nvhost_module_init(struct nvhost_device *dev)
{
        int i = 0, err = 0;
        struct kobj_attribute *attr = NULL;

        /* initialize clocks to known state */
        INIT_LIST_HEAD(&dev->client_list);
        while (i < NVHOST_MODULE_MAX_CLOCKS && dev->clocks[i].name) {
                char devname[MAX_DEVID_LENGTH];
                long rate = dev->clocks[i].default_rate;
                struct clk *c;

                snprintf(devname, MAX_DEVID_LENGTH, "tegra_%s", dev->name);
                c = clk_get_sys(devname, dev->clocks[i].name);
                if (IS_ERR_OR_NULL(c)) {
                        dev_err(&dev->dev, "Cannot get clock %s\n",
                                        dev->clocks[i].name);
                        /* stop here; retrying the same index would spin */
                        break;
                }

                rate = clk_round_rate(c, rate);
                clk_enable(c);
                clk_set_rate(c, rate);
                clk_disable(c);
                dev->clk[i] = c;
                i++;
        }
        dev->num_clks = i;

        mutex_init(&dev->lock);
        init_waitqueue_head(&dev->idle_wq);
        INIT_DELAYED_WORK(&dev->powerstate_down, powerstate_down_handler);

        /* power gate units that we can power gate */
        if (dev->can_powergate) {
                do_powergate_locked(dev->powergate_ids[0]);
                do_powergate_locked(dev->powergate_ids[1]);
                dev->powerstate = NVHOST_POWER_STATE_POWERGATED;
        } else {
                do_unpowergate_locked(dev->powergate_ids[0]);
                do_unpowergate_locked(dev->powergate_ids[1]);
                dev->powerstate = NVHOST_POWER_STATE_CLOCKGATED;
        }

        /* Init the power sysfs attributes for this device */
        dev->power_attrib = kzalloc(sizeof(struct nvhost_device_power_attr),
                GFP_KERNEL);
        if (!dev->power_attrib) {
                dev_err(&dev->dev, "Unable to allocate sysfs attributes\n");
                return -ENOMEM;
        }
        dev->power_attrib->ndev = dev;

        dev->power_kobj = kobject_create_and_add("acm", &dev->dev.kobj);
        if (!dev->power_kobj) {
                dev_err(&dev->dev, "Could not add dir 'acm'\n");
                err = -EIO;
                goto fail_attrib_alloc;
        }

        attr = &dev->power_attrib->power_attr[NVHOST_POWER_SYSFS_ATTRIB_CLOCKGATE_DELAY];
        attr->attr.name = "clockgate_delay";
        attr->attr.mode = S_IWUSR | S_IRUGO;
        attr->show = clockgate_delay_show;
        attr->store = clockgate_delay_store;
        if (sysfs_create_file(dev->power_kobj, &attr->attr)) {
                dev_err(&dev->dev, "Could not create sysfs attribute clockgate_delay\n");
                err = -EIO;
                goto fail_clockdelay;
        }

        attr = &dev->power_attrib->power_attr[NVHOST_POWER_SYSFS_ATTRIB_POWERGATE_DELAY];
        attr->attr.name = "powergate_delay";
        attr->attr.mode = S_IWUSR | S_IRUGO;
        attr->show = powergate_delay_show;
        attr->store = powergate_delay_store;
        if (sysfs_create_file(dev->power_kobj, &attr->attr)) {
                dev_err(&dev->dev, "Could not create sysfs attribute powergate_delay\n");
                err = -EIO;
                goto fail_powergatedelay;
        }

        attr = &dev->power_attrib->power_attr[NVHOST_POWER_SYSFS_ATTRIB_REFCOUNT];
        attr->attr.name = "refcount";
        attr->attr.mode = S_IRUGO;
        attr->show = refcount_show;
        if (sysfs_create_file(dev->power_kobj, &attr->attr)) {
                dev_err(&dev->dev, "Could not create sysfs attribute refcount\n");
                err = -EIO;
                goto fail_refcount;
        }

        return 0;

fail_refcount:
        attr = &dev->power_attrib->power_attr[NVHOST_POWER_SYSFS_ATTRIB_POWERGATE_DELAY];
        sysfs_remove_file(dev->power_kobj, &attr->attr);

fail_powergatedelay:
        attr = &dev->power_attrib->power_attr[NVHOST_POWER_SYSFS_ATTRIB_CLOCKGATE_DELAY];
        sysfs_remove_file(dev->power_kobj, &attr->attr);

fail_clockdelay:
        kobject_put(dev->power_kobj);

fail_attrib_alloc:
        kfree(dev->power_attrib);

        return err;
}

static int is_module_idle(struct nvhost_device *dev)
{
        int count;
        mutex_lock(&dev->lock);
        count = dev->refcount;
        mutex_unlock(&dev->lock);
        return (count == 0);
}

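/* Wait (up to ACM_SUSPEND_WAIT_FOR_IDLE_TIMEOUT) for the module to go idle,
 * then force it into the power-gated state. Returns -EBUSY if it is still
 * busy after the timeout.
 */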
int nvhost_module_suspend(struct nvhost_device *dev)
{
        int ret;
        struct nvhost_driver *drv = to_nvhost_driver(dev->dev.driver);

        ret = wait_event_timeout(dev->idle_wq, is_module_idle(dev),
                        ACM_SUSPEND_WAIT_FOR_IDLE_TIMEOUT);
        if (ret == 0) {
                dev_info(&dev->dev, "%s prevented suspend\n",
                                dev->name);
                return -EBUSY;
        }

        mutex_lock(&dev->lock);
        cancel_delayed_work(&dev->powerstate_down);
        to_state_powergated_locked(dev);
        mutex_unlock(&dev->lock);

        if (drv->suspend_ndev)
                drv->suspend_ndev(dev);

        return 0;
}

void nvhost_module_deinit(struct nvhost_device *dev)
{
        int i;
        struct nvhost_driver *drv = to_nvhost_driver(dev->dev.driver);

        if (drv->deinit)
                drv->deinit(dev);

        nvhost_module_suspend(dev);
        for (i = 0; i < dev->num_clks; i++)
                clk_put(dev->clk[i]);
        dev->powerstate = NVHOST_POWER_STATE_DEINIT;
}

/* public host1x power management APIs */
bool nvhost_module_powered_ext(struct nvhost_device *dev)
{
        return nvhost_module_powered(dev);
}

void nvhost_module_busy_ext(struct nvhost_device *dev)
{
        nvhost_module_busy(dev);
}

void nvhost_module_idle_ext(struct nvhost_device *dev)
{
        nvhost_module_idle(dev);
}
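
/*
 * Usage sketch (illustrative only, not part of this file's API surface):
 * a client that needs the unit powered brackets its work with a busy/idle
 * pair, and the delayed powerstate_down work then steps the module back
 * down after clockgate_delay/powergate_delay milliseconds of idleness.
 *
 *      nvhost_module_busy(dev);
 *      ...program the unit, submit work...
 *      nvhost_module_idle(dev);
 *
 * Callers outside host1x use the *_ext() wrappers above instead.
 */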