video: tegra: host: Move device data to nvhost_device
Source: linux-2.6.git — drivers/video/tegra/host/nvhost_acm.c
1 /*
2  * drivers/video/tegra/host/nvhost_acm.c
3  *
4  * Tegra Graphics Host Automatic Clock Management
5  *
6  * Copyright (c) 2010-2011, NVIDIA Corporation.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along
19  * with this program; if not, write to the Free Software Foundation, Inc.,
20  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
21  */
22
23 #include "nvhost_acm.h"
24 #include "dev.h"
25 #include <linux/slab.h>
26 #include <linux/string.h>
27 #include <linux/sched.h>
28 #include <linux/err.h>
29 #include <linux/device.h>
30 #include <linux/delay.h>
31 #include <mach/powergate.h>
32 #include <mach/clk.h>
33 #include <mach/hardware.h>
34
/* How long module suspend waits for all users of a module to go idle. */
#define ACM_SUSPEND_WAIT_FOR_IDLE_TIMEOUT (2 * HZ)
/* Microseconds between asserting and deasserting module reset (udelay). */
#define POWERGATE_DELAY 10
/* Max length of the "tegra_<name>" device id built for clk_get_sys(). */
#define MAX_DEVID_LENGTH 16

/* Protects every device's client_list of per-client clock-rate requests. */
DEFINE_MUTEX(client_list_lock);

/* One attached client's clock-rate requests for a module. */
struct nvhost_module_client {
	struct list_head node;	/* link in nvhost_device::client_list */
	/* requested rate per module clock; the max across clients wins */
	unsigned long rate[NVHOST_MODULE_MAX_CLOCKS];
	void *priv;		/* opaque client cookie used as lookup key */
};
46
/* Power-gate partition @id, skipping the invalid id (-1) and partitions
 * that are already unpowered. */
static void do_powergate_locked(int id)
{
	if (id == -1)
		return;
	if (tegra_powergate_is_powered(id))
		tegra_powergate_partition(id);
}
52
/* Un-power-gate partition @id unless it is the invalid id (-1). */
static void do_unpowergate_locked(int id)
{
	if (id == -1)
		return;
	tegra_unpowergate_partition(id);
}
58
59 void nvhost_module_reset(struct nvhost_device *dev)
60 {
61         dev_dbg(&dev->dev,
62                 "%s: asserting %s module reset (id %d, id2 %d)\n",
63                 __func__, dev->name,
64                 dev->powergate_ids[0], dev->powergate_ids[1]);
65
66         mutex_lock(&dev->lock);
67
68         /* assert module and mc client reset */
69         if (dev->powergate_ids[0] != -1) {
70                 tegra_powergate_mc_disable(dev->powergate_ids[0]);
71                 tegra_periph_reset_assert(dev->clk[0]);
72                 tegra_powergate_mc_flush(dev->powergate_ids[0]);
73         }
74         if (dev->powergate_ids[1] != -1) {
75                 tegra_powergate_mc_disable(dev->powergate_ids[1]);
76                 tegra_periph_reset_assert(dev->clk[1]);
77                 tegra_powergate_mc_flush(dev->powergate_ids[1]);
78         }
79
80         udelay(POWERGATE_DELAY);
81
82         /* deassert reset */
83         if (dev->powergate_ids[0] != -1) {
84                 tegra_powergate_mc_flush_done(dev->powergate_ids[0]);
85                 tegra_periph_reset_deassert(dev->clk[0]);
86                 tegra_powergate_mc_enable(dev->powergate_ids[0]);
87         }
88         if (dev->powergate_ids[1] != -1) {
89                 tegra_powergate_mc_flush_done(dev->powergate_ids[1]);
90                 tegra_periph_reset_deassert(dev->clk[1]);
91                 tegra_powergate_mc_enable(dev->powergate_ids[1]);
92         }
93
94         mutex_unlock(&dev->lock);
95
96         dev_dbg(&dev->dev, "%s: module %s out of reset\n",
97                 __func__, dev->name);
98 }
99
100 static void to_state_clockgated_locked(struct nvhost_device *dev)
101 {
102         if (dev->powerstate == NVHOST_POWER_STATE_RUNNING) {
103                 int i;
104                 for (i = 0; i < dev->num_clks; i++)
105                         clk_disable(dev->clk[i]);
106                 if (dev->dev.parent)
107                         nvhost_module_idle(to_nvhost_device(dev->dev.parent));
108         } else if (dev->powerstate == NVHOST_POWER_STATE_POWERGATED
109                         && dev->can_powergate) {
110                 do_unpowergate_locked(dev->powergate_ids[0]);
111                 do_unpowergate_locked(dev->powergate_ids[1]);
112         }
113         dev->powerstate = NVHOST_POWER_STATE_CLOCKGATED;
114 }
115
116 static void to_state_running_locked(struct nvhost_device *dev)
117 {
118         int prev_state = dev->powerstate;
119         if (dev->powerstate == NVHOST_POWER_STATE_POWERGATED)
120                 to_state_clockgated_locked(dev);
121         if (dev->powerstate == NVHOST_POWER_STATE_CLOCKGATED) {
122                 int i;
123
124                 if (dev->dev.parent)
125                         nvhost_module_busy(to_nvhost_device(dev->dev.parent));
126
127                 for (i = 0; i < dev->num_clks; i++) {
128                         int err = clk_enable(dev->clk[i]);
129                         BUG_ON(err);
130                 }
131
132                 if (prev_state == NVHOST_POWER_STATE_POWERGATED
133                                 && dev->finalize_poweron)
134                         dev->finalize_poweron(dev);
135         }
136         dev->powerstate = NVHOST_POWER_STATE_RUNNING;
137 }
138
139 /* This gets called from powergate_handler() and from module suspend.
140  * Module suspend is done for all modules, runtime power gating only
141  * for modules with can_powergate set.
142  */
143 static int to_state_powergated_locked(struct nvhost_device *dev)
144 {
145         int err = 0;
146
147         if (dev->prepare_poweroff
148                         && dev->powerstate != NVHOST_POWER_STATE_POWERGATED) {
149                 /* Clock needs to be on in prepare_poweroff */
150                 to_state_running_locked(dev);
151                 err = dev->prepare_poweroff(dev);
152                 if (err)
153                         return err;
154         }
155
156         if (dev->powerstate == NVHOST_POWER_STATE_RUNNING)
157                 to_state_clockgated_locked(dev);
158
159         if (dev->can_powergate) {
160                 do_powergate_locked(dev->powergate_ids[0]);
161                 do_powergate_locked(dev->powergate_ids[1]);
162         }
163
164         dev->powerstate = NVHOST_POWER_STATE_POWERGATED;
165         return 0;
166 }
167
168 static void schedule_powergating_locked(struct nvhost_device *dev)
169 {
170         if (dev->can_powergate)
171                 schedule_delayed_work(&dev->powerstate_down,
172                                 msecs_to_jiffies(dev->powergate_delay));
173 }
174
175 static void schedule_clockgating_locked(struct nvhost_device *dev)
176 {
177         schedule_delayed_work(&dev->powerstate_down,
178                         msecs_to_jiffies(dev->clockgate_delay));
179 }
180
181 void nvhost_module_busy(struct nvhost_device *dev)
182 {
183         if (dev->busy)
184                 dev->busy(dev);
185
186         mutex_lock(&dev->lock);
187         cancel_delayed_work(&dev->powerstate_down);
188
189         dev->refcount++;
190         if (dev->refcount > 0 && !nvhost_module_powered(dev))
191                 to_state_running_locked(dev);
192         mutex_unlock(&dev->lock);
193 }
194
195 static void powerstate_down_handler(struct work_struct *work)
196 {
197         struct nvhost_device *dev;
198
199         dev = container_of(to_delayed_work(work),
200                         struct nvhost_device,
201                         powerstate_down);
202
203         mutex_lock(&dev->lock);
204         if (dev->refcount == 0) {
205                 switch (dev->powerstate) {
206                 case NVHOST_POWER_STATE_RUNNING:
207                         to_state_clockgated_locked(dev);
208                         schedule_powergating_locked(dev);
209                         break;
210                 case NVHOST_POWER_STATE_CLOCKGATED:
211                         if (to_state_powergated_locked(dev))
212                                 schedule_powergating_locked(dev);
213                         break;
214                 default:
215                         break;
216                 }
217         }
218         mutex_unlock(&dev->lock);
219 }
220
221
222 void nvhost_module_idle_mult(struct nvhost_device *dev, int refs)
223 {
224         bool kick = false;
225
226         mutex_lock(&dev->lock);
227         dev->refcount -= refs;
228         if (dev->refcount == 0) {
229                 if (nvhost_module_powered(dev))
230                         schedule_clockgating_locked(dev);
231                 kick = true;
232         }
233         mutex_unlock(&dev->lock);
234
235         if (kick) {
236                 wake_up(&dev->idle_wq);
237
238                 if (dev->idle)
239                         dev->idle(dev);
240         }
241 }
242
243 int nvhost_module_get_rate(struct nvhost_device *dev, unsigned long *rate,
244                 int index)
245 {
246         struct clk *c;
247
248         c = dev->clk[index];
249         if (IS_ERR_OR_NULL(c))
250                 return -EINVAL;
251
252         /* Need to enable client to get correct rate */
253         nvhost_module_busy(dev);
254         *rate = clk_get_rate(c);
255         nvhost_module_idle(dev);
256         return 0;
257
258 }
259
260 static int nvhost_module_update_rate(struct nvhost_device *dev, int index)
261 {
262         unsigned long rate = 0;
263         struct nvhost_module_client *m;
264
265         if (!dev->clk[index])
266                 return -EINVAL;
267
268         list_for_each_entry(m, &dev->client_list, node) {
269                 rate = max(m->rate[index], rate);
270         }
271         if (!rate)
272                 rate = clk_round_rate(dev->clk[index],
273                                 dev->clocks[index].default_rate);
274
275         return clk_set_rate(dev->clk[index], rate);
276 }
277
278 int nvhost_module_set_rate(struct nvhost_device *dev, void *priv,
279                 unsigned long rate, int index)
280 {
281         struct nvhost_module_client *m;
282         int ret;
283
284         mutex_lock(&client_list_lock);
285         list_for_each_entry(m, &dev->client_list, node) {
286                 if (m->priv == priv) {
287                         rate = clk_round_rate(dev->clk[index], rate);
288                         m->rate[index] = rate;
289                         break;
290                 }
291         }
292         ret = nvhost_module_update_rate(dev, index);
293         mutex_unlock(&client_list_lock);
294         return ret;
295
296 }
297
298 int nvhost_module_add_client(struct nvhost_device *dev, void *priv)
299 {
300         int i;
301         unsigned long rate;
302         struct nvhost_module_client *client;
303
304         client = kzalloc(sizeof(*client), GFP_KERNEL);
305         if (!client)
306                 return -ENOMEM;
307
308         INIT_LIST_HEAD(&client->node);
309         client->priv = priv;
310
311         for (i = 0; i < dev->num_clks; i++) {
312                 rate = clk_round_rate(dev->clk[i],
313                                 dev->clocks[i].default_rate);
314                 client->rate[i] = rate;
315         }
316         mutex_lock(&client_list_lock);
317         list_add_tail(&client->node, &dev->client_list);
318         mutex_unlock(&client_list_lock);
319         return 0;
320 }
321
322 void nvhost_module_remove_client(struct nvhost_device *dev, void *priv)
323 {
324         int i;
325         struct nvhost_module_client *m;
326
327         mutex_lock(&client_list_lock);
328         list_for_each_entry(m, &dev->client_list, node) {
329                 if (priv == m->priv) {
330                         list_del(&m->node);
331                         break;
332                 }
333         }
334         if (m) {
335                 kfree(m);
336                 for (i = 0; i < dev->num_clks; i++)
337                         nvhost_module_update_rate(dev, i);
338         }
339         mutex_unlock(&client_list_lock);
340 }
341
342 void nvhost_module_preinit(struct nvhost_device *dev)
343 {
344         int i = 0;
345
346         /* initialize clocks to known state */
347         while (dev->clocks[i].name && i < NVHOST_MODULE_MAX_CLOCKS) {
348                 char devname[MAX_DEVID_LENGTH];
349                 long rate = dev->clocks[i].default_rate;
350                 struct clk *c;
351
352                 snprintf(devname, MAX_DEVID_LENGTH, "tegra_%s", dev->name);
353                 c = clk_get_sys(devname, dev->clocks[i].name);
354                 BUG_ON(IS_ERR_OR_NULL(c));
355
356                 rate = clk_round_rate(c, rate);
357                 clk_enable(c);
358                 clk_set_rate(c, rate);
359                 clk_disable(c);
360                 i++;
361         }
362
363         if (dev->can_powergate) {
364                 do_powergate_locked(dev->powergate_ids[0]);
365                 do_powergate_locked(dev->powergate_ids[1]);
366         } else {
367                 do_unpowergate_locked(dev->powergate_ids[0]);
368                 do_unpowergate_locked(dev->powergate_ids[1]);
369         }
370 }
371
372 int nvhost_module_init(struct nvhost_device *dev)
373 {
374         int i = 0;
375
376         nvhost_module_preinit(dev);
377
378         INIT_LIST_HEAD(&dev->client_list);
379         while (dev->clocks[i].name && i < NVHOST_MODULE_MAX_CLOCKS) {
380                 char devname[MAX_DEVID_LENGTH];
381
382                 snprintf(devname, MAX_DEVID_LENGTH, "tegra_%s", dev->name);
383                 dev->clk[i] = clk_get_sys(devname, dev->clocks[i].name);
384                 BUG_ON(IS_ERR_OR_NULL(dev->clk[i]));
385                 i++;
386         }
387         dev->num_clks = i;
388
389         mutex_init(&dev->lock);
390         init_waitqueue_head(&dev->idle_wq);
391         INIT_DELAYED_WORK(&dev->powerstate_down, powerstate_down_handler);
392
393         if (dev->can_powergate)
394                 dev->powerstate = NVHOST_POWER_STATE_POWERGATED;
395         else
396                 dev->powerstate = NVHOST_POWER_STATE_CLOCKGATED;
397
398         if (dev->init)
399                 dev->init(dev);
400
401         return 0;
402 }
403
404 static int is_module_idle(struct nvhost_device *dev)
405 {
406         int count;
407         mutex_lock(&dev->lock);
408         count = dev->refcount;
409         mutex_unlock(&dev->lock);
410         return (count == 0);
411 }
412
413 static void debug_not_idle(struct nvhost_master *host)
414 {
415         int i;
416         bool lock_released = true;
417
418         for (i = 0; i < host->nb_channels; i++) {
419                 struct nvhost_device *dev = host->channels[i].dev;
420                 mutex_lock(&dev->lock);
421                 if (dev->name)
422                         dev_warn(&host->pdev->dev,
423                                         "tegra_grhost: %s: refcnt %d\n",
424                                         dev->name, dev->refcount);
425                 mutex_unlock(&dev->lock);
426         }
427
428         for (i = 0; i < host->nb_mlocks; i++) {
429                 int c = atomic_read(&host->cpuaccess.lock_counts[i]);
430                 if (c) {
431                         dev_warn(&host->pdev->dev,
432                                 "tegra_grhost: lock id %d: refcnt %d\n",
433                                 i, c);
434                         lock_released = false;
435                 }
436         }
437         if (lock_released)
438                 dev_dbg(&host->pdev->dev, "tegra_grhost: all locks released\n");
439 }
440
441 int nvhost_module_suspend(struct nvhost_device *dev, bool system_suspend)
442 {
443         int ret;
444         struct nvhost_master *host;
445
446         if (system_suspend) {
447                 host = dev->host;
448                 if (!is_module_idle(dev))
449                         debug_not_idle(host);
450         } else {
451                 host = dev->host;
452         }
453
454         ret = wait_event_timeout(dev->idle_wq, is_module_idle(dev),
455                         ACM_SUSPEND_WAIT_FOR_IDLE_TIMEOUT);
456         if (ret == 0) {
457                 dev_info(&dev->dev, "%s prevented suspend\n",
458                                 dev->name);
459                 return -EBUSY;
460         }
461
462         if (system_suspend)
463                 dev_dbg(&dev->dev, "tegra_grhost: entered idle\n");
464
465         mutex_lock(&dev->lock);
466         cancel_delayed_work(&dev->powerstate_down);
467         to_state_powergated_locked(dev);
468         mutex_unlock(&dev->lock);
469
470         if (dev->suspend)
471                 dev->suspend(dev);
472
473         return 0;
474 }
475
476 void nvhost_module_deinit(struct nvhost_device *dev)
477 {
478         int i;
479
480         if (dev->deinit)
481                 dev->deinit(dev);
482
483         nvhost_module_suspend(dev, false);
484         for (i = 0; i < dev->num_clks; i++)
485                 clk_put(dev->clk[i]);
486         dev->powerstate = NVHOST_POWER_STATE_DEINIT;
487 }
488