[linux-2.6.git] arch/arm/mach-tegra/cpuidle-t3.c
/*
 * arch/arm/mach-tegra/cpuidle-t3.c
 *
 * CPU idle driver for Tegra3 CPUs
 *
 * Copyright (c) 2010-2011, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/ratelimit.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/tick.h>

#include <asm/cacheflush.h>
#include <asm/cpu_pm.h>
#include <asm/hardware/gic.h>
#include <asm/localtimer.h>

#include <mach/iomap.h>
#include <mach/irqs.h>

#include <trace/events/power.h>

#include "clock.h"
#include "cpuidle.h"
#include "dvfs.h"
#include "fuse.h"
#include "gic.h"
#include "pm.h"
#include "reset.h"
#include "sleep.h"
#include "timer.h"

#define CLK_RST_CONTROLLER_CPU_CMPLX_STATUS \
        (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x470)

#ifdef CONFIG_SMP
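/*
 * Absolute time (us since boot) by which each CPU currently in LP2 must be
 * awakened; LLONG_MAX means no deadline is pending.  CPU0 clamps its own
 * wake time to the earliest of these before powering down the CPU complex.
 */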
static s64 tegra_cpu_wake_by_time[4] = {
        LLONG_MAX, LLONG_MAX, LLONG_MAX, LLONG_MAX };
#endif

static struct clk *cpu_clk_for_dvfs;

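/*
 * LP2 statistics exposed through debugfs; per-CPU arrays use indices 0-3
 * for the G-cluster cores and index 4 for the LP core (see cpu_number()).
 */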
static struct {
        unsigned int cpu_ready_count[5];
        unsigned int tear_down_count[5];
        unsigned long long cpu_wants_lp2_time[5];
        unsigned long long in_lp2_time;
        unsigned int lp2_count;
        unsigned int lp2_completed_count;
        unsigned int lp2_count_bin[32];
        unsigned int lp2_completed_count_bin[32];
        unsigned int lp2_int_count[NR_IRQS];
        unsigned int last_lp2_int_count[NR_IRQS];
} idle_stats;

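/* Map a residency in milliseconds to a log2 histogram bin (highest set bit). */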
static inline unsigned int time_to_bin(unsigned int time)
{
        return fls(time);
}

static inline void tegra_irq_unmask(int irq)
{
        struct irq_data *data = irq_get_irq_data(irq);
        data->chip->irq_unmask(data);
}

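/* Statistics index for a CPU: 4 when running on the LP cluster, otherwise n. */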
static inline unsigned int cpu_number(unsigned int n)
{
        return is_lp_cluster() ? 4 : n;
}

void tegra3_cpu_idle_stats_lp2_ready(unsigned int cpu)
{
        idle_stats.cpu_ready_count[cpu_number(cpu)]++;
}

void tegra3_cpu_idle_stats_lp2_time(unsigned int cpu, s64 us)
{
        idle_stats.cpu_wants_lp2_time[cpu_number(cpu)] += us;
}

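/*
 * Decide whether LP2 may be entered now: all CPUs must have booted, the
 * chip-revision and online-CPU restrictions below must allow it, and on
 * CPU0 bits 1-3 of CLK_RST_CONTROLLER_CPU_CMPLX_STATUS must be set
 * (secondary CPUs powered down), no DVFS rail update may be in flight, and
 * the expected idle time must cover the state's target residency.
 */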
bool tegra3_lp2_is_allowed(struct cpuidle_device *dev,
        struct cpuidle_state *state)
{
        s64 request;

        if (!tegra_all_cpus_booted)
                return false;

        /* On A01, LP2 on slave CPUs causes random CPU hangs.
         * Refer to Bug 804085.
         */
        if ((tegra_get_revision() == TEGRA_REVISION_A01) &&
                num_online_cpus() > 1)
                return false;

        /* FIXME: All CPUs entering LP2 is not working.
         * Don't let CPU0 enter LP2 when any secondary CPU is online.
         */
        if ((dev->cpu == 0) && (num_online_cpus() > 1))
                return false;

        if (dev->cpu == 0) {
                u32 reg = readl(CLK_RST_CONTROLLER_CPU_CMPLX_STATUS);
                if ((reg & 0xE) != 0xE)
                        return false;

                if (tegra_dvfs_rail_updating(cpu_clk_for_dvfs))
                        return false;
        }

        request = ktime_to_us(tick_nohz_get_sleep_length());
        if (request < state->target_residency) {
                /* Not enough time left to enter LP2 */
                return false;
        }

        return true;
}

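/*
 * LP2 entry path for CPU0 when it is the last CPU to go idle.  With the GIC
 * distributor briefly disabled, interrupt affinity is rerouted to CPU0, the
 * wake time is clamped to the earliest secondary-CPU deadline, and the whole
 * CPU complex is powered down via tegra_idle_lp2_last().  When the sleep
 * completes without an early wake, the measured exit latency is fed back
 * into tegra_lp2_exit_latency.
 */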
static void tegra3_idle_enter_lp2_cpu_0(struct cpuidle_device *dev,
                           struct cpuidle_state *state, s64 request)
{
        ktime_t entry_time;
        ktime_t exit_time;
        bool sleep_completed = false;
        int bin;

        /* LP2 entry time */
        entry_time = ktime_get();

        if (request < state->target_residency) {
                /* Not enough time left to enter LP2 */
                tegra_cpu_wfi();
                return;
        }

#ifdef CONFIG_SMP
        if (!is_lp_cluster() && (num_online_cpus() > 1)) {
                s64 wake_time;
                unsigned int i;

                /* Disable the distributor -- this is the only way to
                   prevent the other CPUs from responding to interrupts
                   and potentially fiddling with the distributor
                   registers while we're fiddling with them. */
                tegra_gic_dist_disable();

                /* Did an interrupt come in for another CPU before we
                   could disable the distributor? */
                if (!tegra3_lp2_is_allowed(dev, state)) {
                        /* Yes, re-enable the distributor and LP3. */
                        tegra_gic_dist_enable();
                        tegra_cpu_wfi();
                        return;
                }

                /* Save and disable the affinity setting for the other
                   CPUs and route all interrupts to CPU0. */
                tegra_gic_disable_affinity();

                /* Re-enable the distributor. */
                tegra_gic_dist_enable();

                /* LP2 initial targeted wake time */
                wake_time = ktime_to_us(entry_time) + request;

                /* CPU0 must wake up before any of the other CPUs. */
                smp_rmb();
                for (i = 1; i < CONFIG_NR_CPUS; i++)
                        wake_time = min_t(s64, wake_time,
                                tegra_cpu_wake_by_time[i]);

                /* LP2 actual targeted wake time */
                request = wake_time - ktime_to_us(entry_time);
                BUG_ON(wake_time < 0LL);
        }
#endif

        if (request > state->target_residency) {
                s64 sleep_time = request - tegra_lp2_exit_latency;

                bin = time_to_bin((u32)request / 1000);
                idle_stats.tear_down_count[cpu_number(dev->cpu)]++;
                idle_stats.lp2_count++;
                idle_stats.lp2_count_bin[bin]++;

                trace_power_start(POWER_CSTATE, 2, dev->cpu);
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);

                if (tegra_idle_lp2_last(sleep_time, 0) == 0)
                        sleep_completed = true;
                else {
                        int irq = tegra_gic_pending_interrupt();
                        idle_stats.lp2_int_count[irq]++;
                }

                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
        }

#ifdef CONFIG_SMP
        if (!is_lp_cluster() && (num_online_cpus() > 1)) {

                /* Disable the distributor. */
                tegra_gic_dist_disable();

                /* Restore the other CPUs' interrupt affinity. */
                tegra_gic_restore_affinity();

                /* Re-enable the distributor. */
                tegra_gic_dist_enable();
        }
#endif

        exit_time = ktime_get();
        if (sleep_completed) {
                /*
                 * Stayed in LP2 for the full time until the next tick,
                 * adjust the exit latency based on measurement
                 */
                int offset = ktime_to_us(ktime_sub(exit_time, entry_time))
                        - request;
                int latency = tegra_lp2_exit_latency + offset / 16;
                latency = clamp(latency, 0, 10000);
                tegra_lp2_exit_latency = latency;
                smp_wmb();

                idle_stats.lp2_completed_count++;
                idle_stats.lp2_completed_count_bin[bin]++;
                idle_stats.in_lp2_time += ktime_to_us(
                        ktime_sub(exit_time, entry_time));

                pr_debug("%lld %lld %d %d\n", request,
                        ktime_to_us(ktime_sub(exit_time, entry_time)),
                        offset, bin);
        }
}

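/*
 * LP2 entry path for a CPU that is not handling the whole-complex shutdown
 * (see tegra3_idle_lp2()): arm the LP2 wake trigger, record the time this
 * CPU must be awakened by, save the local timer state, and power down just
 * this CPU via tegra3_sleep_cpu_secondary().
 */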
static void tegra3_idle_enter_lp2_cpu_n(struct cpuidle_device *dev,
                           struct cpuidle_state *state, s64 request)
{
#ifdef CONFIG_SMP
        s64 sleep_time = request - tegra_lp2_exit_latency;
        struct tegra_twd_context twd_context;

        if (request < tegra_lp2_exit_latency) {
                /*
                 * Not enough time left to enter LP2
                 */
                tegra_cpu_wfi();
                return;
        }

        tegra_lp2_set_trigger(sleep_time);

        idle_stats.tear_down_count[cpu_number(dev->cpu)]++;

        trace_power_start(POWER_CSTATE, 2, dev->cpu);

        /* Save time this CPU must be awakened by. */
        tegra_cpu_wake_by_time[dev->cpu] = ktime_to_us(ktime_get()) + request;
        smp_wmb();

        tegra_twd_suspend(&twd_context);

        tegra3_sleep_cpu_secondary(PLAT_PHYS_OFFSET - PAGE_OFFSET);

        tegra_cpu_wake_by_time[dev->cpu] = LLONG_MAX;

        tegra_twd_resume(&twd_context);

        if (sleep_time)
                tegra_lp2_set_trigger(0);
#endif
}

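/*
 * Top-level LP2 entry point called from the cpuidle driver.  When the last
 * CPU to go idle is CPU0 it powers down the whole CPU complex; in every
 * other case only the calling CPU is powered down.
 */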
void tegra3_idle_lp2(struct cpuidle_device *dev,
                           struct cpuidle_state *state)
{
        s64 request = ktime_to_us(tick_nohz_get_sleep_length());
        bool last_cpu = tegra_set_cpu_in_lp2(dev->cpu);

        cpu_pm_enter();

        if (last_cpu && (dev->cpu == 0))
                tegra3_idle_enter_lp2_cpu_0(dev, state, request);
        else
                tegra3_idle_enter_lp2_cpu_n(dev, state, request);

        cpu_pm_exit();
        tegra_clear_cpu_in_lp2(dev->cpu);
}

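/* Look up the G-cluster CPU clock ("cpu_g") used to veto LP2 during DVFS rail updates. */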
int tegra_cpudile_init_soc(void)
{
        cpu_clk_for_dvfs = tegra_get_clock_by_name("cpu_g");
        return 0;
}

#ifdef CONFIG_DEBUG_FS
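/* Dump the LP2 statistics collected above to a debugfs seq_file. */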
int tegra3_lp2_debug_show(struct seq_file *s, void *data)
{
        int bin;
        int i;
        seq_printf(s, "                                    cpu0     cpu1     cpu2     cpu3     cpulp\n");
        seq_printf(s, "-----------------------------------------------------------------------------\n");
        seq_printf(s, "cpu ready:                      %8u %8u %8u %8u %8u\n",
                idle_stats.cpu_ready_count[0],
                idle_stats.cpu_ready_count[1],
                idle_stats.cpu_ready_count[2],
                idle_stats.cpu_ready_count[3],
                idle_stats.cpu_ready_count[4]);
        seq_printf(s, "tear down:                      %8u %8u %8u %8u %8u\n",
                idle_stats.tear_down_count[0],
                idle_stats.tear_down_count[1],
                idle_stats.tear_down_count[2],
                idle_stats.tear_down_count[3],
                idle_stats.tear_down_count[4]);
        seq_printf(s, "lp2:            %8u\n", idle_stats.lp2_count);
        seq_printf(s, "lp2 completed:  %8u %7u%%\n",
                idle_stats.lp2_completed_count,
                idle_stats.lp2_completed_count * 100 /
                        (idle_stats.lp2_count ?: 1));

        seq_printf(s, "\n");
        seq_printf(s, "cpu ready time:                 %8llu %8llu %8llu %8llu %8llu ms\n",
                div64_u64(idle_stats.cpu_wants_lp2_time[0], 1000),
                div64_u64(idle_stats.cpu_wants_lp2_time[1], 1000),
                div64_u64(idle_stats.cpu_wants_lp2_time[2], 1000),
                div64_u64(idle_stats.cpu_wants_lp2_time[3], 1000),
                div64_u64(idle_stats.cpu_wants_lp2_time[4], 1000));

        seq_printf(s, "lp2 time:       %8llu ms      %7d%% %7d%% %7d%% %7d%% %7d%%\n",
                div64_u64(idle_stats.in_lp2_time, 1000),
                (int)(idle_stats.cpu_wants_lp2_time[0] ?
                        div64_u64(idle_stats.in_lp2_time * 100,
                        idle_stats.cpu_wants_lp2_time[0]) : 0),
                (int)(idle_stats.cpu_wants_lp2_time[1] ?
                        div64_u64(idle_stats.in_lp2_time * 100,
                        idle_stats.cpu_wants_lp2_time[1]) : 0),
                (int)(idle_stats.cpu_wants_lp2_time[2] ?
                        div64_u64(idle_stats.in_lp2_time * 100,
                        idle_stats.cpu_wants_lp2_time[2]) : 0),
                (int)(idle_stats.cpu_wants_lp2_time[3] ?
                        div64_u64(idle_stats.in_lp2_time * 100,
                        idle_stats.cpu_wants_lp2_time[3]) : 0),
                (int)(idle_stats.cpu_wants_lp2_time[4] ?
                        div64_u64(idle_stats.in_lp2_time * 100,
                        idle_stats.cpu_wants_lp2_time[4]) : 0));
        seq_printf(s, "\n");

        seq_printf(s, "%19s %8s %8s %8s\n", "", "lp2", "comp", "%");
        seq_printf(s, "-------------------------------------------------\n");
        for (bin = 0; bin < 32; bin++) {
                if (idle_stats.lp2_count_bin[bin] == 0)
                        continue;
                seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
                        1 << (bin - 1), 1 << bin,
                        idle_stats.lp2_count_bin[bin],
                        idle_stats.lp2_completed_count_bin[bin],
                        idle_stats.lp2_completed_count_bin[bin] * 100 /
                                idle_stats.lp2_count_bin[bin]);
        }

        seq_printf(s, "\n");
        seq_printf(s, "%3s %20s %6s %10s\n",
                "int", "name", "count", "last count");
        seq_printf(s, "--------------------------------------------\n");
        for (i = 0; i < NR_IRQS; i++) {
                if (idle_stats.lp2_int_count[i] == 0)
                        continue;
                seq_printf(s, "%3d %20s %6d %10d\n",
                        i, irq_to_desc(i)->action ?
                                irq_to_desc(i)->action->name ?: "???" : "???",
                        idle_stats.lp2_int_count[i],
                        idle_stats.lp2_int_count[i] -
                                idle_stats.last_lp2_int_count[i]);
                idle_stats.last_lp2_int_count[i] = idle_stats.lp2_int_count[i];
        }
        return 0;
}
#endif