ARM: tegra: fix dalmore-t114 compilation errors
[linux-3.10.git] arch/arm/mach-tegra/latency_allowance.c
/*
 * arch/arm/mach-tegra/latency_allowance.c
 *
 * Copyright (C) 2011-2012, NVIDIA CORPORATION. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/spinlock_types.h>
#include <linux/spinlock.h>
#include <linux/stringify.h>
#include <asm/bug.h>
#include <asm/io.h>
#include <asm/string.h>
#include <mach/iomap.h>
#include <mach/io.h>
#include <mach/latency_allowance.h>
#include "la_priv_common.h"
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
#include "tegra3_la_priv.h"
#else
#include "tegra11_la_priv.h"
#endif

#define ENABLE_LA_DEBUG		0
#define TEST_LA_CODE		0

#define la_debug(fmt, ...) \
	do { \
		if (ENABLE_LA_DEBUG) \
			printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__); \
	} while (0)

/* Bug 995270 */
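/*
 * When HACK_LA_FIFO is enabled, display client FIFOs are treated as only a
 * quarter of their real depth (see fifo_scale in tegra_set_latency_allowance())
 * so that the computed latency allowance is more conservative.
 */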
#define HACK_LA_FIFO 1

#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
static DEFINE_SPINLOCK(safety_lock);
static struct dentry *latency_debug_dir;
static int la_scaling_enable_count;
static unsigned short id_to_index[ID(MAX_ID) + 1];
static struct la_scaling_info scaling_info[TEGRA_LA_MAX_ID];

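/*
 * id_to_index[] is filled with 0xFF bytes at init time, so any ID that never
 * received a la_info_array entry reads back as 0xFFFF and is rejected here.
 */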
#define VALIDATE_ID(id) \
	do { \
		if (id >= TEGRA_LA_MAX_ID || id_to_index[id] == 0xFFFF) { \
			pr_err("%s: invalid Id=%d", __func__, id); \
			return -EINVAL; \
		} \
		BUG_ON(la_info_array[id_to_index[id]].id != id); \
	} while (0)

#define VALIDATE_BW(bw_in_mbps) \
	do { \
		if (bw_in_mbps >= 4096) \
			return -EINVAL; \
	} while (0)

#define VALIDATE_THRESHOLDS(tl, tm, th) \
	do { \
		if (tl > 100 || tm > 100 || th > 100) \
			return -EINVAL; \
	} while (0)

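/*
 * Convert the percentage thresholds stored in scaling_info[] into absolute
 * values (percentages of the currently programmed LA) and program them into
 * the client's low/mid/high threshold registers.
 */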
static void set_thresholds(struct la_scaling_reg_info *info,
			    enum tegra_la_id id)
{
	unsigned long reg_read;
	unsigned long reg_write;
	unsigned int thresh_low;
	unsigned int thresh_mid;
	unsigned int thresh_high;
	int la_set;
	int idx = id_to_index[id];

	reg_read = readl(la_info_array[idx].reg_addr);
	la_set = (reg_read & la_info_array[idx].mask) >>
		 la_info_array[idx].shift;
	/* la should be set before enabling scaling. */
	BUG_ON(la_set != scaling_info[idx].la_set);

	thresh_low = (scaling_info[idx].threshold_low * la_set) / 100;
	thresh_mid = (scaling_info[idx].threshold_mid * la_set) / 100;
	thresh_high = (scaling_info[idx].threshold_high * la_set) / 100;
	la_debug("%s: la_set=%d, thresh_low=%d(%d%%), thresh_mid=%d(%d%%),"
		" thresh_high=%d(%d%%) ", __func__, la_set,
		thresh_low, scaling_info[idx].threshold_low,
		thresh_mid, scaling_info[idx].threshold_mid,
		thresh_high, scaling_info[idx].threshold_high);

	reg_read = readl(info->tl_reg_addr);
	reg_write = (reg_read & ~info->tl_mask) |
		(thresh_low << info->tl_shift);
	writel(reg_write, info->tl_reg_addr);
	la_debug("reg_addr=0x%x, read=0x%x, write=0x%x",
		(u32)info->tl_reg_addr, (u32)reg_read, (u32)reg_write);

	reg_read = readl(info->tm_reg_addr);
	reg_write = (reg_read & ~info->tm_mask) |
		(thresh_mid << info->tm_shift);
	writel(reg_write, info->tm_reg_addr);
	la_debug("reg_addr=0x%x, read=0x%x, write=0x%x",
		(u32)info->tm_reg_addr, (u32)reg_read, (u32)reg_write);

	reg_read = readl(info->th_reg_addr);
	reg_write = (reg_read & ~info->th_mask) |
		(thresh_high << info->th_shift);
	writel(reg_write, info->th_reg_addr);
	la_debug("reg_addr=0x%x, read=0x%x, write=0x%x",
		(u32)info->th_reg_addr, (u32)reg_read, (u32)reg_write);
}

static void set_disp_latency_thresholds(enum tegra_la_id id)
{
	set_thresholds(&disp_info[id - ID(DISPLAY_0A)], id);
}

static void set_vi_latency_thresholds(enum tegra_la_id id)
{
	set_thresholds(&vi_info[id - ID(VI_WSB)], id);
}

/* Sets the latency allowance based on the client's memory bandwidth
 * requirement. The bandwidth passed in is in megabytes per second.
 */
int tegra_set_latency_allowance(enum tegra_la_id id,
				unsigned int bandwidth_in_mbps)
{
	int ideal_la;
	int la_to_set;
	unsigned long reg_read;
	unsigned long reg_write;
	unsigned int fifo_size_in_atoms;
	int bytes_per_atom = normal_atom_size;
	const int fifo_scale = 4;		/* 25% of the FIFO */
	struct la_client_info *ci;
	int idx;

	VALIDATE_ID(id);
	VALIDATE_BW(bandwidth_in_mbps);

	idx = id_to_index[id];
	ci = &la_info_array[idx];
	fifo_size_in_atoms = ci->fifo_size_in_atoms;

#if HACK_LA_FIFO
	/* pretend that our FIFO is only as deep as the lowest fullness
	 * we expect to see */
	if (id >= ID(DISPLAY_0A) && id <= ID(DISPLAY_HCB))
		fifo_size_in_atoms /= fifo_scale;
#endif

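	/*
	 * ideal_la is the time, in MC ticks, needed to drain the client's
	 * whole FIFO at the requested bandwidth:
	 *   (atoms * bytes/atom) * 1000 / (MB/s * ns_per_tick).
	 * The client's request-expiration time (in ticks) is then subtracted,
	 * plus one tick of margin.
	 *
	 * Worked example with illustrative numbers (the real values come from
	 * the tegra*_la_priv.h tables): a 64-atom FIFO of 16-byte atoms at
	 * 1000 MB/s with ns_per_tick = 30 gives
	 *   ideal_la = 64 * 16 * 1000 / (1000 * 30) = 34 ticks;
	 * with expiration_in_ns = 300 (10 ticks),
	 *   la_to_set = 34 - 10 - 1 = 23.
	 */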
	if (bandwidth_in_mbps == 0) {
		la_to_set = MC_LA_MAX_VALUE;
	} else {
		ideal_la = (fifo_size_in_atoms * bytes_per_atom * 1000) /
			   (bandwidth_in_mbps * ns_per_tick);
		la_to_set = ideal_la - (ci->expiration_in_ns/ns_per_tick) - 1;
	}

	la_debug("\n%s:id=%d,idx=%d, bw=%dmbps, la_to_set=%d",
		__func__, id, idx, bandwidth_in_mbps, la_to_set);
	la_to_set = (la_to_set < 0) ? 0 : la_to_set;
	la_to_set = (la_to_set > MC_LA_MAX_VALUE) ? MC_LA_MAX_VALUE : la_to_set;
	scaling_info[idx].actual_la_to_set = la_to_set;

	spin_lock(&safety_lock);
	reg_read = readl(ci->reg_addr);
	reg_write = (reg_read & ~ci->mask) |
			(la_to_set << ci->shift);
	writel(reg_write, ci->reg_addr);
	scaling_info[idx].la_set = la_to_set;
	la_debug("reg_addr=0x%x, read=0x%x, write=0x%x",
		(u32)ci->reg_addr, (u32)reg_read, (u32)reg_write);
	spin_unlock(&safety_lock);
	return 0;
}

/* Thresholds for scaling are specified as a percentage of FIFO freeness.
 * If threshold_low is 20%, then while the FIFO is between 0% and 20% free,
 * the programmed LA is used as-is.
 * If threshold_mid is 50%, then while the FIFO is between 20% and 50% free,
 * programmed_la/2 is used.
 * If threshold_high is 80%, then while the FIFO is between 50% and 80% free,
 * programmed_la/4 is used.
 * While the FIFO is between 80% and 100% free, an LA of 0 (highest priority)
 * is used.
 */
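/*
 * Hypothetical caller, for illustration only: a display driver that has
 * already programmed its LA via tegra_set_latency_allowance() could call
 *	tegra_enable_latency_scaling(ID(DISPLAY_0A), 20, 50, 80);
 * and should later balance it with tegra_disable_latency_scaling() on the
 * same ID, since enables and disables are reference counted.
 */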
int tegra_enable_latency_scaling(enum tegra_la_id id,
				    unsigned int threshold_low,
				    unsigned int threshold_mid,
				    unsigned int threshold_high)
{
	unsigned long reg;
	void __iomem *scaling_enable_reg = (void __iomem *)(MC_RA(ARB_OVERRIDE));
	int idx;

	VALIDATE_ID(id);
	VALIDATE_THRESHOLDS(threshold_low, threshold_mid, threshold_high);

	idx = id_to_index[id];
	if (la_info_array[idx].scaling_supported == false)
		goto exit;

	spin_lock(&safety_lock);

	la_debug("\n%s: id=%d, tl=%d, tm=%d, th=%d", __func__,
		id, threshold_low, threshold_mid, threshold_high);
	scaling_info[idx].threshold_low = threshold_low;
	scaling_info[idx].threshold_mid = threshold_mid;
	scaling_info[idx].threshold_high = threshold_high;
	scaling_info[idx].scaling_ref_count++;

	if (id >= ID(DISPLAY_0A) && id <= ID(DISPLAY_1BB))
		set_disp_latency_thresholds(id);
	else if (id >= ID(VI_WSB) && id <= ID(VI_WY))
		set_vi_latency_thresholds(id);
	if (!la_scaling_enable_count++) {
		reg = readl(scaling_enable_reg);
		reg |= (1 << GLOBAL_LATENCY_SCALING_ENABLE_BIT);
		writel(reg, scaling_enable_reg);
		la_debug("enabled scaling.");
	}
	spin_unlock(&safety_lock);
exit:
	return 0;
}

void tegra_disable_latency_scaling(enum tegra_la_id id)
{
	unsigned long reg;
	void __iomem *scaling_enable_reg = (void __iomem *)(MC_RA(ARB_OVERRIDE));
	int idx;

	BUG_ON(id >= TEGRA_LA_MAX_ID);
	idx = id_to_index[id];
	BUG_ON(la_info_array[idx].id != id);

	if (la_info_array[idx].scaling_supported == false)
		return;
	spin_lock(&safety_lock);
	la_debug("\n%s: id=%d", __func__, id);
	scaling_info[idx].scaling_ref_count--;
	BUG_ON(scaling_info[idx].scaling_ref_count < 0);

	if (!--la_scaling_enable_count) {
		reg = readl(scaling_enable_reg);
		reg = reg & ~(1 << GLOBAL_LATENCY_SCALING_ENABLE_BIT);
		writel(reg, scaling_enable_reg);
		la_debug("disabled scaling.");
	}
	spin_unlock(&safety_lock);
}

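/*
 * Called when the length of one MC tick changes (for example, across an EMC
 * rate change). If the tick gets longer, every programmed LA value is divided
 * by the same factor so the absolute latency it represents stays roughly
 * constant, and the G2 clients are then re-derived with the new ns_per_tick.
 */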
void tegra_latency_allowance_update_tick_length(unsigned int new_ns_per_tick)
{
	int i = 0;
	int la;
	unsigned long reg_read;
	unsigned long reg_write;
	unsigned long scale_factor = new_ns_per_tick / ns_per_tick;

	if (scale_factor > 1) {
		spin_lock(&safety_lock);
		ns_per_tick = new_ns_per_tick;
		for (i = 0; i < ARRAY_SIZE(la_info_array) - 1; i++) {
			reg_read = readl(la_info_array[i].reg_addr);
			la = ((reg_read & la_info_array[i].mask) >>
				la_info_array[i].shift) / scale_factor;

			reg_write = (reg_read & ~la_info_array[i].mask) |
					(la << la_info_array[i].shift);
			writel(reg_write, la_info_array[i].reg_addr);
			scaling_info[i].la_set = la;
		}
		spin_unlock(&safety_lock);

		/* Re-scale G2PR, G2SR, G2DR, G2DW with updated ns_per_tick */
		tegra_set_latency_allowance(TEGRA_LA_G2PR, 20);
		tegra_set_latency_allowance(TEGRA_LA_G2SR, 20);
		tegra_set_latency_allowance(TEGRA_LA_G2DR, 20);
		tegra_set_latency_allowance(TEGRA_LA_G2DW, 20);
	}
}

static int la_regs_show(struct seq_file *s, void *unused)
{
	unsigned i;
	unsigned long la;

	/* iterate the list, but don't print MAX_ID */
	for (i = 0; i < ARRAY_SIZE(la_info_array) - 1; i++) {
		la = (readl(la_info_array[i].reg_addr) & la_info_array[i].mask)
			>> la_info_array[i].shift;
		seq_printf(s, "%-16s: %4lu\n", la_info_array[i].name, la);
	}

	return 0;
}

static int dbg_la_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, la_regs_show, inode->i_private);
}

static const struct file_operations regs_fops = {
	.open		= dbg_la_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

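/*
 * Expose the current LA register settings through debugfs. With debugfs
 * mounted in its usual location, the snapshot is readable at
 * /sys/kernel/debug/tegra_latency/la_info.
 */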
static int __init tegra_latency_allowance_debugfs_init(void)
{
	if (latency_debug_dir)
		return 0;

	latency_debug_dir = debugfs_create_dir("tegra_latency", NULL);

	debugfs_create_file("la_info", S_IRUGO, latency_debug_dir, NULL,
		&regs_fops);

	return 0;
}

late_initcall(tegra_latency_allowance_debugfs_init);

static int __init tegra_latency_allowance_init(void)
{
	unsigned int i;

	la_scaling_enable_count = 0;
	memset(&id_to_index[0], 0xFF, sizeof(id_to_index));

	for (i = 0; i < ARRAY_SIZE(la_info_array); i++)
		id_to_index[la_info_array[i].id] = i;

	tegra_set_latency_allowance(TEGRA_LA_G2PR, 20);
	tegra_set_latency_allowance(TEGRA_LA_G2SR, 20);
	tegra_set_latency_allowance(TEGRA_LA_G2DR, 20);
	tegra_set_latency_allowance(TEGRA_LA_G2DW, 20);
	return 0;
}

core_initcall(tegra_latency_allowance_init);

#if TEST_LA_CODE
#define PRINT_ID_IDX_MAPPING 0
static int __init test_la(void)
{
	int i;
	int err;
	enum tegra_la_id id = 0;
	int repeat_count = 5;

#if PRINT_ID_IDX_MAPPING
	for (i = 0; i < ID(MAX_ID); i++)
		pr_info("ID=0x%x, Idx=0x%x", i, id_to_index[i]);
#endif

	do {
		for (id = 0; id < TEGRA_LA_MAX_ID; id++) {
			err = tegra_set_latency_allowance(id, 200);
			if (err)
				la_debug("\n***tegra_set_latency_allowance,"
					" err=%d", err);
		}

		for (id = 0; id < TEGRA_LA_MAX_ID; id++) {
			if (id >= ID(DISPLAY_0AB) && id <= ID(DISPLAY_HCB))
				continue;
			if (id >= ID(VI_WSB) && id <= ID(VI_WY))
				continue;
			err = tegra_enable_latency_scaling(id, 20, 50, 80);
			if (err)
				la_debug("\n***tegra_enable_latency_scaling,"
					" err=%d", err);
		}

		la_debug("la_scaling_enable_count=%d",
			la_scaling_enable_count);
		for (id = 0; id < TEGRA_LA_MAX_ID; id++) {
			if (id >= ID(DISPLAY_0AB) && id <= ID(DISPLAY_HCB))
				continue;
			if (id >= ID(VI_WSB) && id <= ID(VI_WY))
				continue;
			tegra_disable_latency_scaling(id);
		}
		la_debug("la_scaling_enable_count=%d",
			la_scaling_enable_count);
	} while (--repeat_count);
	return 0;
}

late_initcall(test_la);
#endif
#endif