ARM: tegra12: set CPU rate to 2.2GHz for sku 0x87
[linux-3.10.git] / arch / arm / mach-tegra / tegra_ptm.c
1 /*
2  * arch/arm/mach-tegra/tegra_ptm.c
3  *
4  * Copyright (c) 2012-2013, NVIDIA CORPORATION.  All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18 #include <linux/kernel.h>
19 #include <linux/io.h>
20 #include <linux/sysrq.h>
21 #include <linux/fs.h>
22 #include <linux/uaccess.h>
23 #include <linux/miscdevice.h>
24 #include <linux/vmalloc.h>
25 #include <linux/mutex.h>
26 #include <linux/module.h>
27 #include <linux/platform_device.h>
28 #include <linux/syscore_ops.h>
29 #include <linux/kallsyms.h>
30 #include <linux/clk.h>
31 #include <asm/sections.h>
32 #include <linux/cpu.h>
33 #include "pm.h"
34 #include "tegra_ptm.h"
35
36 /*
37  * inside ETB trace buffer, each instruction can be identified by the CPU. For
38  * the LP cluster, we assign it to a different ID in order to differentiate it
39  * from CPU core 0.
40  */
41 #define LP_CLUSTER_CPU_ID 0x9
42
/* How an address-range comparator pair is applied to trace filtering. */
enum range_type {
	NOT_USED = 0,	/* comparator pair disabled */
	RANGE_EXCLUDE,	/* trace everything outside the range */
	RANGE_INCLUDE,	/* trace only inside the range */
};
48
49 /* PTM tracer state */
50 struct tracectx {
51         void __iomem    *tpiu_regs;
52         void __iomem    *funnel_regs;
53         void __iomem    *etb_regs;
54         int             *last_etb;
55         unsigned int    etb_depth;
56         void __iomem    *ptm_regs[8];
57         int             ptm_regs_count;
58         unsigned long   flags;
59         int             ncmppairs;
60         int             ptm_contextid_size;
61         u32             etb_fc;
62         unsigned long   range_start[8];
63         unsigned long   range_end[8];
64         enum range_type range_type[8];
65         bool            dump_initial_etb;
66         struct device   *dev;
67         struct mutex    mutex;
68         unsigned int    timestamp_interval;
69         struct clk      *coresight_clk;
70         struct clk      *orig_parent_clk;
71         int             orig_clk_rate;
72         struct clk      *pll_p;
73 };
74
/*
 * Singleton tracer instance. Defaults: trace the whole kernel text
 * (range 0 = _stext.._etext, include), branch broadcasting enabled,
 * 2-byte context IDs, periodic timestamp every 0x8000 counter ticks.
 */
static struct tracectx tracer = {
	.range_start[0] = (unsigned long)_stext,
	.range_end[0]	= (unsigned long)_etext,
	.range_type[0]	= RANGE_INCLUDE,
	.flags		= TRACER_BRANCHOUTPUT,
	.etb_fc		= ETB_FF_CTRL_ENFTC,
	.ptm_contextid_size	= 2,
	.timestamp_interval = 0x8000,
};
84
85 static inline bool trace_isrunning(struct tracectx *t)
86 {
87         return !!(t->flags & TRACER_RUNNING);
88 }
89
90 static int ptm_set_power(struct tracectx *t, int id, bool on)
91 {
92         u32 v;
93
94         v = ptm_readl(t, id, PTM_CTRL);
95         if (on)
96                 v &= ~PTM_CTRL_POWERDOWN;
97         else
98                 v |= PTM_CTRL_POWERDOWN;
99         ptm_writel(t, id, v, PTM_CTRL);
100
101         return 0;
102 }
103
/*
 * Set or clear the PTM programming bit for CPU @id, then busy-wait
 * until the hardware reflects the requested state.  While the bit is
 * set, trace generation stops and configuration registers may be
 * written safely.
 *
 * Returns 0 on success, -EFAULT if the bit did not reach the requested
 * state within TRACER_TIMEOUT polling iterations (iterations, not a
 * fixed wall-clock time).
 */
static int ptm_set_programming_bit(struct tracectx *t, int id, bool on)
{
	u32 v;
	unsigned long timeout = TRACER_TIMEOUT;

	v = ptm_readl(t, id, PTM_CTRL);
	if (on)
		v |= PTM_CTRL_PROGRAM;
	else
		v &= ~PTM_CTRL_PROGRAM;
	ptm_writel(t, id, v, PTM_CTRL);

	/* raw spin on the status bit; each pass re-reads the hardware */
	while (--timeout) {
		if (on && ptm_progbit(t, id))
			break;
		if (!on && !ptm_progbit(t, id))
			break;
	}
	if (0 == timeout) {
		dev_err(t->dev, "PTM%d: %s progbit failed\n",
				id, on ? "set" : "clear");
		return -EFAULT;
	}
	return 0;
}
129
130 static void trace_set_cpu_funnel_port(struct tracectx *t, int id, bool on)
131 {
132         int cpu_funnel_mask[] = { FUNNEL_CTRL_CPU0, FUNNEL_CTRL_CPU1,
133                                   FUNNEL_CTRL_CPU2, FUNNEL_CTRL_CPU3 };
134         int funnel_ports;
135
136         etb_regs_unlock(t);
137
138         funnel_ports = funnel_readl(t, FUNNEL_CTRL);
139         if (on)
140                 funnel_ports |= cpu_funnel_mask[id];
141         else
142                 funnel_ports &= ~cpu_funnel_mask[id];
143
144         funnel_writel(t, funnel_ports, FUNNEL_CTRL);
145
146         etb_regs_lock(t);
147 }
148
149
150 static int ptm_setup_address_range(struct tracectx *t, int ptm_id, int cmp_id,
151                         unsigned long start, unsigned long end)
152 {
153         u32 flags;
154
155         /*
156          * We need know:
157          * 1. PFT 1.0 or PFT 1.1, and
158          * 2. Security Extension is implemented or not, and
159          * 3. privilege mode or user mode tracing required, and
160          * 4. security or non-security state tracing
161          * in order to set correct matching mode and state for this register.
162          *
163          * However using PTM_ACC_TYPE_PFT10_IGN_SECURITY will enable matching
164          * all modes and states under PFT 1.0 and 1.1
165          */
166         flags = PTM_ACC_TYPE_IGN_CONTEXTID | PTM_ACC_TYPE_PFT10_IGN_SECURITY;
167
168         /* PTM only supports instruction execute */
169         flags |= PTM_ACC_TYPE_INSTR_ONLY;
170
171         if (cmp_id < 0 || cmp_id >= t->ncmppairs)
172                 return -EINVAL;
173
174         /* first comparator for the range */
175         ptm_writel(t, ptm_id, flags, PTM_COMP_ACC_TYPE(cmp_id * 2));
176         ptm_writel(t, ptm_id, start, PTM_COMP_VAL(cmp_id * 2));
177
178         /* second comparator is right next to it */
179         ptm_writel(t, ptm_id, flags, PTM_COMP_ACC_TYPE(cmp_id * 2 + 1));
180         ptm_writel(t, ptm_id, end, PTM_COMP_VAL(cmp_id * 2 + 1));
181
182         return 0;
183 }
184
/*
 * Program PTM counter0 of CPU @id so a timestamp packet is inserted
 * into the trace stream every t->timestamp_interval counter ticks.
 * No-op (returns 0) when timestamping is off or the interval is 0.
 */
static int trace_config_periodic_timestamp(struct tracectx *t, int id)
{
	if (0 == (t->flags & TRACER_TIMESTAMP))
		return 0;

	/* if the counter down value is 0, we disable periodic timestamp */
	if (0 == t->timestamp_interval)
		return 0;

	/* config counter0 to count down: */

	/* set all the load value and reload value */
	ptm_writel(t, id, t->timestamp_interval, PTMCNTVR(0));
	ptm_writel(t, id, t->timestamp_interval, PTMCNTRLDVR(0));
	/* reload the counter0 value if counter0 reach 0 */
	ptm_writel(t, id, DEF_PTM_EVENT(LOGIC_A, 0, COUNTER0),
			PTMCNTRLDEVR(0));
	/* config the timestamp trigger event if counter0 is 0 */
	ptm_writel(t, id, DEF_PTM_EVENT(LOGIC_A, 0, COUNTER0),
			PTMTSEVR);
	/* start the counter0 now */
	ptm_writel(t, id, DEF_PTM_EVENT(LOGIC_A, 0, ALWAYS_TRUE),
			PTMCNTENR(0));
	return 0;
}
210
/*
 * Fully program PTM @id from the current tracer configuration: control
 * options, address-range comparators, include/exclude selection, trace
 * enable event, trace stream ID, Isync frequency and periodic
 * timestamping.  Must be called with the programming bit set.
 */
static int trace_program_ptm(struct tracectx *t, int id)
{
	u32 v;
	int i;
	int excl_flags = PTM_TRACE_ENABLE_EXCL_CTRL;
	int incl_flags = PTM_TRACE_ENABLE_INCL_CTRL;

	/* we must maintain programming bit here */
	v = PTM_CTRL_PROGRAM;
	v |= PTM_CTRL_CONTEXTIDSIZE(t->ptm_contextid_size);
	if (t->flags & TRACER_CYCLE_ACC)
		v |= PTM_CTRL_CYCLEACCURATE;
	if (t->flags & TRACER_BRANCHOUTPUT)
		v |= PTM_CTRL_BRANCH_OUTPUT;
	if (t->flags & TRACER_TIMESTAMP)
		v |= PTM_CTRL_TIMESTAMP_EN;
	if (t->flags & TRACER_RETURN_STACK)
		v |= PTM_CTRL_RETURN_STACK_EN;
	ptm_writel(t, id, v, PTM_CTRL);

	for (i = 0; i < ARRAY_SIZE(t->range_start); i++)
		if (t->range_type[i] != NOT_USED)
			ptm_setup_address_range(t, id, i,
					t->range_start[i],
					t->range_end[i]);

	/*
	 * after the range is set up, we enable the comparators based on
	 * inc/exc flags
	 *
	 * NOTE(review): CTRL1 is rewritten on every loop pass; with a mix
	 * of include and exclude ranges the last write wins, so only the
	 * last-seen type's accumulated flags end up in the register —
	 * confirm this is intended before restructuring.
	 */
	for (i = 0; i < ARRAY_SIZE(t->range_type); i++) {
		if (t->range_type[i] == RANGE_EXCLUDE) {
			excl_flags |= 1 << i;
			ptm_writel(t, id, excl_flags, PTM_TRACE_ENABLE_CTRL1);
		}
		if (t->range_type[i] == RANGE_INCLUDE) {
			incl_flags |= 1 << i;
			ptm_writel(t, id, incl_flags, PTM_TRACE_ENABLE_CTRL1);
		}
	}

	/* trace all permitted processor execution... */
	ptm_writel(t, id, DEF_PTM_EVENT(LOGIC_A, 0, ALWAYS_TRUE),
			PTM_TRACE_ENABLE_EVENT);

	/* assigning ATID for low power CPU */
	if (is_lp_cluster())
		ptm_writel(t, 0, LP_CLUSTER_CPU_ID, PTM_TRACEIDR);
	else
		ptm_writel(t, id, id, PTM_TRACEIDR);

	/* programming the Isync packet frequency */
	ptm_writel(t, id, 100, PTM_SYNC_FREQ);

	trace_config_periodic_timestamp(t, id);

	return 0;
}
269
/*
 * Bring up tracing on one PTM: open its funnel port, unlock and power
 * up the PTM, then program it with the programming bit held.
 */
static void trace_start_ptm(struct tracectx *t, int id)
{
	int ret;

	trace_set_cpu_funnel_port(t, id, true);

	ptm_regs_unlock(t, id);

	ptm_os_unlock(t, id);

	ptm_set_power(t, id, true);

	/* configuration is only legal while the programming bit is set */
	ptm_set_programming_bit(t, id, true);
	ret = trace_program_ptm(t, id);
	if (ret)
		dev_err(t->dev, "enable PTM%d failed\n", id);
	ptm_set_programming_bit(t, id, false);

	ptm_regs_lock(t, id);
}
290
291 static int trace_start(struct tracectx *t)
292 {
293         int id;
294         u32 etb_fc = t->etb_fc;
295         int ret;
296
297         t->orig_clk_rate = clk_get_rate(t->coresight_clk);
298         t->orig_parent_clk = clk_get_parent(t->coresight_clk);
299
300         ret = clk_set_parent(t->coresight_clk, t->pll_p);
301         if (ret < 0)
302                 return ret;
303         ret = clk_set_rate(t->coresight_clk, 144000000);
304         if (ret < 0)
305                 return ret;
306
307         etb_regs_unlock(t);
308
309         etb_writel(t, 0, ETB_WRITEADDR);
310         etb_writel(t, etb_fc, ETB_FF_CTRL);
311         etb_writel(t, TRACE_CAPATURE_ENABLE, ETB_CTRL);
312
313         t->dump_initial_etb = false;
314
315         etb_regs_lock(t);
316
317         /* configure ptm(s) */
318         for_each_online_cpu(id)
319                 trace_start_ptm(t, id);
320
321         t->flags |= TRACER_RUNNING;
322
323         return 0;
324 }
325
/*
 * Quiesce and power down one PTM, then close its funnel port.
 * Always returns 0.
 */
static int trace_stop_ptm(struct tracectx *t, int id)
{
	ptm_regs_unlock(t, id);

	/*
	 * when the programming bit is 1:
	 *  1. trace generation is stopped.
	 *  2. PTM FIFO is emptied
	 *  3. counter, sequencer and start/stop are held in current state.
	 *  4. external outputs are forced low.
	 */
	ptm_set_programming_bit(t, id, true);

	ptm_set_power(t, id, false);

	ptm_os_lock(t, id);

	ptm_regs_lock(t, id);

	/*
	 * Per ARM errata 780121 requires to disable the funnel port after PTM
	 * is disabled
	 */
	trace_set_cpu_funnel_port(t, id, false);

	return 0;
}
353
/*
 * Stop tracing on all online CPUs, disable ETB capture, and restore
 * the CoreSight clock to its pre-trace parent and rate.
 * Always returns 0 (a no-op if tracing was not running).
 */
static int trace_stop(struct tracectx *t)
{
	int id;

	if (!trace_isrunning(t))
		return 0;

	for_each_online_cpu(id)
		trace_stop_ptm(t, id);

	etb_regs_unlock(t);

	etb_writel(t, TRACE_CAPATURE_DISABLE, ETB_CTRL);

	etb_regs_lock(t);

	/* NOTE(review): failures restoring the clock are silently ignored */
	clk_set_parent(t->coresight_clk, t->orig_parent_clk);

	clk_set_rate(t->coresight_clk, t->orig_clk_rate);

	t->flags &= ~TRACER_RUNNING;

	return 0;
}
378
379 static int etb_getdatalen(struct tracectx *t)
380 {
381         u32 v;
382         int wp;
383
384         v = etb_readl(t, ETB_STATUS);
385
386         if (v & ETB_STATUS_FULL)
387                 return t->etb_depth;
388
389         wp = etb_readl(t, ETB_WRITEADDR);
390         return wp;
391 }
392
/* sysrq+v will always stop the running trace and leave it at that */
static void ptm_dump(void)
{
	struct tracectx *t = &tracer;
	u32 first = 0;
	int length;

	if (!t->etb_regs) {
		pr_info("No tracing hardware found\n");
		return;
	}

	/* stop capture first so the buffer contents are stable */
	if (trace_isrunning(t))
		trace_stop(t);

	etb_regs_unlock(t);

	length = etb_getdatalen(t);

	/* a full (wrapped) buffer's oldest word sits at the write pointer */
	if (length == t->etb_depth)
		first = etb_readl(t, ETB_WRITEADDR);

	etb_writel(t, first, ETB_READADDR);

	pr_info("Trace buffer contents length: %d\n", length);
	pr_info("--- ETB buffer begin ---\n");
	/* raw big-endian hex words, no separators, for offline decoding */
	for (; length; length--)
		pr_cont("%08x", cpu_to_be32(etb_readl(t, ETB_READMEM)));
	pr_info("\n--- ETB buffer end ---\n");

	etb_regs_lock(t);
}
425
426 static int etb_open(struct inode *inode, struct file *file)
427 {
428         struct miscdevice *miscdev = file->private_data;
429         struct tracectx *t = dev_get_drvdata(miscdev->parent);
430
431         if (NULL == t->etb_regs)
432                 return -ENODEV;
433
434         file->private_data = t;
435
436         return nonseekable_open(inode, file);
437 }
438
439 static ssize_t etb_read(struct file *file, char __user *data,
440                 size_t len, loff_t *ppos)
441 {
442         int total, i;
443         long length;
444         struct tracectx *t = file->private_data;
445         u32 first = 0;
446         u32 *buf;
447         int wpos;
448         int skip;
449         long wlength;
450         loff_t pos = *ppos;
451
452         mutex_lock(&t->mutex);
453
454         if (trace_isrunning(t)) {
455                 length = 0;
456                 goto out;
457         }
458
459         etb_regs_unlock(t);
460
461         total = etb_getdatalen(t);
462         if (total == 0 && t->dump_initial_etb)
463                 total = t->etb_depth;
464         if (total == t->etb_depth)
465                 first = etb_readl(t, ETB_WRITEADDR);
466
467         if (pos > total * 4) {
468                 skip = 0;
469                 wpos = total;
470         } else {
471                 skip = (int)pos % 4;
472                 wpos = (int)pos / 4;
473         }
474         total -= wpos;
475         first = (first + wpos) % t->etb_depth;
476
477         etb_writel(t, first, ETB_READADDR);
478
479         wlength = min(total, DIV_ROUND_UP(skip + (int)len, 4));
480         length = min(total * 4 - skip, (int)len);
481         if (wlength == 0) {
482                 etb_regs_lock(t);
483                 mutex_unlock(&t->mutex);
484                 return 0;
485         }
486
487         buf = vmalloc(wlength * 4);
488
489         dev_dbg(t->dev, "ETB read %ld bytes to %lld from %ld words at %d\n",
490                         length, pos, wlength, first);
491         dev_dbg(t->dev, "ETB buffer length: %d\n", total + wpos);
492         dev_dbg(t->dev, "ETB status reg: %x\n", etb_readl(t, ETB_STATUS));
493         for (i = 0; i < wlength; i++)
494                 buf[i] = etb_readl(t, ETB_READMEM);
495
496         etb_regs_lock(t);
497
498         length -= copy_to_user(data, (u8 *)buf + skip, length);
499         vfree(buf);
500         *ppos = pos + length;
501 out:
502         mutex_unlock(&t->mutex);
503         return length;
504 }
505
/*
 * this function will be called right after the PTM driver is initialized, it
 * will save the ETB contents from up to last reset.
 *
 * The snapshot lands in t->last_etb (always t->etb_depth words) and is
 * later served by last_etb_read(). Always returns 0.
 */
static ssize_t etb_save_last(struct tracectx *t)
{
	u32 first = 0;
	int i;
	int total;

	BUG_ON(!t->dump_initial_etb);


	etb_regs_unlock(t);
	/*
	 * not all ETB can maintain the ETB buffer write pointer after WDT
	 * reset, and we just read 0 for the write pointer; but we can still
	 * read/parse the ETB partially in this case.
	 */
	total = etb_getdatalen(t);
	if (total == 0 && t->dump_initial_etb)
		total = t->etb_depth;
	first = 0;
	if (total == t->etb_depth)
		first = etb_readl(t, ETB_WRITEADDR);

	/* copy the entire buffer starting from the oldest word */
	etb_writel(t, first, ETB_READADDR);
	for (i = 0; i < t->etb_depth; i++)
		t->last_etb[i] = etb_readl(t, ETB_READMEM);

	etb_regs_lock(t);

	return 0;
}
540
541 static ssize_t last_etb_read(struct file *file, char __user *data,
542                 size_t len, loff_t *ppos)
543 {
544         struct tracectx *t = file->private_data;
545         size_t last_etb_size;
546         size_t ret;
547
548         mutex_lock(&t->mutex);
549
550         ret = 0;
551         last_etb_size = t->etb_depth * sizeof(*t->last_etb);
552         if (*ppos >= last_etb_size)
553                 goto out;
554         if (*ppos + len > last_etb_size)
555                 len = last_etb_size - *ppos;
556         if (copy_to_user(data, (char *) t->last_etb + *ppos, len)) {
557                 ret = -EFAULT;
558                 goto out;
559         }
560         *ppos += len;
561         ret = len;
562 out:
563         mutex_unlock(&t->mutex);
564         return ret;
565 }
566
/* Release handler: etb_open() takes no references, so nothing to undo. */
static int etb_release(struct inode *inode, struct file *file)
{
	return 0;
}
572
573 /* use a sysfs file "trace_running" to start/stop tracing */
574 static ssize_t trace_running_show(struct kobject *kobj,
575                 struct kobj_attribute *attr,
576                 char *buf)
577 {
578         return sprintf(buf, "%x\n", trace_isrunning(&tracer));
579 }
580
581 static ssize_t trace_running_store(struct kobject *kobj,
582                 struct kobj_attribute *attr,
583                 const char *buf, size_t n)
584 {
585         unsigned int value;
586         int ret;
587
588         if (sscanf(buf, "%u", &value) != 1)
589                 return -EINVAL;
590
591         if (!tracer.etb_regs)
592                 return -ENODEV;
593
594         mutex_lock(&tracer.mutex);
595         if (value != 0)
596                 ret = trace_start(&tracer);
597         else
598                 ret = trace_stop(&tracer);
599         mutex_unlock(&tracer.mutex);
600
601         return ret ? : n;
602 }
603
/*
 * Dump a human-readable summary of the ETB state (length, pointers,
 * status, flush control) followed by the CTRL/STATUS registers of
 * every known PTM; offline CPUs are reported as OFFLINE instead of
 * being touched.
 */
static ssize_t trace_info_show(struct kobject *kobj,
		struct kobj_attribute *attr,
		char *buf)
{
	u32 etb_wa, etb_ra, etb_st, etb_fc, ptm_ctrl, ptm_st;
	int datalen;
	int id;
	int ret;

	mutex_lock(&tracer.mutex);
	if (tracer.etb_regs) {
		etb_regs_unlock(&tracer);
		datalen = etb_getdatalen(&tracer);
		etb_wa = etb_readl(&tracer, ETB_WRITEADDR);
		etb_ra = etb_readl(&tracer, ETB_READADDR);
		etb_st = etb_readl(&tracer, ETB_STATUS);
		etb_fc = etb_readl(&tracer, ETB_FF_CTRL);
		etb_regs_lock(&tracer);
	} else {
		/* no ETB mapped: report all-ones registers and length -1 */
		etb_wa = etb_ra = etb_st = etb_fc = ~0;
		datalen = -1;
	}

	ret = sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n"
			   "ETB_WRITEADDR:\t%08x\nETB_READADDR:\t%08x\n"
			   "ETB_STATUS:\t%08x\nETB_FF_CTRL:\t%08x\n",
			   datalen, tracer.ncmppairs,
			   etb_wa, etb_ra,
			   etb_st, etb_fc);

	for (id = 0; id < tracer.ptm_regs_count; id++) {
		/* reading a powered-down CPU's PTM would hang the bus */
		if (!cpu_online(id)) {
			ret += sprintf(buf + ret, "PTM_CTRL%d:\tOFFLINE\n"
				"PTM_STATUS%d:\tOFFLINE\n", id, id);
			continue;
		}
		ptm_regs_unlock(&tracer, id);
		ptm_ctrl = ptm_readl(&tracer, id, PTM_CTRL);
		ptm_st = ptm_readl(&tracer, id, PTM_STATUS);
		ptm_regs_lock(&tracer, id);
		ret += sprintf(buf + ret, "PTM_CTRL%d:\t%08x\n"
			"PTM_STATUS%d:\t%08x\n", id, ptm_ctrl, id, ptm_st);
	}
	mutex_unlock(&tracer.mutex);

	return ret;
}
651
652 static ssize_t trace_cycle_accurate_show(struct kobject *kobj,
653                 struct kobj_attribute *attr, char *buf)
654 {
655         return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_CYCLE_ACC));
656 }
657
658 static ssize_t trace_cycle_accurate_store(struct kobject *kobj,
659                 struct kobj_attribute *attr, const char *buf, size_t n)
660 {
661         unsigned int cycacc;
662
663         if (sscanf(buf, "%u", &cycacc) != 1)
664                 return -EINVAL;
665
666         mutex_lock(&tracer.mutex);
667
668         if (cycacc)
669                 tracer.flags |= TRACER_CYCLE_ACC;
670         else
671                 tracer.flags &= ~TRACER_CYCLE_ACC;
672
673         mutex_unlock(&tracer.mutex);
674
675         return n;
676 }
677
static ssize_t trace_contextid_size_show(struct kobject *kobj,
		struct kobj_attribute *attr,
		char *buf)
{
	/* 0: No context id tracing, 1: One byte, 2: Two bytes, 3: Four bytes */
	/* (1 << s) >> 1 maps the stored encoding 0,1,2,3 to 0,1,2,4 bytes */
	return sprintf(buf, "%d\n", (1 << tracer.ptm_contextid_size) >> 1);
}
685
686 static ssize_t trace_contextid_size_store(struct kobject *kobj,
687                 struct kobj_attribute *attr,
688                 const char *buf, size_t n)
689 {
690         unsigned int contextid_size;
691
692
693         if (sscanf(buf, "%u", &contextid_size) != 1)
694                 return -EINVAL;
695
696         if (contextid_size == 3 || contextid_size > 4)
697                 return -EINVAL;
698
699         mutex_lock(&tracer.mutex);
700         tracer.ptm_contextid_size = fls(contextid_size);
701         mutex_unlock(&tracer.mutex);
702
703         return n;
704 }
705
706 static ssize_t trace_branch_output_show(struct kobject *kobj,
707                 struct kobj_attribute *attr,
708                 char *buf)
709 {
710         return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_BRANCHOUTPUT));
711 }
712
713 static ssize_t trace_branch_output_store(struct kobject *kobj,
714                 struct kobj_attribute *attr,
715                 const char *buf, size_t n)
716 {
717         unsigned int branch_output;
718
719         if (sscanf(buf, "%u", &branch_output) != 1)
720                 return -EINVAL;
721
722         mutex_lock(&tracer.mutex);
723         if (branch_output) {
724                 tracer.flags |= TRACER_BRANCHOUTPUT;
725                 /* Branch broadcasting is incompatible with the return stack */
726                 if (tracer.flags == TRACER_RETURN_STACK)
727                         dev_err(tracer.dev, "Need turn off return stack too\n");
728                 tracer.flags &= ~TRACER_RETURN_STACK;
729         } else {
730                 tracer.flags &= ~TRACER_BRANCHOUTPUT;
731         }
732         mutex_unlock(&tracer.mutex);
733
734         return n;
735 }
736
737 static ssize_t trace_return_stack_show(struct kobject *kobj,
738                 struct kobj_attribute *attr,
739                 char *buf)
740 {
741         return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_RETURN_STACK));
742 }
743
744 static ssize_t trace_return_stack_store(struct kobject *kobj,
745                 struct kobj_attribute *attr,
746                 const char *buf, size_t n)
747 {
748         unsigned int return_stack;
749
750         if (sscanf(buf, "%u", &return_stack) != 1)
751                 return -EINVAL;
752
753         mutex_lock(&tracer.mutex);
754         if (return_stack) {
755                 tracer.flags |= TRACER_RETURN_STACK;
756                 /* Return stack is incompatible with branch broadcasting */
757                 tracer.flags &= ~TRACER_BRANCHOUTPUT;
758         } else {
759                 tracer.flags &= ~TRACER_RETURN_STACK;
760         }
761         mutex_unlock(&tracer.mutex);
762
763         return n;
764 }
765
766 static ssize_t trace_timestamp_show(struct kobject *kobj,
767                 struct kobj_attribute *attr,
768                 char *buf)
769 {
770         return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_TIMESTAMP));
771 }
772
773 static ssize_t trace_timestamp_store(struct kobject *kobj,
774                 struct kobj_attribute *attr,
775                 const char *buf, size_t n)
776 {
777         unsigned int timestamp;
778
779         if (sscanf(buf, "%d", &timestamp) < 1)
780                 return -EINVAL;
781
782         mutex_lock(&tracer.mutex);
783         if (timestamp)
784                 tracer.flags |= TRACER_TIMESTAMP;
785         else
786                 tracer.flags &= ~TRACER_TIMESTAMP;
787         mutex_unlock(&tracer.mutex);
788
789         return n;
790 }
791
792 static ssize_t trace_timestamp_interval_show(struct kobject *kobj,
793                 struct kobj_attribute *attr,
794                 char *buf)
795 {
796         return sprintf(buf, "%d\n", tracer.timestamp_interval);
797 }
798
799 static ssize_t trace_timestamp_interval_store(struct kobject *kobj,
800                 struct kobj_attribute *attr,
801                 const char *buf, size_t n)
802 {
803         if (sscanf(buf, "%d", &tracer.timestamp_interval) != 1)
804                 return -EINVAL;
805
806         return n;
807 }
808
809 static ssize_t trace_range_addr_show(struct kobject *kobj,
810                 struct kobj_attribute *attr, char *buf)
811 {
812         int id;
813
814         if (sscanf(attr->attr.name, "trace_range%d", &id) != 1)
815                 return -EINVAL;
816         if (id >= tracer.ncmppairs)
817                 return sprintf(buf, "invalid trace range comparator\n");
818
819         return sprintf(buf, "%08lx %08lx\n",
820                         tracer.range_start[id], tracer.range_end[id]);
821 }
822
823 static ssize_t trace_range_addr_store(struct kobject *kobj,
824                 struct kobj_attribute *attr, const char *buf, size_t n)
825 {
826         unsigned long range_start, range_end;
827         int id;
828
829         /* get the trace range ID */
830         if (sscanf(attr->attr.name, "trace_range%d", &id) != 1)
831                 return -EINVAL;
832
833         if (sscanf(buf, "%lx %lx", &range_start, &range_end) != 2)
834                 return -EINVAL;
835
836         if (id >= tracer.ncmppairs)
837                 return -EINVAL;
838
839         mutex_lock(&tracer.mutex);
840         tracer.range_start[id] = range_start;
841         tracer.range_end[id] = range_end;
842         mutex_unlock(&tracer.mutex);
843
844         return n;
845 }
846
847 static ssize_t trace_range_type_show(struct kobject *kobj,
848                 struct kobj_attribute *attr, char *buf)
849 {
850         int id;
851         char *str;
852
853         if (sscanf(attr->attr.name, "trace_range_type%d", &id) != 1)
854                 return -EINVAL;
855
856         if (id >= tracer.ncmppairs)
857                 return sprintf(buf, "invalid trace range comparator\n");
858
859         if (tracer.range_type[id] == NOT_USED)
860                 str = "disable";
861         if (tracer.range_type[id] == RANGE_EXCLUDE)
862                 str = "exclude";
863         if (tracer.range_type[id] == RANGE_INCLUDE)
864                 str = "include";
865         return sprintf(buf, "%s\n", str);
866 }
867
/*
 * Set the range type via the strings "disable" (or "0"), "include" or
 * "exclude" for the comparator encoded in the attribute name.
 */
static ssize_t trace_range_type_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t n)
{
	int id;
	size_t size;

	if (sscanf(attr->attr.name, "trace_range_type%d", &id) != 1)
		return -EINVAL;

	if (id >= tracer.ncmppairs)
		return -EINVAL;

	mutex_lock(&tracer.mutex);
	/* n - 1 assumes a trailing newline from `echo`; compare up to it */
	size = n - 1;
	if (0 == strncmp("disable", buf, size) || 0 == strncmp("0", buf, size))
		tracer.range_type[id] = NOT_USED;
	else if (0 == strncmp("include", buf, size))
		tracer.range_type[id] = RANGE_INCLUDE;
	else if (0 == strncmp("exclude", buf, size))
		tracer.range_type[id] = RANGE_EXCLUDE;
	else
		n = -EINVAL;
	mutex_unlock(&tracer.mutex);

	return n;
}
894
/*
 * Set a trace range by kernel symbol name instead of raw addresses.
 * The magic name "all" selects the whole kernel text; otherwise the
 * symbol is resolved via kallsyms and the range covers that symbol.
 * Returns the (possibly truncated-to-buffer) byte count consumed, or
 * -EINVAL when the attr index or symbol lookup fails.
 */
static ssize_t trace_function_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t n)
{
	unsigned long range_start, size, offset;
	char name[KSYM_NAME_LEN];
	char mod_name[KSYM_NAME_LEN];
	int id;

	if (sscanf(attr->attr.name, "trace_range_function%d", &id) != 1)
		return -EINVAL;
	if (id >= tracer.ncmppairs)
		return -EINVAL;

	/* bounded copy; the write may include the trailing newline */
	n = min(n, sizeof(name));
	strncpy(name, buf, n);
	name[n - 1] = '\0';

	if (strncmp("all", name, 3) == 0) {
		/* magic "all" for tracking the kernel */
		range_start = (unsigned long) _stext;
		size = (unsigned long) _etext - (unsigned long) _stext + 4;
	} else {

		range_start = kallsyms_lookup_name(name);
		if (range_start == 0)
			return -EINVAL;

		if (0 > lookup_symbol_attrs(range_start, &size, &offset,
						mod_name, name))
			return -EINVAL;
	}

	mutex_lock(&tracer.mutex);
	tracer.range_start[id] = range_start;
	/* end is the last instruction word, hence size - 4 */
	tracer.range_end[id] = range_start + size - 4;
	mutex_unlock(&tracer.mutex);

	return n;
}
934
/*
 * List the kernel symbols covered by the configured trace range, one
 * per line, truncating with a notice once the sysfs page is nearly
 * full.  Returns the byte count written or -EINVAL on lookup failure.
 */
static ssize_t trace_function_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	unsigned long range_start, size, offset;
	char name[KSYM_NAME_LEN];
	char mod_name[KSYM_NAME_LEN];
	int id;
	int ret;

	if (sscanf(attr->attr.name, "trace_range_function%d", &id) != 1)
		return -EINVAL;
	if (id >= tracer.ncmppairs)
		return -EINVAL;

	range_start = tracer.range_start[id];
	ret = 0;
	/*
	 * NOTE(review): advances by the looked-up symbol size; if
	 * lookup_symbol_attrs() ever reported size == 0 this would not
	 * make progress — confirm kallsyms guarantees size > 0 here.
	 */
	while (range_start <= tracer.range_end[id]) {
		if (0 > lookup_symbol_attrs(range_start, &size,
					&offset, mod_name, name))
			return -EINVAL;
		range_start += size;
		ret += sprintf(buf + ret, "%s\n", name);
		/* keep room for one more symbol name plus the notice */
		if (ret > (PAGE_SIZE - KSYM_NAME_LEN)) {
			ret += sprintf(buf + ret, "...\nGood news, everyone!");
			ret += sprintf(buf + ret, " Too many to list\n");
			break;
		}
	}
	return ret;
}
965
/*
 * Shorthand for declaring a sysfs attribute named trace_<a> with mode <b>,
 * wired to trace_<c>_show() and trace_<d>_store().
 */
#define A(a, b, c, d)   __ATTR(trace_##a, b, \
		trace_##c##_show, trace_##d##_store)
/* sysfs control files exposed under the platform device's kobject */
static const struct kobj_attribute trace_attr[] = {
	__ATTR(trace_info,      0444, trace_info_show,  NULL),
	A(running,              0644, running,          running),
	/* range0-3 / range_function0-3 / range_type0-3 map onto the
	 * (up to four) PTM address comparator pairs */
	A(range0,               0644, range_addr,       range_addr),
	A(range1,               0644, range_addr,       range_addr),
	A(range2,               0644, range_addr,       range_addr),
	A(range3,               0644, range_addr,       range_addr),
	A(range_function0,      0644, function,         function),
	A(range_function1,      0644, function,         function),
	A(range_function2,      0644, function,         function),
	A(range_function3,      0644, function,         function),
	A(range_type0,          0644, range_type,       range_type),
	A(range_type1,          0644, range_type,       range_type),
	A(range_type2,          0644, range_type,       range_type),
	A(range_type3,          0644, range_type,       range_type),
	A(cycle_accurate,       0644, cycle_accurate,   cycle_accurate),
	A(contextid_size,       0644, contextid_size,   contextid_size),
	A(branch_output,        0644, branch_output,    branch_output),
	A(return_stack,         0644, return_stack,     return_stack),
	A(timestamp,            0644, timestamp,        timestamp),
	A(timestamp_interval,   0644, timestamp_interval, timestamp_interval)
};
990
/* Raw accessors into the Tegra clock-and-reset (CAR) register bank,
 * used below to gate and source the TPIU trace clock. */
#define clk_readl(reg)  __raw_readl(reg_clk_base + (reg))
#define clk_writel(value, reg) __raw_writel(value, reg_clk_base + (reg))
static void __iomem *reg_clk_base = IO_ADDRESS(TEGRA_CLK_RESET_BASE);
994
/*
 * Put the TPIU into a quiescent (flushed/stopped) state and configure the
 * CoreSight funnel to accept trace from all four CPU ports.
 *
 * The register writes below are order-sensitive: the TPIU trace clock is
 * enabled just long enough to program the TPIU formatter, then gated off
 * again (trace is captured in the on-chip ETB, so the TPIU output path is
 * not needed).  Always returns 0.
 */
static int funnel_and_tpiu_init(struct tracectx *t)
{
	u32 tmp;

	/* Enable the trace clk for the TPIU */
	tmp = clk_readl(CLK_TPIU_OUT_ENB_U);
	tmp |= CLK_TPIU_OUT_ENB_U_TRACKCLK_IN;
	clk_writel(tmp, CLK_TPIU_OUT_ENB_U);

	/* set trace clk of TPIU using the pll_p */
	tmp = clk_readl(CLK_TPIU_SRC_TRACECLKIN);
	tmp &= ~CLK_TPIU_SRC_TRACECLKIN_SRC_MASK;
	tmp |= CLK_TPIU_SRC_TRACECLKIN_PLLP;
	clk_writel(tmp, CLK_TPIU_SRC_TRACECLKIN);

	/* disable TPIU: stop on flush completion, then trigger a manual
	 * flush (registers must be unlocked around the writes) */
	tpiu_regs_unlock(t);
	tpiu_writel(t, TPIU_FF_CTRL_STOPFL, TPIU_FF_CTRL);
	tpiu_writel(t, TPIU_FF_CTRL_STOPFL | TPIU_FF_CTRL_MANUAL_FLUSH,
			TPIU_FF_CTRL);
	tpiu_regs_lock(t);

	/* Disable TPIU clk */
	tmp = clk_readl(CLK_TPIU_OUT_ENB_U);
	tmp &= ~CLK_TPIU_OUT_ENB_U_TRACKCLK_IN;
	clk_writel(tmp, CLK_TPIU_OUT_ENB_U);

	funnel_regs_unlock(t);
	/* enable CPU0 - 3 for funnel ports, also assign funnel hold time */
	funnel_writel(t, FUNNEL_MINIMUM_HOLD_TIME(0) |
			FUNNEL_CTRL_CPU0 | FUNNEL_CTRL_CPU1 |
			FUNNEL_CTRL_CPU2 | FUNNEL_CTRL_CPU3,
			FUNNEL_CTRL);

	/* Assign CPU0-3 funnel priority (all-ones first, then all ports
	 * at equal priority 0) */
	funnel_writel(t, 0xFFFFFFFF, FUNNEL_PRIORITY);
	funnel_writel(t, FUNNEL_PRIORITY_CPU0(0) | FUNNEL_PRIORITY_CPU1(0) |
			 FUNNEL_PRIORITY_CPU2(0) | FUNNEL_PRIORITY_CPU3(0),
			 FUNNEL_PRIORITY);
	funnel_regs_lock(t);

	return 0;
}
1038
/* file operations for /dev/etb: read the live ETB trace buffer */
static const struct file_operations etb_fops = {
	.owner = THIS_MODULE,
	.read = etb_read,
	.open = etb_open,
	.release = etb_release,
	.llseek = no_llseek,
};

/* file operations for /dev/last_etb: read the ETB snapshot saved at boot */
static const struct file_operations last_etb_fops = {
	.owner = THIS_MODULE,
	.read = last_etb_read,
	.open = etb_open,
	.release = etb_release,
	.llseek = no_llseek,
};

static struct miscdevice etb_miscdev = {
	.name = "etb",
	.minor = MISC_DYNAMIC_MINOR,
	.fops = &etb_fops,
};

static struct miscdevice last_etb_miscdev = {
	.name = "last_etb",
	.minor = MISC_DYNAMIC_MINOR,
	.fops = &last_etb_fops,
};
1066
1067 void ptm_power_idle_resume(int cpu)
1068 {
1069         struct tracectx *t = &tracer;
1070
1071         if (trace_isrunning(&tracer))
1072                 trace_start_ptm(t, cpu);
1073 }
1074
1075 static int etb_init(struct tracectx *t)
1076 {
1077         int ret;
1078
1079         t->dump_initial_etb = true;
1080
1081         etb_regs_unlock(t);
1082
1083         t->etb_depth = etb_readl(t, ETB_DEPTH);
1084         /* make sure trace capture is disabled */
1085         etb_writel(t, TRACE_CAPATURE_DISABLE, ETB_CTRL);
1086         etb_writel(t, ETB_FF_CTRL_STOPFL, ETB_FF_CTRL);
1087
1088         etb_regs_lock(t);
1089
1090         t->last_etb = devm_kzalloc(t->dev, t->etb_depth * sizeof(*t->last_etb),
1091                                         GFP_KERNEL);
1092         if (NULL == t->last_etb) {
1093                 dev_err(t->dev, "failes to allocate memory to hold ETB\n");
1094                 return -ENOMEM;
1095         }
1096
1097         etb_miscdev.parent = t->dev;
1098         ret = misc_register(&etb_miscdev);
1099         if (ret) {
1100                 dev_err(t->dev, "failes to register /dev/etb\n");
1101                 return ret;
1102         }
1103         last_etb_miscdev.parent = t->dev;
1104         ret = misc_register(&last_etb_miscdev);
1105         if (ret) {
1106                 dev_err(t->dev, "failes to register /dev/last_etb\n");
1107                 return ret;
1108         }
1109
1110         dev_info(t->dev, "ETB is initialized.\n");
1111         return 0;
1112 }
1113
/*
 * Syscore resume hook: if tracing was running before suspend but the ETB
 * lost its capture-enable bit, the core power domain was cut (LP0) and
 * the TPIU/funnel plus the trace session must be brought back up.
 */
static void tegra_ptm_enter_resume(void)
{
#ifdef CONFIG_PM_SLEEP
	if (!trace_isrunning(&tracer))
		return;

	/*
	 * On Tegra, LP0 will cut off VDD_CORE, and in that case
	 * TPIU and FUNNEL need to be initialized again.
	 */
	if (TRACE_CAPATURE_ENABLE & etb_readl(&tracer, ETB_CTRL))
		return;

	funnel_and_tpiu_init(&tracer);
	trace_start(&tracer);
#endif
}
1129
/* restore trace hardware state when the system resumes from suspend */
static struct syscore_ops tegra_ptm_enter_syscore_ops = {
	.resume = tegra_ptm_enter_resume,
};
1133
1134 static int tegra_ptm_cpu_notify(struct notifier_block *self,
1135                                     unsigned long action, void *cpu)
1136 {
1137         struct tracectx *t = &tracer;
1138
1139         /* re-initialize the PTM if the PTM's CPU back on online */
1140         switch (action) {
1141         case CPU_STARTING:
1142                 if (trace_isrunning(t))
1143                         trace_start_ptm(t, (int)cpu);
1144                 break;
1145         default:
1146                 break;
1147         }
1148
1149         return NOTIFY_OK;
1150 }
1151
/* hotplug notifier: re-arm the PTM on each CPU as it comes online */
static struct notifier_block tegra_ptm_cpu_nb = {
	.notifier_call = tegra_ptm_cpu_notify,
};
1155
1156 static int ptm_probe(struct platform_device *dev)
1157 {
1158         struct tracectx *t = &tracer;
1159         int ret = 0;
1160         int i;
1161
1162         mutex_lock(&t->mutex);
1163
1164         t->dev = &dev->dev;
1165         platform_set_drvdata(dev, t);
1166
1167         /* PLL_P can be used for CoreSight parent clock at high freq */
1168         t->pll_p = clk_get_sys(NULL, "pll_p");
1169         if (IS_ERR(t->pll_p)) {
1170                 dev_err(&dev->dev, "Could not get pll_p clock\n");
1171                 goto out;
1172         }
1173
1174         /* eanble the CoreSight(csite) clock for PTM/FUNNEL/ETB/TPIU */
1175         t->coresight_clk = clk_get_sys("csite", NULL);
1176         if (IS_ERR(t->coresight_clk)) {
1177                 dev_err(&dev->dev, "Could not get csite clock\n");
1178                 goto out;
1179         }
1180         clk_enable(t->coresight_clk);
1181
1182         /* get all PTM resrouces */
1183         t->ptm_regs_count = 0;
1184         ret = -ENOMEM;
1185         for (i = 0; i < dev->num_resources; i++) {
1186                 struct resource *res;
1187                 void __iomem *addr;
1188
1189                 res = platform_get_resource(dev, IORESOURCE_MEM, i);
1190                 if (NULL == res)
1191                         goto out;
1192                 addr = devm_ioremap_nocache(&dev->dev, res->start,
1193                                 resource_size(res));
1194                 if (NULL == addr)
1195                         goto out;
1196
1197                 if (0 == strncmp("ptm", res->name, 3)) {
1198                         t->ptm_regs[t->ptm_regs_count] = addr;
1199                         t->ptm_regs_count++;
1200                 }
1201                 if (0 == strncmp("etb", res->name, 3))
1202                         t->etb_regs = addr;
1203                 if (0 == strncmp("tpiu", res->name, 4))
1204                         t->tpiu_regs = addr;
1205                 if (0 == strncmp("funnel", res->name, 6))
1206                         t->funnel_regs = addr;
1207         }
1208         /* at least one PTM is required */
1209         if (t->ptm_regs[0] == NULL || t->etb_regs == NULL ||
1210             t->tpiu_regs == NULL   || t->funnel_regs == NULL) {
1211                 dev_err(&dev->dev, "Could not get PTM resources\n");
1212                 goto out;
1213         }
1214
1215         if (0x13 != (0xff & ptm_readl(t, 0, CORESIGHT_DEVTYPE))) {
1216                 dev_err(&dev->dev, "Did not find correct PTM device type\n");
1217                 goto out;
1218         }
1219
1220         t->ncmppairs = 0xf & ptm_readl(t, 0, PTM_CONFCODE);
1221
1222         /* initialize ETB, TPIU, FUNNEL and PTM */
1223         ret = etb_init(t);
1224         if (ret)
1225                 goto out;
1226
1227         ret = funnel_and_tpiu_init(t);
1228         if (ret)
1229                 goto out;
1230
1231         for (i = 0; i < t->ptm_regs_count; i++)
1232                 trace_stop_ptm(t, i);
1233
1234         /* create sysfs */
1235         for (i = 0; i < ARRAY_SIZE(trace_attr); i++) {
1236                 ret = sysfs_create_file(&dev->dev.kobj, &trace_attr[i].attr);
1237                 if (ret)
1238                         dev_err(&dev->dev, "failed to create %s\n",
1239                                         trace_attr[i].attr.name);
1240         }
1241
1242         /* register CPU PM/hotplug related callback */
1243         register_syscore_ops(&tegra_ptm_enter_syscore_ops);
1244         register_cpu_notifier(&tegra_ptm_cpu_nb);
1245
1246         dev_info(&dev->dev, "PTM driver initialized.\n");
1247
1248         etb_save_last(t);
1249
1250         /* start the PTM and ETB now */
1251         trace_start(t);
1252 out:
1253         dev_err(&dev->dev, "Failed to start the PTM device\n");
1254         mutex_unlock(&t->mutex);
1255         return ret;
1256 }
1257
1258 static int ptm_remove(struct platform_device *dev)
1259 {
1260         struct tracectx *t = &tracer;
1261         int i;
1262
1263         unregister_cpu_notifier(&tegra_ptm_cpu_nb);
1264         unregister_syscore_ops(&tegra_ptm_enter_syscore_ops);
1265         for (i = 0; i < ARRAY_SIZE(trace_attr); i++)
1266                 sysfs_remove_file(&dev->dev.kobj, &trace_attr[i].attr);
1267
1268         mutex_lock(&t->mutex);
1269
1270         devm_iounmap(&dev->dev, t->ptm_regs);
1271         devm_iounmap(&dev->dev, t->tpiu_regs);
1272         devm_iounmap(&dev->dev, t->funnel_regs);
1273         devm_iounmap(&dev->dev, t->etb_regs);
1274         t->etb_regs = NULL;
1275         clk_disable(t->coresight_clk);
1276         clk_put(t->coresight_clk);
1277
1278         mutex_unlock(&t->mutex);
1279
1280         return 0;
1281 }
1282
/* binds to the platform device named "ptm" registered by board code */
static struct platform_driver ptm_driver = {
	.probe          = ptm_probe,
	.remove         = ptm_remove,
	.driver         = {
		.name   = "ptm",
		.owner  = THIS_MODULE,
	},
};
1291
1292 static void sysrq_ptm_dump(int key)
1293 {
1294         if (!mutex_trylock(&tracer.mutex)) {
1295                 pr_info("Tracing hardware busy\n");
1296                 return;
1297         }
1298         dev_dbg(tracer.dev, "Dumping ETB buffer\n");
1299         ptm_dump();
1300         mutex_unlock(&tracer.mutex);
1301 }
1302
/* SysRq 'v' key: dump the PTM/ETB trace buffer */
static struct sysrq_key_op sysrq_ptm_op = {
	.handler = sysrq_ptm_dump,
	.help_msg = "PTM buffer dump(V)",
	.action_msg = "ptm",
};
1308
1309 static int __init tegra_ptm_driver_init(void)
1310 {
1311         int retval;
1312
1313         mutex_init(&tracer.mutex);
1314
1315         retval = platform_driver_register(&ptm_driver);
1316         if (retval) {
1317                 pr_err("Failed to probe ptm\n");
1318                 return retval;
1319         }
1320
1321         /* not being able to install this handler is not fatal */
1322         (void)register_sysrq_key('v', &sysrq_ptm_op);
1323
1324         return 0;
1325 }
1326 device_initcall(tegra_ptm_driver_init);