nv-tegra.nvidia Code Review - linux-2.6.git/blobdiff - drivers/clocksource/sh_cmt.c
clocksource: improve sh_cmt clocksource overflow handling
[linux-2.6.git] / drivers / clocksource / sh_cmt.c
index 02bae3994abe90889687778e6e8e49462c38d67f..d607ac2d516bda0cddbee933b833e48735727707 100644 (file)
@@ -47,6 +47,7 @@ struct sh_cmt_priv {
        unsigned long rate;
        spinlock_t lock;
        struct clock_event_device ced;
+       struct clocksource cs;
        unsigned long total_cycles;
 };
 
@@ -110,16 +111,21 @@ static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p,
                                        int *has_wrapped)
 {
        unsigned long v1, v2, v3;
+       int o1, o2;
+
+       o1 = sh_cmt_read(p, CMCSR) & p->overflow_bit;
 
        /* Make sure the timer value is stable. Stolen from acpi_pm.c */
        do {
+               o2 = o1;
                v1 = sh_cmt_read(p, CMCNT);
                v2 = sh_cmt_read(p, CMCNT);
                v3 = sh_cmt_read(p, CMCNT);
-       } while (unlikely((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1)
-                         || (v3 > v1 && v3 < v2)));
+               o1 = sh_cmt_read(p, CMCSR) & p->overflow_bit;
+       } while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
+                         || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));
 
-       *has_wrapped = sh_cmt_read(p, CMCSR) & p->overflow_bit;
+       *has_wrapped = o1;
        return v2;
 }
 
@@ -376,6 +382,68 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
        spin_unlock_irqrestore(&p->lock, flags);
 }
 
+/* Map a struct clocksource back to its containing sh_cmt_priv. */
+static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs)
+{
+       return container_of(cs, struct sh_cmt_priv, cs);
+}
+
+/*
+ * Clocksource ->read callback: return the accumulated cycle count
+ * (p->total_cycles) plus the current raw hardware counter value.
+ * p->lock is taken so total_cycles and the counter are sampled as a
+ * consistent pair.
+ */
+static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
+{
+       struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+       unsigned long flags, raw;
+       unsigned long value;
+       int has_wrapped;
+
+       spin_lock_irqsave(&p->lock, flags);
+       value = p->total_cycles;
+       raw = sh_cmt_get_counter(p, &has_wrapped);
+
+       /*
+        * Counter overflowed but the overflow has not been folded into
+        * total_cycles yet; compensate by adding one full match period.
+        */
+       if (unlikely(has_wrapped))
+               raw += p->match_value;
+       spin_unlock_irqrestore(&p->lock, flags);
+
+       return value + raw;
+}
+
+/*
+ * Clocksource ->enable callback: reset the cycle accumulator, start the
+ * timer channel in clocksource mode, then derive mult from the rate that
+ * sh_cmt_start() established.  Returns 0 on success or the error from
+ * sh_cmt_start().
+ */
+static int sh_cmt_clocksource_enable(struct clocksource *cs)
+{
+       struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+       int ret;
+
+       p->total_cycles = 0;
+
+       ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
+       if (ret)
+               return ret;
+
+       /* TODO: calculate good shift from rate and counter bit width */
+       cs->shift = 0;
+       cs->mult = clocksource_hz2mult(p->rate, cs->shift);
+       return 0;
+}
+
+/* Clocksource ->disable callback: stop the timer channel's clocksource use. */
+static void sh_cmt_clocksource_disable(struct clocksource *cs)
+{
+       sh_cmt_stop(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE);
+}
+
+/*
+ * Fill in the embedded struct clocksource and register it with the
+ * clocksource core.  mult/shift are left for ->enable to compute, since
+ * the rate is only known once the channel has been started.
+ * Always returns 0 (clocksource_register()'s result is not checked here).
+ */
+static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
+                                      char *name, unsigned long rating)
+{
+       struct clocksource *cs = &p->cs;
+
+       cs->name = name;
+       cs->rating = rating;
+       cs->read = sh_cmt_clocksource_read;
+       cs->enable = sh_cmt_clocksource_enable;
+       cs->disable = sh_cmt_clocksource_disable;
+       /* full-width mask for an unsigned long counter accumulator */
+       cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
+       cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
+       pr_info("sh_cmt: %s used as clock source\n", cs->name);
+       clocksource_register(cs);
+       return 0;
+}
+
 static struct sh_cmt_priv *ced_to_sh_cmt(struct clock_event_device *ced)
 {
        return container_of(ced, struct sh_cmt_priv, ced);
@@ -483,6 +551,9 @@ int sh_cmt_register(struct sh_cmt_priv *p, char *name,
        if (clockevent_rating)
                sh_cmt_register_clockevent(p, name, clockevent_rating);
 
+       if (clocksource_rating)
+               sh_cmt_register_clocksource(p, name, clocksource_rating);
+
        return 0;
 }