ARM: tegra: fuse: Separate fuse dma initialization
Colin Cross [Wed, 24 Nov 2010 22:44:09 +0000 (14:44 -0800)]
There is a dependency loop between fuses, clocks, and APBDMA.
If DMA is enabled, fuse reads must go through APBDMA to avoid
corruption due to a hardware bug.  APBDMA requires a clock to be
enabled.  Clocks must read a fuse to determine allowable CPU
frequencies.

Separate out the fuse DMA initialization, and allow the fuse
read and write functions to be called without using DMA before
the DMA initialization has been completed.  Access to the fuses
before APBDMA is initialized won't hit the hardware bug because
nothing else can be using DMA.

Change-Id: Ib5cb0f346488f2869e8314c5f3b24fd86873f4c3
Signed-off-by: Colin Cross <ccross@android.com>

Rebase-Id: R943ad08dddeb0b40c7e759e6bfcafa45212797b0

arch/arm/mach-tegra/fuse.c
arch/arm/mach-tegra/fuse.h

index 3ba5ec9..9191917 100644 (file)
 #define FUSE_SKU_INFO          0x110
 #define FUSE_SPARE_BIT         0x200
 
-DEFINE_MUTEX(lock);
+static DEFINE_MUTEX(tegra_fuse_dma_lock);
 
 #ifdef CONFIG_TEGRA_SYSTEM_DMA
-struct tegra_dma_channel *dma;
-u32 *fuse_bb;
-dma_addr_t fuse_bb_phys;
-struct completion rd_wait;
-struct completion wr_wait;
+static struct tegra_dma_channel *tegra_fuse_dma;
+static u32 *tegra_fuse_bb;
+static dma_addr_t tegra_fuse_bb_phys;
+static DECLARE_COMPLETION(tegra_fuse_wait);
 
 static void fuse_dma_complete(struct tegra_dma_req *req)
 {
-       if (req)
-               req->to_memory ? complete(&rd_wait) : complete(&wr_wait);
+       complete(&tegra_fuse_wait);
 }
 
 static inline u32 fuse_readl(unsigned long offset)
 {
        struct tegra_dma_req req;
+       int ret;
 
-       if (!dma)
-               return -EINVAL;
+       if (!tegra_fuse_dma)
+               return readl(IO_TO_VIRT(TEGRA_FUSE_BASE + offset));
 
-       mutex_lock(&lock);
+       mutex_lock(&tegra_fuse_dma_lock);
        req.complete = fuse_dma_complete;
        req.to_memory = 1;
-       req.dest_addr = fuse_bb_phys;
+       req.dest_addr = tegra_fuse_bb_phys;
        req.dest_bus_width = 32;
        req.dest_wrap = 1;
        req.source_addr = TEGRA_FUSE_BASE + offset;
@@ -69,43 +68,51 @@ static inline u32 fuse_readl(unsigned long offset)
        req.req_sel = 0;
        req.size = 4;
 
-       init_completion(&rd_wait);
-       tegra_dma_enqueue_req(dma, &req);
-       if (wait_for_completion_timeout(&rd_wait, msecs_to_jiffies(50)) == 0) {
-               WARN_ON(1);
-               mutex_unlock(&lock);
-               return 0;
-       }
+       INIT_COMPLETION(tegra_fuse_wait);
+
+       tegra_dma_enqueue_req(tegra_fuse_dma, &req);
+
+       ret = wait_for_completion_timeout(&tegra_fuse_wait,
+               msecs_to_jiffies(50));
 
-       mutex_unlock(&lock);
-       return *((u32 *)fuse_bb);
+       if (WARN(ret == 0, "fuse read dma timed out"))
+               *(u32 *)tegra_fuse_bb = 0;
+
+       mutex_unlock(&tegra_fuse_dma_lock);
+       return *((u32 *)tegra_fuse_bb);
 }
 
 static inline void fuse_writel(u32 value, unsigned long offset)
 {
        struct tegra_dma_req req;
+       int ret;
 
-       if (!dma || !fuse_bb)
+       if (!tegra_fuse_dma) {
+               writel(value, IO_TO_VIRT(TEGRA_FUSE_BASE + offset));
                return;
+       }
 
-       mutex_lock(&lock);
-       *((u32 *)fuse_bb) = value;
+       mutex_lock(&tegra_fuse_dma_lock);
+       *((u32 *)tegra_fuse_bb) = value;
        req.complete = fuse_dma_complete;
        req.to_memory = 0;
        req.dest_addr = TEGRA_FUSE_BASE + offset;
        req.dest_wrap = 4;
        req.dest_bus_width = 32;
-       req.source_addr = fuse_bb_phys;
+       req.source_addr = tegra_fuse_bb_phys;
        req.source_bus_width = 32;
        req.source_wrap = 1;
        req.req_sel = 0;
        req.size = 4;
 
-       init_completion(&wr_wait);
-       tegra_dma_enqueue_req(dma, &req);
-       if (wait_for_completion_timeout(&wr_wait, msecs_to_jiffies(50)) == 0)
-               WARN_ON(1);
-       mutex_unlock(&lock);
+       INIT_COMPLETION(tegra_fuse_wait);
+
+       tegra_dma_enqueue_req(tegra_fuse_dma, &req);
+
+       ret = wait_for_completion_timeout(&tegra_fuse_wait,
+               msecs_to_jiffies(50));
+
+       mutex_unlock(&tegra_fuse_dma_lock);
 }
 #else
 static inline u32 fuse_readl(unsigned long offset)
@@ -135,28 +142,30 @@ void tegra_init_fuse(void)
        reg |= 1 << 28;
        writel(reg, IO_TO_VIRT(TEGRA_CLK_RESET_BASE + 0x48));
 
+       pr_info("Tegra SKU: %d CPU Process: %d Core Process: %d\n",
+               tegra_sku_id(), tegra_cpu_process_id(),
+               tegra_core_process_id());
+}
+
+void tegra_init_fuse_dma(void)
+{
 #ifdef CONFIG_TEGRA_SYSTEM_DMA
-       dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT |
+       tegra_fuse_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT |
                TEGRA_DMA_SHARED);
-       if (!dma) {
+       if (!tegra_fuse_dma) {
                pr_err("%s: can not allocate dma channel\n", __func__);
                return;
        }
 
-       fuse_bb = dma_alloc_coherent(NULL, sizeof(u32),
-               &fuse_bb_phys, GFP_KERNEL);
-       if (!fuse_bb) {
+       tegra_fuse_bb = dma_alloc_coherent(NULL, sizeof(u32),
+               &tegra_fuse_bb_phys, GFP_KERNEL);
+       if (!tegra_fuse_bb) {
                pr_err("%s: can not allocate bounce buffer\n", __func__);
-               tegra_dma_free_channel(dma);
-               dma = NULL;
+               tegra_dma_free_channel(tegra_fuse_dma);
+               tegra_fuse_dma = NULL;
                return;
        }
-       mutex_init(&lock);
 #endif
-
-       pr_info("Tegra SKU: %d CPU Process: %d Core Process: %d\n",
-               tegra_sku_id(), tegra_cpu_process_id(),
-               tegra_core_process_id());
 }
 
 unsigned long long tegra_chip_uid(void)
index 8246103..624bbfa 100644 (file)
@@ -22,5 +22,6 @@ int tegra_sku_id(void);
 int tegra_cpu_process_id(void);
 int tegra_core_process_id(void);
 void tegra_init_fuse(void);
+void tegra_init_fuse_dma(void);
 u32 tegra_fuse_readl(unsigned long offset);
 void tegra_fuse_writel(u32 value, unsigned long offset);