/*
 * arch/arm/mach-tegra/apbio.c
 *
 * Copyright (C) 2010 NVIDIA Corporation.
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/mutex.h>

#include <mach/dma.h>
#include <mach/iomap.h>

#include "apbio.h"

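/*
 * Access helpers for registers on the Tegra APB bus. On Tegra2 the
 * registers (e.g. the fuse block) are read and written through the APB
 * DMA controller rather than directly by the CPU, apparently to avoid
 * a hardware issue with direct CPU access while APB DMA is active. A
 * single 32-bit coherent bounce buffer carries the data; the mutex
 * below serializes use of the DMA channel, the bounce buffer, and the
 * shared completion.
 */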
static DEFINE_MUTEX(tegra_apb_dma_lock);

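/*
 * The DMA-based accessors are only built when the Tegra system DMA
 * driver is enabled on Tegra2; until tegra_init_apb_dma() has
 * allocated the channel they fall back to plain readl()/writel().
 */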
#if defined(CONFIG_TEGRA_SYSTEM_DMA) && defined(CONFIG_ARCH_TEGRA_2x_SOC)
static struct tegra_dma_channel *tegra_apb_dma;
static u32 *tegra_apb_bb;
static dma_addr_t tegra_apb_bb_phys;
static DECLARE_COMPLETION(tegra_apb_wait);

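/* DMA completion callback: wakes the waiter in apb_readl()/apb_writel(). */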
static void apb_dma_complete(struct tegra_dma_req *req)
{
        complete(&tegra_apb_wait);
}

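/*
 * Abort a timed-out request; if the DMA driver flagged it as aborted
 * while in flight, run its completion callback by hand.
 */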
static void cancel_dma(struct tegra_dma_channel *dma_chan,
                struct tegra_dma_req *req)
{
        tegra_dma_cancel(dma_chan);
        if (req->status == -TEGRA_DMA_REQ_ERROR_ABORTED)
                req->complete(req);
}

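/*
 * Read one 32-bit APB register at the given physical offset. The word
 * is DMAed into the bounce buffer and fetched from there; a transfer
 * that times out is cancelled and reads back as 0.
 */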
static inline u32 apb_readl(unsigned long offset)
{
        struct tegra_dma_req req;
        int ret;

        if (!tegra_apb_dma)
                return readl(IO_TO_VIRT(offset));

        mutex_lock(&tegra_apb_dma_lock);
        req.complete = apb_dma_complete;
        req.to_memory = 1;
        req.dest_addr = tegra_apb_bb_phys;
        req.dest_bus_width = 32;
        req.dest_wrap = 1;
        req.source_addr = offset;
        req.source_bus_width = 32;
        req.source_wrap = 4;
        req.req_sel = 0;
        req.size = 4;
        dma_sync_single_for_device(NULL, tegra_apb_bb_phys,
                        sizeof(u32), DMA_FROM_DEVICE);

        INIT_COMPLETION(tegra_apb_wait);

        tegra_dma_enqueue_req(tegra_apb_dma, &req);

        ret = wait_for_completion_timeout(&tegra_apb_wait,
                msecs_to_jiffies(400));

        if (WARN(ret == 0, "apb read dma timed out")) {
                cancel_dma(tegra_apb_dma, &req);
                *(u32 *)tegra_apb_bb = 0;
        }

        dma_sync_single_for_cpu(NULL, tegra_apb_bb_phys,
                        sizeof(u32), DMA_FROM_DEVICE);
        mutex_unlock(&tegra_apb_dma_lock);
        return *((u32 *)tegra_apb_bb);
}

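/*
 * Write one 32-bit value to an APB register: the mirror image of
 * apb_readl(). The value is staged in the bounce buffer and DMAed out
 * to the register; a transfer that times out is cancelled.
 */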
static inline void apb_writel(u32 value, unsigned long offset)
{
        struct tegra_dma_req req;
        int ret;

        if (!tegra_apb_dma) {
                writel(value, IO_TO_VIRT(offset));
                return;
        }

        mutex_lock(&tegra_apb_dma_lock);
        dma_sync_single_for_cpu(NULL, tegra_apb_bb_phys,
                        sizeof(u32), DMA_TO_DEVICE);
        *((u32 *)tegra_apb_bb) = value;
        req.complete = apb_dma_complete;
        req.to_memory = 0;
        req.dest_addr = offset;
        req.dest_wrap = 4;
        req.dest_bus_width = 32;
        req.source_addr = tegra_apb_bb_phys;
        req.source_bus_width = 32;
        req.source_wrap = 1;
        req.req_sel = 0;
        req.size = 4;

        INIT_COMPLETION(tegra_apb_wait);

        dma_sync_single_for_device(NULL, tegra_apb_bb_phys,
                        sizeof(u32), DMA_TO_DEVICE);
        tegra_dma_enqueue_req(tegra_apb_dma, &req);

        ret = wait_for_completion_timeout(&tegra_apb_wait,
                msecs_to_jiffies(400));

        if (WARN(ret == 0, "apb write dma timed out"))
                cancel_dma(tegra_apb_dma, &req);

        mutex_unlock(&tegra_apb_dma_lock);
}

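/*
 * Exported entry points. Callers pass the physical APB address of the
 * register, e.g. one in the fuse block at TEGRA_FUSE_BASE.
 */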
u32 tegra_apb_readl(unsigned long offset)
{
        return apb_readl(offset);
}

void tegra_apb_writel(u32 value, unsigned long offset)
{
        apb_writel(value, offset);
}
#endif

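/*
 * Allocate a shared one-shot APB DMA channel and a one-word coherent
 * bounce buffer at arch_initcall time. Until this runs, the accessors
 * above use the plain MMIO fallback.
 */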
static int tegra_init_apb_dma(void)
{
#if defined(CONFIG_TEGRA_SYSTEM_DMA) && defined(CONFIG_ARCH_TEGRA_2x_SOC)
        tegra_apb_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT |
                TEGRA_DMA_SHARED, "apbio");
        if (!tegra_apb_dma) {
                pr_err("%s: cannot allocate dma channel\n", __func__);
                return -ENODEV;
        }

        tegra_apb_bb = dma_alloc_coherent(NULL, sizeof(u32),
                &tegra_apb_bb_phys, GFP_KERNEL);
        if (!tegra_apb_bb) {
                pr_err("%s: cannot allocate bounce buffer\n", __func__);
                tegra_dma_free_channel(tegra_apb_dma);
                tegra_apb_dma = NULL;
                return -ENOMEM;
        }
#endif
        return 0;
}
arch_initcall(tegra_init_apb_dma);