d0de494639939a68c735a58aaa58a91f7ef17b64
[linux-2.6.git] / drivers / video / tegra / host / host1x / host1x_syncpt.c
1 /*
2  * drivers/video/tegra/host/host1x/host1x_syncpt.c
3  *
4  * Tegra Graphics Host Syncpoints for HOST1X
5  *
6  * Copyright (c) 2010-2012, NVIDIA Corporation.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms and conditions of the GNU General Public License,
10  * version 2, as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  * more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
19  */
20
21 #include <linux/nvhost_ioctl.h>
22 #include <linux/io.h>
23 #include "nvhost_syncpt.h"
24 #include "nvhost_acm.h"
25 #include "dev.h"
26 #include "host1x_syncpt.h"
27 #include "host1x_hardware.h"
28
29 /**
30  * Write the current syncpoint value back to hw.
31  */
32 static void t20_syncpt_reset(struct nvhost_syncpt *sp, u32 id)
33 {
34         struct nvhost_master *dev = syncpt_to_dev(sp);
35         int min = nvhost_syncpt_read_min(sp, id);
36         writel(min, dev->sync_aperture + (HOST1X_SYNC_SYNCPT_0 + id * 4));
37 }
38
39 /**
40  * Write the current waitbase value back to hw.
41  */
42 static void t20_syncpt_reset_wait_base(struct nvhost_syncpt *sp, u32 id)
43 {
44         struct nvhost_master *dev = syncpt_to_dev(sp);
45         writel(sp->base_val[id],
46                 dev->sync_aperture + (HOST1X_SYNC_SYNCPT_BASE_0 + id * 4));
47 }
48
49 /**
50  * Read waitbase value from hw.
51  */
52 static void t20_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id)
53 {
54         struct nvhost_master *dev = syncpt_to_dev(sp);
55         sp->base_val[id] = readl(dev->sync_aperture +
56                                 (HOST1X_SYNC_SYNCPT_BASE_0 + id * 4));
57 }
58
59 /**
60  * Updates the last value read from hardware.
61  * (was nvhost_syncpt_update_min)
62  */
63 static u32 t20_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
64 {
65         struct nvhost_master *dev = syncpt_to_dev(sp);
66         void __iomem *sync_regs = dev->sync_aperture;
67         u32 old, live;
68
69         do {
70                 old = nvhost_syncpt_read_min(sp, id);
71                 live = readl(sync_regs + (HOST1X_SYNC_SYNCPT_0 + id * 4));
72         } while ((u32)atomic_cmpxchg(&sp->min_val[id], old, live) != old);
73
74         if (!nvhost_syncpt_check_max(sp, id, live))
75                 dev_err(&syncpt_to_dev(sp)->dev->dev,
76                                 "%s failed: id=%u, min=%d, max=%d\n",
77                                 __func__,
78                                 nvhost_syncpt_read_min(sp, id),
79                                 nvhost_syncpt_read_max(sp, id),
80                                 id);
81
82         return live;
83 }
84
/**
 * Write a cpu syncpoint increment to the hardware, without touching
 * the cache. Caller is responsible for host being powered.
 */
static void t20_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id)
{
        struct nvhost_master *dev = syncpt_to_dev(sp);
        BUG_ON(!nvhost_module_powered(dev->dev));
        /* A host-managed syncpoint may only be incremented up to its
         * software max; going past it would desynchronize the shadow
         * state, so refuse and dump debug info instead. */
        if (!client_managed(id) && nvhost_syncpt_min_eq_max(sp, id)) {
                dev_err(&syncpt_to_dev(sp)->dev->dev,
                        "Trying to increment syncpoint id %d beyond max\n",
                        id);
                nvhost_debug_dump(syncpt_to_dev(sp));
                return;
        }
        /* Setting bit @id in the CPU_INCR register bumps that
         * syncpoint by one. */
        writel(BIT(id), dev->sync_aperture + HOST1X_SYNC_SYNCPT_CPU_INCR);
        wmb();
}
103
/* check for old WAITs to be removed (avoiding a wrap) */
/*
 * Walk @wait[0..num_waitchk), and for every wait whose threshold is
 * already satisfied, patch the WAIT_SYNCPT method in the command
 * stream so it can never stall the channel after the syncpoint value
 * wraps. Returns 0 on success or the first nvmap_patch_word() error.
 */
static int t20_syncpt_wait_check(struct nvhost_syncpt *sp,
                                 struct nvmap_client *nvmap,
                                 u32 waitchk_mask,
                                 struct nvhost_waitchk *wait,
                                 int num_waitchk)
{
        u32 idx;
        int err = 0;

        /* get current syncpt values */
        for (idx = 0; idx < NV_HOST1X_SYNCPT_NB_PTS; idx++) {
                if (BIT(idx) & waitchk_mask)
                        nvhost_syncpt_update_min(sp, idx);
        }

        BUG_ON(!wait && !num_waitchk);

        /* compare syncpt vs wait threshold */
        while (num_waitchk) {
                u32 override;

                BUG_ON(wait->syncpt_id >= NV_HOST1X_SYNCPT_NB_PTS);
                if (nvhost_syncpt_is_expired(sp,
                                        wait->syncpt_id, wait->thresh)) {
                        /*
                         * NULL an already satisfied WAIT_SYNCPT host method,
                         * by patching its args in the command stream. The
                         * method data is changed to reference a reserved
                         * (never given out or incr) NVSYNCPT_GRAPHICS_HOST
                         * syncpt with a matching threshold value of 0, so
                         * is guaranteed to be popped by the host HW.
                         */
                        dev_dbg(&syncpt_to_dev(sp)->dev->dev,
                            "drop WAIT id %d (%s) thresh 0x%x, min 0x%x\n",
                            wait->syncpt_id,
                            syncpt_op(sp).name(sp, wait->syncpt_id),
                            wait->thresh,
                            nvhost_syncpt_read_min(sp, wait->syncpt_id));

                        /* patch the wait */
                        override = nvhost_class_host_wait_syncpt(
                                        NVSYNCPT_GRAPHICS_HOST, 0);
                        err = nvmap_patch_word(nvmap,
                                        (struct nvmap_handle *)wait->mem,
                                        wait->offset, override);
                        /* stop at the first patch failure; caller
                         * receives the error */
                        if (err)
                                break;
                }

                wait++;
                num_waitchk--;
        }
        return err;
}
159
160
/* Human-readable names for the 32 host1x syncpoints, indexed by
 * syncpoint id. Empty strings mark ids with no assigned client. */
static const char *s_syncpt_names[32] = {
        "gfx_host",
        "", "", "", "", "", "", "",
        "disp0_a", "disp1_a", "avp_0",
        "csi_vi_0", "csi_vi_1",
        "vi_isp_0", "vi_isp_1", "vi_isp_2", "vi_isp_3", "vi_isp_4",
        "2d_0", "2d_1",
        "disp0_b", "disp1_b",
        "3d",
        "mpe",
        "disp0_c", "disp1_c",
        "vblank0", "vblank1",
        "mpe_ebm_eof", "mpe_wr_safe",
        "2d_tinyblt",
        "dsi"
};
177
178 static const char *t20_syncpt_name(struct nvhost_syncpt *s, u32 id)
179 {
180         BUG_ON(id >= ARRAY_SIZE(s_syncpt_names));
181         return s_syncpt_names[id];
182 }
183
/*
 * Dump the state of every syncpoint and wait base to the kernel log.
 * Entries whose values are all zero are skipped to keep the output
 * short. Reading refreshes the shadow copies from hardware.
 */
static void t20_syncpt_debug(struct nvhost_syncpt *sp)
{
        u32 i;
        for (i = 0; i < NV_HOST1X_SYNCPT_NB_PTS; i++) {
                u32 max = nvhost_syncpt_read_max(sp, i);
                u32 min = nvhost_syncpt_update_min(sp, i);
                /* skip unused syncpoints */
                if (!max && !min)
                        continue;
                dev_info(&syncpt_to_dev(sp)->dev->dev,
                        "id %d (%s) min %d max %d\n",
                        i, syncpt_op(sp).name(sp, i),
                        min, max);

        }

        for (i = 0; i < NV_HOST1X_SYNCPT_NB_BASES; i++) {
                u32 base_val;
                /* refresh the shadow from hardware before printing */
                t20_syncpt_read_wait_base(sp, i);
                base_val = sp->base_val[i];
                if (base_val)
                        dev_info(&syncpt_to_dev(sp)->dev->dev,
                                        "waitbase id %d val %d\n",
                                        i, base_val);

        }
}
210
211 static int syncpt_mutex_try_lock(struct nvhost_syncpt *sp,
212                 unsigned int idx)
213 {
214         void __iomem *sync_regs = syncpt_to_dev(sp)->sync_aperture;
215         /* mlock registers returns 0 when the lock is aquired.
216          * writing 0 clears the lock. */
217         return !!readl(sync_regs + (HOST1X_SYNC_MLOCK_0 + idx * 4));
218 }
219
220 static void syncpt_mutex_unlock(struct nvhost_syncpt *sp,
221                unsigned int idx)
222 {
223         void __iomem *sync_regs = syncpt_to_dev(sp)->sync_aperture;
224
225         writel(0, sync_regs + (HOST1X_SYNC_MLOCK_0 + idx * 4));
226 }
227
228 int host1x_init_syncpt_support(struct nvhost_master *host)
229 {
230
231         host->sync_aperture = host->aperture +
232                 (NV_HOST1X_CHANNEL0_BASE +
233                         HOST1X_CHANNEL_SYNC_REG_BASE);
234
235         host->op.syncpt.reset = t20_syncpt_reset;
236         host->op.syncpt.reset_wait_base = t20_syncpt_reset_wait_base;
237         host->op.syncpt.read_wait_base = t20_syncpt_read_wait_base;
238         host->op.syncpt.update_min = t20_syncpt_update_min;
239         host->op.syncpt.cpu_incr = t20_syncpt_cpu_incr;
240         host->op.syncpt.wait_check = t20_syncpt_wait_check;
241         host->op.syncpt.debug = t20_syncpt_debug;
242         host->op.syncpt.name = t20_syncpt_name;
243         host->op.syncpt.mutex_try_lock = syncpt_mutex_try_lock;
244         host->op.syncpt.mutex_unlock = syncpt_mutex_unlock;
245
246         host->syncpt.nb_pts = NV_HOST1X_SYNCPT_NB_PTS;
247         host->syncpt.nb_bases = NV_HOST1X_SYNCPT_NB_BASES;
248         host->syncpt.client_managed = NVSYNCPTS_CLIENT_MANAGED;
249         host->syncpt.nb_mlocks =  NV_HOST1X_SYNC_MLOCK_NUM;
250
251         return 0;
252 }