/*
 * drivers/video/tegra/host/nvhost_syncpt.c
 *
 * Tegra Graphics Host Syncpoints
 *
 * Copyright (c) 2010-2011, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/nvhost_ioctl.h>
#include "nvhost_syncpt.h"
#include "dev.h"

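/*
 * Number of SYNCPT_CHECK_PERIOD-long intervals a wait may report itself as
 * "stuck" before nvhost_syncpt_wait_timeout() gives up and calls BUG().
 */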
#define MAX_STUCK_CHECK_COUNT 15

/**
 * Reset hardware syncpoint and waitbase registers to the values held in
 * the sw shadows.
 */
void nvhost_syncpt_reset(struct nvhost_syncpt *sp)
{
        u32 i;
        BUG_ON(!(syncpt_op(sp).reset && syncpt_op(sp).reset_wait_base));

        for (i = 0; i < sp->nb_pts; i++)
                syncpt_op(sp).reset(sp, i);
        for (i = 0; i < sp->nb_bases; i++)
                syncpt_op(sp).reset_wait_base(sp, i);
        wmb();
}

/**
 * Update the sw shadow state for client-managed syncpoints (and all wait
 * bases) from the hardware registers.
 */
void nvhost_syncpt_save(struct nvhost_syncpt *sp)
{
        u32 i;
        BUG_ON(!(syncpt_op(sp).update_min && syncpt_op(sp).read_wait_base));

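        /*
         * Client-managed syncpoints may have been advanced outside the
         * driver's control, so refresh their shadow min from hardware.
         * Host-managed syncpoints must already be idle (min == max) here.
         */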
        for (i = 0; i < sp->nb_pts; i++) {
                if (client_managed(i))
                        syncpt_op(sp).update_min(sp, i);
                else
                        BUG_ON(!nvhost_syncpt_min_eq_max(sp, i));
        }

        for (i = 0; i < sp->nb_bases; i++)
                syncpt_op(sp).read_wait_base(sp, i);
}

/**
 * Read the current syncpoint value from hardware and update the cached
 * minimum (the last value read from hardware).
 */
u32 nvhost_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
{
        BUG_ON(!syncpt_op(sp).update_min);

        return syncpt_op(sp).update_min(sp, id);
}

/**
 * Get the current syncpoint value
 */
u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id)
{
        u32 val;
        BUG_ON(!syncpt_op(sp).update_min);
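        /* Keep the host module powered while reading the hardware register. */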
        nvhost_module_busy(&syncpt_to_dev(sp)->mod);
        val = syncpt_op(sp).update_min(sp, id);
        nvhost_module_idle(&syncpt_to_dev(sp)->mod);
        return val;
}

/**
 * Get the current syncpoint base
 */
u32 nvhost_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id)
{
        u32 val;
        BUG_ON(!syncpt_op(sp).read_wait_base);
        nvhost_module_busy(&syncpt_to_dev(sp)->mod);
        syncpt_op(sp).read_wait_base(sp, id);
        val = sp->base_val[id];
        nvhost_module_idle(&syncpt_to_dev(sp)->mod);
        return val;
}

/**
 * Write a cpu syncpoint increment to the hardware, without touching
 * the cache. Caller is responsible for host being powered.
 */
void nvhost_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id)
{
        BUG_ON(!syncpt_op(sp).cpu_incr);
        syncpt_op(sp).cpu_incr(sp, id);
}

/**
 * Increment syncpoint value from cpu, updating cache
 */
void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id)
{
        nvhost_syncpt_incr_max(sp, id, 1);
        nvhost_module_busy(&syncpt_to_dev(sp)->mod);
        nvhost_syncpt_cpu_incr(sp, id);
        nvhost_module_idle(&syncpt_to_dev(sp)->mod);
}

/**
 * Main entrypoint for syncpoint value waits.
 *
 * Blocks until syncpoint 'id' reaches 'thresh', the 'timeout' (in jiffies,
 * or NVHOST_NO_TIMEOUT to wait indefinitely) expires, or the wait is
 * interrupted by a signal. If 'value' is non-NULL, the last observed
 * syncpoint value is returned through it.
 */
int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
                        u32 thresh, u32 timeout, u32 *value)
{
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
        void *ref;
        void *waiter;
        int err = 0, check_count = 0, low_timeout = 0;

        if (value)
                *value = 0;

        BUG_ON(!syncpt_op(sp).update_min);
        if (!nvhost_syncpt_check_max(sp, id, thresh)) {
                dev_warn(&syncpt_to_dev(sp)->pdev->dev,
                        "wait %d (%s) for (%d) wouldn't be met (max %d)\n",
                        id, syncpt_op(sp).name(sp, id), thresh,
                        nvhost_syncpt_read_max(sp, id));
                nvhost_debug_dump(syncpt_to_dev(sp));
                return -EINVAL;
        }

        /* first check cache */
        if (nvhost_syncpt_min_cmp(sp, id, thresh)) {
                if (value)
                        *value = nvhost_syncpt_read_min(sp, id);
                return 0;
        }

        /* keep host alive */
        nvhost_module_busy(&syncpt_to_dev(sp)->mod);

        if (client_managed(id) || !nvhost_syncpt_min_eq_max(sp, id)) {
                /* try to read from register */
                u32 val = syncpt_op(sp).update_min(sp, id);
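                /* Signed difference handles syncpoint value wraparound. */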
                if ((s32)(val - thresh) >= 0) {
                        if (value)
                                *value = val;
                        goto done;
                }
        }

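        /* A zero timeout means poll only: do not block, just report -EAGAIN. */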
        if (!timeout) {
                err = -EAGAIN;
                goto done;
        }

        /* schedule a wakeup when the syncpoint value is reached */
        waiter = nvhost_intr_alloc_waiter();
        if (!waiter) {
                err = -ENOMEM;
                goto done;
        }

        err = nvhost_intr_add_action(&(syncpt_to_dev(sp)->intr), id, thresh,
                                NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE, &wq,
                                waiter,
                                &ref);
        if (err)
                goto done;

        err = -EAGAIN;
        /* wait for the syncpoint, or timeout, or signal */
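        /*
         * Wait in SYNCPT_CHECK_PERIOD-sized chunks so that a syncpoint that
         * never signals is noticed and reported periodically instead of
         * sleeping silently for the whole timeout.
         */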
        while (timeout) {
                u32 check = min_t(u32, SYNCPT_CHECK_PERIOD, timeout);
                int remain = wait_event_interruptible_timeout(wq,
                                                nvhost_syncpt_min_cmp(sp, id, thresh),
                                                check);
                if (remain > 0 || nvhost_syncpt_min_cmp(sp, id, thresh)) {
                        if (value)
                                *value = nvhost_syncpt_read_min(sp, id);
                        err = 0;
                        break;
                }
                if (remain < 0) {
                        err = remain;
                        break;
                }
                if (timeout != NVHOST_NO_TIMEOUT) {
                        if (timeout < SYNCPT_CHECK_PERIOD) {
                                /* Caller-specified timeout may be impractically low */
                                low_timeout = timeout;
                        }
                        timeout -= check;
                }
                if (timeout) {
                        dev_warn(&syncpt_to_dev(sp)->pdev->dev,
                                "%s: syncpoint id %d (%s) stuck waiting %d, timeout=%d\n",
                                 current->comm, id, syncpt_op(sp).name(sp, id),
                                 thresh, timeout);
                        syncpt_op(sp).debug(sp);
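                        /*
                         * After MAX_STUCK_CHECK_COUNT check periods with no
                         * progress, assume a fatal hang: dump state and BUG().
                         */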
                        if (check_count > MAX_STUCK_CHECK_COUNT) {
                                if (low_timeout) {
                                        dev_warn(&syncpt_to_dev(sp)->pdev->dev,
                                                "is timeout %d too low?\n",
                                                low_timeout);
                                }
                                nvhost_debug_dump(syncpt_to_dev(sp));
                                BUG();
                        }
                        check_count++;
                }
        }
        nvhost_intr_put_ref(&(syncpt_to_dev(sp)->intr), ref);

done:
        nvhost_module_idle(&syncpt_to_dev(sp)->mod);
        return err;
}

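/*
 * Illustrative usage sketch (not part of this driver): a caller typically
 * advances the expected max value first and then waits for the hardware to
 * catch up. This assumes nvhost_syncpt_incr_max() returns the new max value,
 * as declared in nvhost_syncpt.h.
 *
 *	u32 thresh = nvhost_syncpt_incr_max(sp, id, 1);
 *	... submit work that increments syncpoint 'id' once ...
 *	err = nvhost_syncpt_wait_timeout(sp, id, thresh,
 *					 NVHOST_NO_TIMEOUT, NULL);
 */

/**
 * Dump the current syncpoint state through the per-chip debug hook.
 */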
void nvhost_syncpt_debug(struct nvhost_syncpt *sp)
{
        syncpt_op(sp).debug(sp);
}

/**
 * Check for old WAITs to be removed (avoiding a syncpoint wrap)
 */
int nvhost_syncpt_wait_check(struct nvhost_syncpt *sp,
                             struct nvmap_client *nvmap,
                             u32 waitchk_mask,
                             struct nvhost_waitchk *wait,
                             int num_waitchk)
{
        return syncpt_op(sp).wait_check(sp, nvmap,
                        waitchk_mask, wait, num_waitchk);
}