include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
[linux-3.10.git] / drivers / gpu / drm / radeon / radeon_fence.c
1 /*
2  * Copyright 2009 Jerome Glisse.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the
7  * "Software"), to deal in the Software without restriction, including
8  * without limitation the rights to use, copy, modify, merge, publish,
9  * distribute, sub license, and/or sell copies of the Software, and to
10  * permit persons to whom the Software is furnished to do so, subject to
11  * the following conditions:
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20  *
21  * The above copyright notice and this permission notice (including the
22  * next paragraph) shall be included in all copies or substantial portions
23  * of the Software.
24  *
25  */
26 /*
27  * Authors:
28  *    Jerome Glisse <glisse@freedesktop.org>
29  *    Dave Airlie
30  */
31 #include <linux/seq_file.h>
32 #include <asm/atomic.h>
33 #include <linux/wait.h>
34 #include <linux/list.h>
35 #include <linux/kref.h>
36 #include <linux/slab.h>
37 #include "drmP.h"
38 #include "drm.h"
39 #include "radeon_reg.h"
40 #include "radeon.h"
41
42 int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
43 {
44         unsigned long irq_flags;
45
46         write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
47         if (fence->emited) {
48                 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
49                 return 0;
50         }
51         fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
52         if (!rdev->cp.ready) {
53                 /* FIXME: cp is not running assume everythings is done right
54                  * away
55                  */
56                 WREG32(rdev->fence_drv.scratch_reg, fence->seq);
57         } else
58                 radeon_fence_ring_emit(rdev, fence);
59
60         fence->emited = true;
61         fence->timeout = jiffies + ((2000 * HZ) / 1000);
62         list_del(&fence->list);
63         list_add_tail(&fence->list, &rdev->fence_drv.emited);
64         write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
65         return 0;
66 }
67
/*
 * Scan the 'emited' list for fences the GPU has completed.
 *
 * Reads the sequence number the GPU last wrote to the scratch register;
 * the fence with that seq, and every fence emitted before it, are moved
 * to the 'signaled' list and marked signaled.
 *
 * Caller must hold rdev->fence_drv.lock for writing.
 * Returns true when waiters should be woken: either fences just
 * signaled, or rdev is NULL / shutting down (report "done" so waiters
 * never stall on teardown).
 */
static bool radeon_fence_poll_locked(struct radeon_device *rdev)
{
        struct radeon_fence *fence;
        struct list_head *i, *n;
        uint32_t seq;
        bool wake = false;

        if (rdev == NULL) {
                return true;
        }
        if (rdev->shutdown) {
                return true;
        }
        seq = RREG32(rdev->fence_drv.scratch_reg);
        rdev->fence_drv.last_seq = seq;
        n = NULL;
        /* locate the node matching the last sequence number the GPU wrote */
        list_for_each(i, &rdev->fence_drv.emited) {
                fence = list_entry(i, struct radeon_fence, list);
                if (fence->seq == seq) {
                        n = i;
                        break;
                }
        }
        /* all fence previous to this one are considered as signaled */
        if (n) {
                /* walk backwards from the match to the list head; the prev
                 * pointer is saved before list_del() invalidates it */
                i = n;
                do {
                        n = i->prev;
                        list_del(i);
                        list_add_tail(i, &rdev->fence_drv.signaled);
                        fence = list_entry(i, struct radeon_fence, list);
                        fence->signaled = true;
                        i = n;
                } while (i != &rdev->fence_drv.emited);
                wake = true;
        }
        return wake;
}
106
107 static void radeon_fence_destroy(struct kref *kref)
108 {
109         unsigned long irq_flags;
110         struct radeon_fence *fence;
111
112         fence = container_of(kref, struct radeon_fence, kref);
113         write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
114         list_del(&fence->list);
115         fence->emited = false;
116         write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
117         kfree(fence);
118 }
119
120 int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
121 {
122         unsigned long irq_flags;
123
124         *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
125         if ((*fence) == NULL) {
126                 return -ENOMEM;
127         }
128         kref_init(&((*fence)->kref));
129         (*fence)->rdev = rdev;
130         (*fence)->emited = false;
131         (*fence)->signaled = false;
132         (*fence)->seq = 0;
133         INIT_LIST_HEAD(&(*fence)->list);
134
135         write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
136         list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
137         write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
138         return 0;
139 }
140
141
142 bool radeon_fence_signaled(struct radeon_fence *fence)
143 {
144         unsigned long irq_flags;
145         bool signaled = false;
146
147         if (!fence)
148                 return true;
149
150         if (fence->rdev->gpu_lockup)
151                 return true;
152
153         write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
154         signaled = fence->signaled;
155         /* if we are shuting down report all fence as signaled */
156         if (fence->rdev->shutdown) {
157                 signaled = true;
158         }
159         if (!fence->emited) {
160                 WARN(1, "Querying an unemited fence : %p !\n", fence);
161                 signaled = true;
162         }
163         if (!signaled) {
164                 radeon_fence_poll_locked(fence->rdev);
165                 signaled = fence->signaled;
166         }
167         write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
168         return signaled;
169 }
170
/*
 * Sleep until the given fence signals.
 *
 * @fence: fence to wait on; NULL triggers a WARN and returns 0
 * @intr:  when true, sleep interruptibly and propagate a negative
 *         return from wait_event_interruptible_timeout() to the caller
 *
 * Sleeps on the fence wait queue in short slices (HZ/100, i.e. ~10ms,
 * or up to the fence's remaining deadline), re-checking the fence each
 * time.  If the fence runs more than 500ms past its deadline without
 * signaling, the GPU is assumed hung: radeon_gpu_reset() is invoked and
 * the scratch register is forced to this fence's seq so the wait can
 * complete.  Late-but-successful completion is counted and logged.
 *
 * Returns 0 once the fence signals (even after recovery), or a negative
 * value when an interruptible wait is broken by a signal.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
        struct radeon_device *rdev;
        unsigned long cur_jiffies;
        unsigned long timeout;
        bool expired = false;
        int r;

        if (fence == NULL) {
                WARN(1, "Querying an invalid fence : %p !\n", fence);
                return 0;
        }
        rdev = fence->rdev;
        if (radeon_fence_signaled(fence)) {
                return 0;
        }

retry:
        cur_jiffies = jiffies;
        /* wait one slice (10ms-ish) or until the fence deadline,
         * whichever is longer */
        timeout = HZ / 100;
        if (time_after(fence->timeout, cur_jiffies)) {
                timeout = fence->timeout - cur_jiffies;
        }

        if (intr) {
                /* the sw interrupt is what pokes the wait queue when the
                 * GPU advances the fence sequence */
                radeon_irq_kms_sw_irq_get(rdev);
                r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
                                radeon_fence_signaled(fence), timeout);
                radeon_irq_kms_sw_irq_put(rdev);
                if (unlikely(r < 0))
                        return r;
        } else {
                radeon_irq_kms_sw_irq_get(rdev);
                r = wait_event_timeout(rdev->fence_drv.queue,
                         radeon_fence_signaled(fence), timeout);
                radeon_irq_kms_sw_irq_put(rdev);
        }
        if (unlikely(!radeon_fence_signaled(fence))) {
                /* r == 0 means the wait slice elapsed with no signal */
                if (unlikely(r == 0)) {
                        expired = true;
                }
                if (unlikely(expired)) {
                        /* how far past the fence deadline are we, in ms? */
                        timeout = 1;
                        if (time_after(cur_jiffies, fence->timeout)) {
                                timeout = cur_jiffies - fence->timeout;
                        }
                        timeout = jiffies_to_msecs(timeout);
                        /* more than 500ms overdue: assume GPU hang, reset
                         * it and force the scratch reg to this seq so the
                         * next poll sees the fence as signaled */
                        if (timeout > 500) {
                                DRM_ERROR("fence(%p:0x%08X) %lums timeout "
                                          "going to reset GPU\n",
                                          fence, fence->seq, timeout);
                                radeon_gpu_reset(rdev);
                                WREG32(rdev->fence_drv.scratch_reg, fence->seq);
                        }
                }
                goto retry;
        }
        if (unlikely(expired)) {
                /* fence signaled, but only after its deadline: record and
                 * report the late completion */
                rdev->fence_drv.count_timeout++;
                cur_jiffies = jiffies;
                timeout = 1;
                if (time_after(cur_jiffies, fence->timeout)) {
                        timeout = cur_jiffies - fence->timeout;
                }
                timeout = jiffies_to_msecs(timeout);
                DRM_ERROR("fence(%p:0x%08X) %lums timeout\n",
                          fence, fence->seq, timeout);
                DRM_ERROR("last signaled fence(0x%08X)\n",
                          rdev->fence_drv.last_seq);
        }
        return 0;
}
243
244 int radeon_fence_wait_next(struct radeon_device *rdev)
245 {
246         unsigned long irq_flags;
247         struct radeon_fence *fence;
248         int r;
249
250         if (rdev->gpu_lockup) {
251                 return 0;
252         }
253         write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
254         if (list_empty(&rdev->fence_drv.emited)) {
255                 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
256                 return 0;
257         }
258         fence = list_entry(rdev->fence_drv.emited.next,
259                            struct radeon_fence, list);
260         radeon_fence_ref(fence);
261         write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
262         r = radeon_fence_wait(fence, false);
263         radeon_fence_unref(&fence);
264         return r;
265 }
266
267 int radeon_fence_wait_last(struct radeon_device *rdev)
268 {
269         unsigned long irq_flags;
270         struct radeon_fence *fence;
271         int r;
272
273         if (rdev->gpu_lockup) {
274                 return 0;
275         }
276         write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
277         if (list_empty(&rdev->fence_drv.emited)) {
278                 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
279                 return 0;
280         }
281         fence = list_entry(rdev->fence_drv.emited.prev,
282                            struct radeon_fence, list);
283         radeon_fence_ref(fence);
284         write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
285         r = radeon_fence_wait(fence, false);
286         radeon_fence_unref(&fence);
287         return r;
288 }
289
/*
 * Take an additional reference on a fence; returns the same pointer so
 * calls can be chained.  Caller must already hold a valid reference
 * (NULL is not handled here).
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
        kref_get(&fence->kref);
        return fence;
}
295
296 void radeon_fence_unref(struct radeon_fence **fence)
297 {
298         struct radeon_fence *tmp = *fence;
299
300         *fence = NULL;
301         if (tmp) {
302                 kref_put(&tmp->kref, &radeon_fence_destroy);
303         }
304 }
305
306 void radeon_fence_process(struct radeon_device *rdev)
307 {
308         unsigned long irq_flags;
309         bool wake;
310
311         write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
312         wake = radeon_fence_poll_locked(rdev);
313         write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
314         if (wake) {
315                 wake_up_all(&rdev->fence_drv.queue);
316         }
317 }
318
/*
 * One-time setup of the fence driver state for a device.
 *
 * Grabs a scratch register (the GPU writes fence sequence numbers into
 * it), zeroes it, resets the sequence counter, initializes the three
 * fence lists (created/emited/signaled) and the wait queue, then
 * registers the debugfs file.  A debugfs failure is only logged, never
 * fatal.
 *
 * Returns 0 on success or the error from radeon_scratch_get().
 */
int radeon_fence_driver_init(struct radeon_device *rdev)
{
        unsigned long irq_flags;
        int r;

        write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
        r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
        if (r) {
                dev_err(rdev->dev, "fence failed to get scratch register\n");
                write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
                return r;
        }
        WREG32(rdev->fence_drv.scratch_reg, 0);
        atomic_set(&rdev->fence_drv.seq, 0);
        INIT_LIST_HEAD(&rdev->fence_drv.created);
        INIT_LIST_HEAD(&rdev->fence_drv.emited);
        INIT_LIST_HEAD(&rdev->fence_drv.signaled);
        rdev->fence_drv.count_timeout = 0;
        init_waitqueue_head(&rdev->fence_drv.queue);
        rdev->fence_drv.initialized = true;
        write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
        if (radeon_debugfs_fence_init(rdev)) {
                dev_err(rdev->dev, "fence debugfs file creation failed\n");
        }
        return 0;
}
345
346 void radeon_fence_driver_fini(struct radeon_device *rdev)
347 {
348         unsigned long irq_flags;
349
350         if (!rdev->fence_drv.initialized)
351                 return;
352         wake_up_all(&rdev->fence_drv.queue);
353         write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
354         radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
355         write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
356         rdev->fence_drv.initialized = false;
357 }
358
359
360 /*
361  * Fence debugfs
362  */
363 #if defined(CONFIG_DEBUG_FS)
364 static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
365 {
366         struct drm_info_node *node = (struct drm_info_node *)m->private;
367         struct drm_device *dev = node->minor->dev;
368         struct radeon_device *rdev = dev->dev_private;
369         struct radeon_fence *fence;
370
371         seq_printf(m, "Last signaled fence 0x%08X\n",
372                    RREG32(rdev->fence_drv.scratch_reg));
373         if (!list_empty(&rdev->fence_drv.emited)) {
374                    fence = list_entry(rdev->fence_drv.emited.prev,
375                                       struct radeon_fence, list);
376                    seq_printf(m, "Last emited fence %p with 0x%08X\n",
377                               fence,  fence->seq);
378         }
379         return 0;
380 }
381
/* Table of debugfs entries for the fence driver (currently one file). */
static struct drm_info_list radeon_debugfs_fence_list[] = {
        {"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
385 #endif
386
/*
 * Register the fence debugfs files.  Returns 0 when debugfs support is
 * compiled out or on success; propagates radeon_debugfs_add_files()'s
 * error otherwise.
 */
int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        /* ARRAY_SIZE() keeps the count in sync if entries are ever added
         * to radeon_debugfs_fence_list (was a hard-coded 1) */
        return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list,
                                        ARRAY_SIZE(radeon_debugfs_fence_list));
#else
        return 0;
#endif
}