nvhost: Fix tegra_host/status debug output
/*
 * drivers/video/tegra/host/t20/cdma_t20.c
 *
 * Tegra Graphics Host Command DMA
 *
 * Copyright (c) 2010-2011, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/slab.h>
#include "../nvhost_cdma.h"
#include "../dev.h"

#include "hardware_t20.h"

/*
 * push_buffer
 *
 * The push buffer is a circular array of words to be fetched by command DMA.
 * Note that it works slightly differently from the sync queue; fence == cur
 * means that the push buffer is full, not empty.
 */
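
/*
 * Worked example of the fence/cur accounting below (the size is illustrative
 * only; the real PUSH_BUFFER_SIZE comes from the headers above): with a
 * 4096-byte buffer, reset leaves cur = 0 and fence = 4088, so space()
 * reports (4088 - 0) / 8 = 511 free two-word slots.  Each push_to()
 * advances cur by 8; after 511 pushes cur == fence, space() returns 0 and
 * the buffer is full.  pop_from() advances fence as slots are retired,
 * with both pointers wrapping modulo PUSH_BUFFER_SIZE.
 */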

/**
 * Reset to empty push buffer
 */
static void t20_push_buffer_reset(struct push_buffer *pb)
{
        pb->fence = PUSH_BUFFER_SIZE - 8;
        pb->cur = 0;
}

/**
 * Init push buffer resources
 */
static int t20_push_buffer_init(struct push_buffer *pb)
{
        struct nvhost_cdma *cdma = pb_to_cdma(pb);
        struct nvmap_client *nvmap = cdma_to_nvmap(cdma);
        pb->mem = NULL;
        pb->mapped = NULL;
        pb->phys = 0;
        pb->handles = NULL;

        BUG_ON(!cdma_pb_op(cdma).reset);
        cdma_pb_op(cdma).reset(pb);

        /* allocate and map pushbuffer memory; one extra word at the end
         * holds the RESTART opcode written below */
        pb->mem = nvmap_alloc(nvmap, PUSH_BUFFER_SIZE + 4, 32,
                              NVMAP_HANDLE_WRITE_COMBINE);
        if (IS_ERR_OR_NULL(pb->mem)) {
                pb->mem = NULL;
                goto fail;
        }
        pb->mapped = nvmap_mmap(pb->mem);
        if (pb->mapped == NULL)
                goto fail;

        /* pin pushbuffer and get physical address */
        pb->phys = nvmap_pin(nvmap, pb->mem);
        if (pb->phys >= 0xfffff000) {
                pb->phys = 0;
                goto fail;
        }

        /* memory for storing nvmap handles for each opcode pair */
        pb->handles = kzalloc(PUSH_BUFFER_SIZE/2, GFP_KERNEL);
        if (!pb->handles)
                goto fail;

        /* put the restart at the end of pushbuffer memory so that command
         * DMA wraps back to the start of the buffer */
        *(pb->mapped + (PUSH_BUFFER_SIZE >> 2)) = nvhost_opcode_restart(pb->phys);

        return 0;

fail:
        cdma_pb_op(cdma).destroy(pb);
        return -ENOMEM;
}

/**
 * Clean up push buffer resources
 */
static void t20_push_buffer_destroy(struct push_buffer *pb)
{
        struct nvhost_cdma *cdma = pb_to_cdma(pb);
        struct nvmap_client *nvmap = cdma_to_nvmap(cdma);
        if (pb->mapped)
                nvmap_munmap(pb->mem, pb->mapped);

        if (pb->phys != 0)
                nvmap_unpin(nvmap, pb->mem);

        if (pb->mem)
                nvmap_free(nvmap, pb->mem);

        kfree(pb->handles);

        pb->mem = NULL;
        pb->mapped = NULL;
        pb->phys = 0;
        pb->handles = NULL;
}

/**
 * Push two words to the push buffer
 * Caller must ensure push buffer is not full
 */
static void t20_push_buffer_push_to(struct push_buffer *pb,
                        struct nvmap_handle *handle, u32 op1, u32 op2)
{
        u32 cur = pb->cur;
        u32 *p = (u32 *)((u32)pb->mapped + cur);
        BUG_ON(cur == pb->fence);
        *(p++) = op1;
        *(p++) = op2;
        pb->handles[cur/8] = handle;
        pb->cur = (cur + 8) & (PUSH_BUFFER_SIZE - 1);
        /* printk("push_to_push_buffer: op1=%08x; op2=%08x; cur=%x\n", op1, op2, pb->cur); */
}

/**
 * Pop a number of two-word slots from the push buffer
 * Caller must ensure push buffer is not empty
 */
static void t20_push_buffer_pop_from(struct push_buffer *pb, unsigned int slots)
{
        pb->fence = (pb->fence + slots * 8) & (PUSH_BUFFER_SIZE - 1);
}

/**
 * Return the number of two-word slots free in the push buffer
 */
static u32 t20_push_buffer_space(struct push_buffer *pb)
{
        return ((pb->fence - pb->cur) & (PUSH_BUFFER_SIZE - 1)) / 8;
}

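/**
 * Return the DMA address of the current PUT position, i.e. where the
 * next opcode pair will be written
 */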
static u32 t20_push_buffer_putptr(struct push_buffer *pb)
{
        return pb->phys + pb->cur;
}

/**
 * Start channel DMA
 */
static void t20_cdma_start(struct nvhost_cdma *cdma)
{
        void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;

        if (cdma->running)
                return;

        BUG_ON(!cdma_pb_op(cdma).putptr);

        cdma->last_put = cdma_pb_op(cdma).putptr(&cdma->push_buffer);

        writel(nvhost_channel_dmactrl(true, false, false),
                chan_regs + HOST1X_CHANNEL_DMACTRL);

        /* set base, put, end pointer (all of memory) */
        writel(0, chan_regs + HOST1X_CHANNEL_DMASTART);
        writel(cdma->last_put, chan_regs + HOST1X_CHANNEL_DMAPUT);
        writel(0xFFFFFFFF, chan_regs + HOST1X_CHANNEL_DMAEND);

        /* reset GET */
        writel(nvhost_channel_dmactrl(true, true, true),
                chan_regs + HOST1X_CHANNEL_DMACTRL);

        /* start the command DMA */
        writel(nvhost_channel_dmactrl(false, false, false),
                chan_regs + HOST1X_CHANNEL_DMACTRL);

        cdma->running = true;
}

/**
 * Kick channel DMA into action by writing its PUT offset (if it has changed)
 */
static void t20_cdma_kick(struct nvhost_cdma *cdma)
{
        u32 put;
        BUG_ON(!cdma_pb_op(cdma).putptr);

        put = cdma_pb_op(cdma).putptr(&cdma->push_buffer);

        if (put != cdma->last_put) {
                void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
                wmb();
                writel(put, chan_regs + HOST1X_CHANNEL_DMAPUT);
                cdma->last_put = put;
        }
}

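/**
 * Stop channel DMA, first waiting for the sync queue to drain
 */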
static void t20_cdma_stop(struct nvhost_cdma *cdma)
{
        void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;

        mutex_lock(&cdma->lock);
        if (cdma->running) {
                nvhost_cdma_wait(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY);
                writel(nvhost_channel_dmactrl(true, false, false),
                        chan_regs + HOST1X_CHANNEL_DMACTRL);
                cdma->running = false;
        }
        mutex_unlock(&cdma->lock);
}

/**
 * Retrieve the op pair at a slot offset from a DMA address
 */
void t20_cdma_peek(struct nvhost_cdma *cdma,
                          u32 dmaget, int slot, u32 *out)
{
        u32 offset = dmaget - cdma->push_buffer.phys;
        u32 *p = cdma->push_buffer.mapped;

        offset = ((offset + slot * 8) & (PUSH_BUFFER_SIZE - 1)) >> 2;
        out[0] = p[offset];
        out[1] = p[offset + 1];
}

int nvhost_init_t20_cdma_support(struct nvhost_master *host)
{
        host->op.cdma.start = t20_cdma_start;
        host->op.cdma.stop = t20_cdma_stop;
        host->op.cdma.kick = t20_cdma_kick;

        host->sync_queue_size = NVHOST_SYNC_QUEUE_SIZE;

        host->op.push_buffer.reset = t20_push_buffer_reset;
        host->op.push_buffer.init = t20_push_buffer_init;
        host->op.push_buffer.destroy = t20_push_buffer_destroy;
        host->op.push_buffer.push_to = t20_push_buffer_push_to;
        host->op.push_buffer.pop_from = t20_push_buffer_pop_from;
        host->op.push_buffer.space = t20_push_buffer_space;
        host->op.push_buffer.putptr = t20_push_buffer_putptr;

        return 0;
}
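
/*
 * Illustrative sketch only (not part of the driver): roughly how the generic
 * CDMA code in ../nvhost_cdma.c is expected to drive the ops wired up above.
 * Locking, sync-queue bookkeeping and error handling are omitted; "handle",
 * "op1" and "op2" stand in for a real nvmap handle and opcode pair.
 *
 *      struct push_buffer *pb = &cdma->push_buffer;
 *
 *      if (cdma_pb_op(cdma).space(pb)) {
 *              cdma_pb_op(cdma).push_to(pb, handle, op1, op2);
 *              host->op.cdma.kick(cdma);
 *      }
 */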