video: tegra: host: Exclude suspend/resume if disabled
[linux-3.10.git] drivers/video/tegra/host/gr3d/gr3d.c
/*
 * drivers/video/tegra/host/gr3d/gr3d.c
 *
 * Tegra Graphics Host 3D
 *
 * Copyright (c) 2012 NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/module.h>
#include <mach/gpufuse.h>

#include "t20/t20.h"
#include "host1x/host1x01_hardware.h"
#include "nvhost_hwctx.h"
#include "dev.h"
#include "gr3d.h"
#include "gr3d_t20.h"
#include "gr3d_t30.h"
#include "gr3d_t114.h"
#include "scale3d.h"
#include "bus_client.h"
#include "nvhost_channel.h"
#include "nvhost_memmgr.h"
#include "chip_support.h"

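/*
 * Helpers shared by the per-SoC context handlers (T20/T30/T114) for
 * emitting the host1x opcodes that make up the context-restore buffer.
 */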
void nvhost_3dctx_restore_begin(struct host1x_hwctx_handler *p, u32 *ptr)
{
        /* set class to host */
        ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
                                        host1x_uclass_incr_syncpt_base_r(), 1);
        /* increment sync point base */
        ptr[1] = nvhost_class_host_incr_syncpt_base(p->waitbase,
                        p->restore_incrs);
        /* set class to 3D */
        ptr[2] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
        /* program PSEQ_QUAD_ID */
        ptr[3] = nvhost_opcode_imm(AR3D_PSEQ_QUAD_ID, 0);
}

void nvhost_3dctx_restore_direct(u32 *ptr, u32 start_reg, u32 count)
{
        ptr[0] = nvhost_opcode_incr(start_reg, count);
}

void nvhost_3dctx_restore_indirect(u32 *ptr, u32 offset_reg, u32 offset,
                        u32 data_reg, u32 count)
{
        ptr[0] = nvhost_opcode_imm(offset_reg, offset);
        ptr[1] = nvhost_opcode_nonincr(data_reg, count);
}

void nvhost_3dctx_restore_end(struct host1x_hwctx_handler *p, u32 *ptr)
{
        /* syncpt increment to track restore gather. */
        ptr[0] = nvhost_opcode_imm_incr_syncpt(
                        host1x_uclass_incr_syncpt_cond_op_done_v(), p->syncpt);
}

/*** ctx3d ***/
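/*
 * Hardware context lifecycle: nvhost_3dctx_alloc_common() allocates the
 * context and its restore buffer (optionally mapped for CPU access) and
 * pins it for the device. Contexts are reference counted via
 * nvhost_3dctx_get()/nvhost_3dctx_put(); nvhost_3dctx_free() unmaps,
 * unpins and releases the restore buffer on the final put.
 */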
struct host1x_hwctx *nvhost_3dctx_alloc_common(struct host1x_hwctx_handler *p,
                struct nvhost_channel *ch, bool map_restore)
{
        struct mem_mgr *memmgr = nvhost_get_host(ch->dev)->memmgr;
        struct host1x_hwctx *ctx;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return NULL;
        ctx->restore = mem_op().alloc(memmgr, p->restore_size * 4, 32,
                map_restore ? mem_mgr_flag_write_combine
                            : mem_mgr_flag_uncacheable);
        if (IS_ERR_OR_NULL(ctx->restore))
                goto fail;

        if (map_restore) {
                ctx->restore_virt = mem_op().mmap(ctx->restore);
                if (!ctx->restore_virt)
                        goto fail;
        } else
                ctx->restore_virt = NULL;

        kref_init(&ctx->hwctx.ref);
        ctx->hwctx.h = &p->h;
        ctx->hwctx.channel = ch;
        ctx->hwctx.valid = false;
        ctx->save_incrs = p->save_incrs;
        ctx->save_thresh = p->save_thresh;
        ctx->save_slots = p->save_slots;
        ctx->restore_phys = mem_op().pin(memmgr, ctx->restore);
        if (IS_ERR_VALUE(ctx->restore_phys))
                goto fail;

        ctx->restore_size = p->restore_size;
        ctx->restore_incrs = p->restore_incrs;
        return ctx;

fail:
        if (map_restore && ctx->restore_virt) {
                mem_op().munmap(ctx->restore, ctx->restore_virt);
                ctx->restore_virt = NULL;
        }
        mem_op().put(memmgr, ctx->restore);
        ctx->restore = NULL;
        kfree(ctx);
        return NULL;
}

void nvhost_3dctx_get(struct nvhost_hwctx *ctx)
{
        kref_get(&ctx->ref);
}

void nvhost_3dctx_free(struct kref *ref)
{
        struct nvhost_hwctx *nctx = container_of(ref, struct nvhost_hwctx, ref);
        struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
        struct mem_mgr *memmgr = nvhost_get_host(nctx->channel->dev)->memmgr;

        if (ctx->restore_virt) {
                mem_op().munmap(ctx->restore, ctx->restore_virt);
                ctx->restore_virt = NULL;
        }
        mem_op().unpin(memmgr, ctx->restore);
        ctx->restore_phys = 0;
        mem_op().put(memmgr, ctx->restore);
        ctx->restore = NULL;
        kfree(ctx);
}

void nvhost_3dctx_put(struct nvhost_hwctx *ctx)
{
        kref_put(&ctx->ref, nvhost_3dctx_free);
}

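/* Save the channel's 3D context before the unit is power-gated. */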
int nvhost_gr3d_prepare_power_off(struct nvhost_device *dev)
{
        return nvhost_channel_save_context(dev->channel);
}

enum gr3d_ip_ver {
        gr3d_01 = 1,
        gr3d_02,
        gr3d_03,
};

struct gr3d_desc {
        void (*finalize_poweron)(struct nvhost_device *dev);
        void (*busy)(struct nvhost_device *);
        void (*idle)(struct nvhost_device *);
        void (*suspend_ndev)(struct nvhost_device *);
        void (*init)(struct nvhost_device *dev);
        void (*deinit)(struct nvhost_device *dev);
        int (*prepare_poweroff)(struct nvhost_device *dev);
        struct nvhost_hwctx_handler *(*alloc_hwctx_handler)(u32 syncpt,
                        u32 waitbase, struct nvhost_channel *ch);
};

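/*
 * One descriptor per IP version: gr3d_01 uses the T20 context handler,
 * gr3d_02 the T30 handler (with 3D frequency scaling hooks), gr3d_03 the
 * T114 handler.
 */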
static const struct gr3d_desc gr3d[] = {
        [gr3d_01] = {
                .finalize_poweron = NULL,
                .busy = NULL,
                .idle = NULL,
                .suspend_ndev = NULL,
                .init = NULL,
                .deinit = NULL,
                .prepare_poweroff = nvhost_gr3d_prepare_power_off,
                .alloc_hwctx_handler = nvhost_gr3d_t20_ctxhandler_init,
        },
        [gr3d_02] = {
                .finalize_poweron = NULL,
                .busy = nvhost_scale3d_notify_busy,
                .idle = nvhost_scale3d_notify_idle,
                .suspend_ndev = nvhost_scale3d_suspend,
                .init = nvhost_scale3d_init,
                .deinit = nvhost_scale3d_deinit,
                .prepare_poweroff = nvhost_gr3d_prepare_power_off,
                .alloc_hwctx_handler = nvhost_gr3d_t30_ctxhandler_init,
        },
        [gr3d_03] = {
                .finalize_poweron = NULL,
                .busy = nvhost_scale3d_notify_busy,
                .idle = nvhost_scale3d_notify_idle,
                .suspend_ndev = nvhost_scale3d_suspend,
                .init = nvhost_scale3d_init,
                .deinit = nvhost_scale3d_deinit,
                .prepare_poweroff = nvhost_gr3d_prepare_power_off,
                .alloc_hwctx_handler = nvhost_gr3d_t114_ctxhandler_init,
        },
};

static struct nvhost_device_id gr3d_id[] = {
        { "gr3d", gr3d_01 },
        { "gr3d", gr3d_02 },
        { "gr3d", gr3d_03 },
        { },
};

MODULE_DEVICE_TABLE(nvhost, gr3d_id);

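/*
 * The version field of the gr3d_id[] entry matched at probe time indexes
 * the gr3d[] descriptor table, so probe can wire the per-SoC callbacks
 * into the driver before registering the client device.
 */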
static int gr3d_probe(struct nvhost_device *dev,
        struct nvhost_device_id *id_table)
{
        int index = 0;
        struct nvhost_driver *drv = to_nvhost_driver(dev->dev.driver);

        index = id_table->version;

        drv->finalize_poweron           = gr3d[index].finalize_poweron;
        drv->busy                       = gr3d[index].busy;
        drv->idle                       = gr3d[index].idle;
        drv->suspend_ndev               = gr3d[index].suspend_ndev;
        drv->init                       = gr3d[index].init;
        drv->deinit                     = gr3d[index].deinit;
        drv->prepare_poweroff           = gr3d[index].prepare_poweroff;
        drv->alloc_hwctx_handler        = gr3d[index].alloc_hwctx_handler;

        nvhost_set_register_sets(tegra_gpu_register_sets());
        return nvhost_client_device_init(dev);
}

static int __exit gr3d_remove(struct nvhost_device *dev)
{
        /* Add clean-up */
        return 0;
}

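/*
 * Suspend/resume support is compiled in only when CONFIG_PM is enabled;
 * otherwise the callbacks below are omitted and the .suspend/.resume
 * hooks are not set in gr3d_driver.
 */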
#ifdef CONFIG_PM
static int gr3d_suspend(struct nvhost_device *dev, pm_message_t state)
{
        return nvhost_client_device_suspend(dev);
}

static int gr3d_resume(struct nvhost_device *dev)
{
        dev_info(&dev->dev, "resuming\n");
        return 0;
}
#endif

static struct nvhost_driver gr3d_driver = {
        .probe = gr3d_probe,
        .remove = __exit_p(gr3d_remove),
#ifdef CONFIG_PM
        .suspend = gr3d_suspend,
        .resume = gr3d_resume,
#endif
        .driver = {
                .owner = THIS_MODULE,
                .name = "gr3d",
        },
        .id_table = gr3d_id,
};

static int __init gr3d_init(void)
{
        return nvhost_driver_register(&gr3d_driver);
}

static void __exit gr3d_exit(void)
{
        nvhost_driver_unregister(&gr3d_driver);
}

module_init(gr3d_init);
module_exit(gr3d_exit);