video: tegra: host: Support multi-syncpt submits
1 /*
2  * drivers/video/tegra/host/bus_client.c
3  *
4  * Tegra Graphics Host Client Module
5  *
6  * Copyright (c) 2010-2013, NVIDIA Corporation.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms and conditions of the GNU General Public License,
10  * version 2, as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  * more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
19  */
20
21 #include <linux/slab.h>
22 #include <linux/string.h>
23 #include <linux/spinlock.h>
24 #include <linux/fs.h>
25 #include <linux/cdev.h>
26 #include <linux/uaccess.h>
27 #include <linux/file.h>
28 #include <linux/clk.h>
29 #include <linux/hrtimer.h>
30 #include <linux/export.h>
31 #include <linux/firmware.h>
32
33 #include <trace/events/nvhost.h>
34
35 #include <linux/io.h>
37
38 #include <linux/nvhost.h>
39 #include <linux/nvhost_ioctl.h>
40
41 #include <mach/gpufuse.h>
42 #include <mach/hardware.h>
43
44 #include "debug.h"
45 #include "bus_client.h"
46 #include "dev.h"
47 #include "nvhost_memmgr.h"
48 #include "chip_support.h"
49 #include "nvhost_acm.h"
50
51 #include "nvhost_syncpt.h"
52 #include "nvhost_channel.h"
53 #include "nvhost_job.h"
54 #include "nvhost_hwctx.h"
55 #include "user_hwctx.h"
56
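/*
 * Check that the register window [offset, offset + 4 * count) lies within
 * the device's first memory aperture and does not wrap around.
 */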
57 static int validate_reg(struct platform_device *ndev, u32 offset, int count)
58 {
59         struct resource *r = platform_get_resource(ndev, IORESOURCE_MEM, 0);
60         int err = 0;
61
           if (!r)
                   return -ENODEV;

62         if (offset + 4 * count > resource_size(r)
63                         || (offset + 4 * count < offset))
64                 err = -EPERM;
65
66         return err;
67 }
68
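/*
 * Read 'count' registers starting at 'offset' from the client's aperture
 * into 'values'. The module is kept powered (busy) for the duration of the
 * access; nvhost_write_module_regs() below is the write-side counterpart.
 */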
69 int nvhost_read_module_regs(struct platform_device *ndev,
70                         u32 offset, int count, u32 *values)
71 {
72         struct nvhost_device_data *pdata = platform_get_drvdata(ndev);
73         void __iomem *p = pdata->aperture[0] + offset;
74         int err;
75
76         /* verify offset */
77         err = validate_reg(ndev, offset, count);
78         if (err)
79                 return err;
80
81         nvhost_module_busy(ndev);
82         while (count--) {
83                 *(values++) = readl(p);
84                 p += 4;
85         }
86         rmb();
87         nvhost_module_idle(ndev);
88
89         return 0;
90 }
91
92 int nvhost_write_module_regs(struct platform_device *ndev,
93                         u32 offset, int count, const u32 *values)
94 {
95         void __iomem *p;
96         int err;
97         struct nvhost_device_data *pdata = platform_get_drvdata(ndev);
98
99         p = pdata->aperture[0] + offset;
100
101         /* verify offset */
102         err = validate_reg(ndev, offset, count);
103         if (err)
104                 return err;
105
106         nvhost_module_busy(ndev);
107         while (count--) {
108                 writel(*(values++), p);
109                 p += 4;
110         }
111         wmb();
112         nvhost_module_idle(ndev);
113
114         return 0;
115 }
116
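/*
 * Per-file-descriptor state for an opened channel node: the channel and its
 * optional hardware context, the legacy submit header being assembled
 * through write(), the memory manager handle, and per-client settings such
 * as timeout, priority and timeout debug dumping.
 */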
117 struct nvhost_channel_userctx {
118         struct nvhost_channel *ch;
119         struct nvhost_hwctx *hwctx;
120         struct nvhost_submit_hdr_ext hdr;
121         int num_relocshifts;
122         struct nvhost_job *job;
123         struct mem_mgr *memmgr;
124         u32 timeout;
125         u32 priority;
126         int clientid;
127         bool timeout_debug_dump;
128 };
129
130 static int nvhost_channelrelease(struct inode *inode, struct file *filp)
131 {
132         struct nvhost_channel_userctx *priv = filp->private_data;
133
134         trace_nvhost_channel_release(dev_name(&priv->ch->dev->dev));
135
136         filp->private_data = NULL;
137
138         nvhost_module_remove_client(priv->ch->dev, priv);
139         nvhost_putchannel(priv->ch, priv->hwctx);
140
141         if (priv->hwctx)
142                 priv->hwctx->h->put(priv->hwctx);
143
144         if (priv->job)
145                 nvhost_job_put(priv->job);
146
147         nvhost_memmgr_put_mgr(priv->memmgr);
148         kfree(priv);
149         return 0;
150 }
151
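/*
 * Open a channel node: take a reference on the channel, allocate the per-fd
 * context and, if the engine provides a context handler, a hardware context.
 * The default timeout and priority are applied here.
 */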
152 static int nvhost_channelopen(struct inode *inode, struct file *filp)
153 {
154         struct nvhost_channel_userctx *priv;
155         struct nvhost_channel *ch;
156
157         ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
158         ch = nvhost_getchannel(ch);
159         if (!ch)
160                 return -ENOMEM;
161         trace_nvhost_channel_open(dev_name(&ch->dev->dev));
162
163         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
164         if (!priv) {
165                 nvhost_putchannel(ch, NULL);
166                 return -ENOMEM;
167         }
168         filp->private_data = priv;
169         priv->ch = ch;
170         if (nvhost_module_add_client(ch->dev, priv))
171                 goto fail;
172
173         if (ch->ctxhandler && ch->ctxhandler->alloc) {
174                 priv->hwctx = ch->ctxhandler->alloc(ch->ctxhandler, ch);
175                 if (!priv->hwctx)
176                         goto fail;
177         }
178         priv->priority = NVHOST_PRIORITY_MEDIUM;
179         priv->clientid = atomic_add_return(1,
180                         &nvhost_get_host(ch->dev)->clientid);
181         priv->timeout = CONFIG_TEGRA_GRHOST_DEFAULT_TIMEOUT;
182         priv->timeout_debug_dump = true;
183         if (tegra_platform_is_linsim())
184                 priv->timeout = 0;
185
186         return 0;
187 fail:
188         nvhost_channelrelease(inode, filp);
189         return -ENOMEM;
190 }
191
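/*
 * Allocate a job for the legacy (write()/FLUSH) submit path from the header
 * stored in the user context. This path always uses a single syncpoint,
 * taken from hdr.syncpt_id.
 */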
192 static int set_submit(struct nvhost_channel_userctx *ctx)
193 {
194         struct platform_device *ndev = ctx->ch->dev;
195         struct nvhost_master *host = nvhost_get_host(ndev);
196
197         /* a submit needs at least one cmdbuf and a valid syncpoint id */
198         if (!ctx->hdr.num_cmdbufs ||
199                         !nvhost_syncpt_is_valid(&host->syncpt,
200                                 ctx->hdr.syncpt_id))
201                 return -EIO;
202
203         if (!ctx->memmgr) {
204                 dev_err(&ndev->dev, "no nvmap context set\n");
205                 return -EFAULT;
206         }
207
208         if (ctx->job) {
209                 dev_warn(&ndev->dev, "performing channel submit when a job already exists\n");
210                 nvhost_job_put(ctx->job);
211         }
212         ctx->job = nvhost_job_alloc(ctx->ch,
213                         ctx->hwctx,
214                         ctx->hdr.num_cmdbufs,
215                         ctx->hdr.num_relocs,
216                         ctx->hdr.num_waitchks,
217                         1,
218                         ctx->memmgr);
219         if (!ctx->job)
220                 return -ENOMEM;
221         ctx->job->timeout = ctx->timeout;
222         ctx->job->sp->id = ctx->hdr.syncpt_id;
223         ctx->job->sp->incrs = ctx->hdr.syncpt_incrs;
224         ctx->job->hwctx_syncpt_idx = 0;
225         ctx->job->num_syncpts = 1;
226         ctx->job->priority = ctx->priority;
227         ctx->job->clientid = ctx->clientid;
228         ctx->job->timeout_debug_dump = ctx->timeout_debug_dump;
229
230         if (ctx->hdr.submit_version >= NVHOST_SUBMIT_VERSION_V2)
231                 ctx->num_relocshifts = ctx->hdr.num_relocs;
232
233         return 0;
234 }
235
236 static void reset_submit(struct nvhost_channel_userctx *ctx)
237 {
238         ctx->hdr.num_cmdbufs = 0;
239         ctx->hdr.num_relocs = 0;
240         ctx->num_relocshifts = 0;
241         ctx->hdr.num_waitchks = 0;
242
243         if (ctx->job) {
244                 nvhost_job_put(ctx->job);
245                 ctx->job = NULL;
246         }
247 }
248
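/*
 * Legacy submit interface: userspace streams a struct nvhost_submit_hdr
 * followed by the command buffers, relocations, wait checks and (for submit
 * version >= 2) relocation shifts announced in the header. The data is
 * accumulated into priv->job until the FLUSH ioctl kicks it off.
 */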
249 static ssize_t nvhost_channelwrite(struct file *filp, const char __user *buf,
250                                 size_t count, loff_t *offp)
251 {
252         struct nvhost_channel_userctx *priv = filp->private_data;
253         size_t remaining = count;
254         int err = 0;
255         struct nvhost_job *job = priv->job;
256         struct nvhost_submit_hdr_ext *hdr = &priv->hdr;
257         const char *chname = priv->ch->dev->name;
258
259         if (!job)
260                 return -EIO;
261
262         while (remaining) {
263                 size_t consumed;
264                 if (!hdr->num_relocs &&
265                     !priv->num_relocshifts &&
266                     !hdr->num_cmdbufs &&
267                     !hdr->num_waitchks) {
268                         consumed = sizeof(struct nvhost_submit_hdr);
269                         if (remaining < consumed)
270                                 break;
271                         if (copy_from_user(hdr, buf, consumed)) {
272                                 err = -EFAULT;
273                                 break;
274                         }
275                         hdr->submit_version = NVHOST_SUBMIT_VERSION_V0;
276                         err = set_submit(priv);
277                         if (err)
278                                 break;
279                         trace_nvhost_channel_write_submit(chname,
280                           count, hdr->num_cmdbufs, hdr->num_relocs,
281                           hdr->syncpt_id, hdr->syncpt_incrs);
282                 } else if (hdr->num_cmdbufs) {
283                         struct nvhost_cmdbuf cmdbuf;
284                         consumed = sizeof(cmdbuf);
285                         if (remaining < consumed)
286                                 break;
287                         if (copy_from_user(&cmdbuf, buf, consumed)) {
288                                 err = -EFAULT;
289                                 break;
290                         }
291                         trace_nvhost_channel_write_cmdbuf(chname,
292                                 cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
293                         nvhost_job_add_gather(job,
294                                 cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
295                         hdr->num_cmdbufs--;
296                 } else if (hdr->num_relocs) {
297                         int numrelocs = remaining / sizeof(struct nvhost_reloc);
298                         if (!numrelocs)
299                                 break;
300                         numrelocs = min_t(int, numrelocs, priv->hdr.num_relocs);
301                         consumed = numrelocs * sizeof(struct nvhost_reloc);
302                         if (copy_from_user(&job->relocarray[job->num_relocs],
303                                         buf, consumed)) {
304                                 err = -EFAULT;
305                                 break;
306                         }
307                         while (numrelocs) {
308                                 struct nvhost_reloc *reloc =
309                                         &job->relocarray[job->num_relocs];
310                                 trace_nvhost_channel_write_reloc(chname,
311                                         reloc->cmdbuf_mem,
312                                         reloc->cmdbuf_offset,
313                                         reloc->target,
314                                         reloc->target_offset);
315                                 job->num_relocs++;
316                                 hdr->num_relocs--;
317                                 numrelocs--;
318                         }
319                 } else if (hdr->num_waitchks) {
320                         int numwaitchks =
321                                 (remaining / sizeof(struct nvhost_waitchk));
322                         if (!numwaitchks)
323                                 break;
324                         numwaitchks = min_t(int,
325                                 numwaitchks, hdr->num_waitchks);
326                         consumed = numwaitchks * sizeof(struct nvhost_waitchk);
327                         if (copy_from_user(&job->waitchk[job->num_waitchk],
328                                         buf, consumed)) {
329                                 err = -EFAULT;
330                                 break;
331                         }
332                         trace_nvhost_channel_write_waitchks(
333                           chname, numwaitchks);
334                         job->num_waitchk += numwaitchks;
335                         hdr->num_waitchks -= numwaitchks;
336                 } else if (priv->num_relocshifts) {
337                         int next_shift =
338                                 job->num_relocs - priv->num_relocshifts;
339                         int num =
340                                 (remaining / sizeof(struct nvhost_reloc_shift));
341                         if (!num)
342                                 break;
343                         num = min_t(int, num, priv->num_relocshifts);
344                         consumed = num * sizeof(struct nvhost_reloc_shift);
345                         if (copy_from_user(&job->relocshiftarray[next_shift],
346                                         buf, consumed)) {
347                                 err = -EFAULT;
348                                 break;
349                         }
350                         priv->num_relocshifts -= num;
351                 } else {
352                         err = -EFAULT;
353                         break;
354                 }
355                 remaining -= consumed;
356                 buf += consumed;
357         }
358
359         if (err < 0) {
360                 dev_err(&priv->ch->dev->dev, "channel write error\n");
361                 reset_submit(priv);
362                 return err;
363         }
364
365         return count - remaining;
366 }
367
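/*
 * FLUSH/NULL_KICKOFF ioctl: pin and submit the job accumulated through
 * write(). The resulting syncpoint fence is returned in args->value.
 */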
368 static int nvhost_ioctl_channel_flush(
369         struct nvhost_channel_userctx *ctx,
370         struct nvhost_get_param_args *args,
371         int null_kickoff)
372 {
373         struct platform_device *ndev = to_platform_device(&ctx->ch->dev->dev);
374         int err;
375
376         trace_nvhost_ioctl_channel_flush(ctx->ch->dev->name);
377
378         if (!ctx->job ||
379             ctx->hdr.num_relocs ||
380             ctx->hdr.num_cmdbufs ||
381             ctx->hdr.num_waitchks) {
382                 reset_submit(ctx);
383                 dev_err(&ndev->dev, "channel submit out of sync\n");
384                 return -EFAULT;
385         }
386
387         err = nvhost_job_pin(ctx->job, &nvhost_get_host(ndev)->syncpt);
388         if (err) {
389                 dev_warn(&ndev->dev, "nvhost_job_pin failed: %d\n", err);
390                 goto fail;
391         }
392
393         if (nvhost_debug_null_kickoff_pid == current->tgid)
394                 null_kickoff = 1;
395         ctx->job->null_kickoff = null_kickoff;
396
397         if ((nvhost_debug_force_timeout_pid == current->tgid) &&
398             (nvhost_debug_force_timeout_channel == ctx->ch->chid)) {
399                 ctx->timeout = nvhost_debug_force_timeout_val;
400         }
401
402         /* context switch if needed, and submit user's gathers to the channel */
403         err = nvhost_channel_submit(ctx->job);
404         args->value = ctx->job->sp->fence;
405
406 fail:
407         if (err)
408                 nvhost_job_unpin(ctx->job);
409
410         nvhost_job_put(ctx->job);
411         ctx->job = NULL;
412
413         return err;
414 }
415
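/*
 * SUBMIT ioctl: build and kick off a job directly from the arrays passed in
 * struct nvhost_submit_args. Unlike the legacy path this supports several
 * syncpoint increments per submit; when a fences array is supplied, one
 * fence per syncpoint is copied back through it, and args->fence carries
 * the fence of the hwctx syncpoint.
 */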
416 static int nvhost_ioctl_channel_submit(struct nvhost_channel_userctx *ctx,
417                 struct nvhost_submit_args *args)
418 {
419         struct nvhost_job *job;
420         int num_cmdbufs = args->num_cmdbufs;
421         int num_relocs = args->num_relocs;
422         int num_waitchks = args->num_waitchks;
423         int num_syncpt_incrs = args->num_syncpt_incrs;
424         struct nvhost_cmdbuf __user *cmdbufs = args->cmdbufs;
425         struct nvhost_reloc __user *relocs = args->relocs;
426         struct nvhost_reloc_shift __user *reloc_shifts = args->reloc_shifts;
427         struct nvhost_waitchk __user *waitchks = args->waitchks;
428         struct nvhost_syncpt_incr __user *syncpt_incrs = args->syncpt_incrs;
429         u32 __user *waitbases = args->waitbases;
430         u32 __user *fences = args->fences;
431
432         struct nvhost_master *host = nvhost_get_host(ctx->ch->dev);
433         u32 *local_waitbases = NULL;
434         int err, i, hwctx_syncpt_idx = -1;
435
436         if (num_syncpt_incrs > host->info.nb_pts)
437                 return -EINVAL;
438
439         job = nvhost_job_alloc(ctx->ch,
440                         ctx->hwctx,
441                         num_cmdbufs,
442                         num_relocs,
443                         num_waitchks,
444                         num_syncpt_incrs,
445                         ctx->memmgr);
446         if (!job)
447                 return -ENOMEM;
448
449         job->num_relocs = args->num_relocs;
450         job->num_waitchk = args->num_waitchks;
451         job->num_syncpts = args->num_syncpt_incrs;
452         job->priority = ctx->priority;
453         job->clientid = ctx->clientid;
454
455         while (num_cmdbufs) {
456                 struct nvhost_cmdbuf cmdbuf;
457                 err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf));
458                 if (err)
459                         goto fail;
460                 nvhost_job_add_gather(job,
461                                 cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
462                 num_cmdbufs--;
463                 cmdbufs++;
464         }
465
466         err = copy_from_user(job->relocarray,
467                         relocs, sizeof(*relocs) * num_relocs);
468         if (err)
469                 goto fail;
470
471         err = copy_from_user(job->relocshiftarray,
472                         reloc_shifts, sizeof(*reloc_shifts) * num_relocs);
473         if (err)
474                 goto fail;
475
476         err = copy_from_user(job->waitchk,
477                         waitchks, sizeof(*waitchks) * num_waitchks);
478         if (err)
479                 goto fail;
480
481         /* mass copy waitbases */
482         if (args->waitbases) {
483                 local_waitbases = kzalloc(sizeof(u32) * num_syncpt_incrs,
484                         GFP_KERNEL);
                    if (!local_waitbases) {
                            err = -ENOMEM;
                            goto fail;
                    }
485                 err = copy_from_user(local_waitbases, waitbases,
486                         sizeof(u32) * num_syncpt_incrs);
487                 if (err) {
488                         err = -EINVAL;
489                         goto fail;
490                 }
491         }
492
493         /* without a hwctx any slot is valid for hwctx_syncpt_idx; default to 0 */
494         if (!ctx->hwctx)
495                 hwctx_syncpt_idx = 0;
496
497         /*
498          * Go through each syncpoint from userspace. Here we:
499          * - Copy syncpoint information
500          * - Validate each syncpoint
501          * - Determine waitbase for each syncpoint
502          * - Determine the index of hwctx syncpoint in the table
503          */
504
505         for (i = 0; i < num_syncpt_incrs; ++i) {
506                 u32 waitbase;
507                 struct nvhost_syncpt_incr sp;
508
509                 /* Copy */
510                 err = copy_from_user(&sp, syncpt_incrs + i, sizeof(sp));
511                 if (err)
512                         goto fail;
513
514                 /* Validate */
515                 if (sp.syncpt_id >= host->info.nb_pts) {
516                         err = -EINVAL;
517                         goto fail;
518                 }
519
520                 /* Determine waitbase */
521                 if (waitbases && local_waitbases[i] != NVSYNCPT_INVALID)
522                         waitbase = local_waitbases[i];
523                 else
524                         waitbase = nvhost_syncpt_get_waitbase(job->ch,
525                                 sp.syncpt_id);
526
527                 /* Store */
528                 job->sp[i].id = sp.syncpt_id;
529                 job->sp[i].incrs = sp.syncpt_incrs;
530                 job->sp[i].waitbase = waitbase;
531
532                 /* Find hwctx syncpoint */
533                 if (ctx->hwctx && (job->sp[i].id == ctx->hwctx->h->syncpt))
534                         hwctx_syncpt_idx = i;
535         }
536
537         /* not needed anymore */
538         kfree(local_waitbases);
539         local_waitbases = NULL;
540
541         /* Is hwctx_syncpt_idx valid? */
542         if (hwctx_syncpt_idx == -1) {
543                 err = -EINVAL;
544                 goto fail;
545         }
546
547         job->hwctx_syncpt_idx = hwctx_syncpt_idx;
548
549         trace_nvhost_channel_submit(ctx->ch->dev->name,
550                 job->num_gathers, job->num_relocs, job->num_waitchk,
551                 job->sp[job->hwctx_syncpt_idx].id,
552                 job->sp[job->hwctx_syncpt_idx].incrs);
553
554         err = nvhost_job_pin(job, &nvhost_get_host(ctx->ch->dev)->syncpt);
555         if (err)
556                 goto fail;
557
558         if (args->timeout)
559                 job->timeout = min(ctx->timeout, args->timeout);
560         else
561                 job->timeout = ctx->timeout;
562         job->timeout_debug_dump = ctx->timeout_debug_dump;
563
564         err = nvhost_channel_submit(job);
565         if (err)
566                 goto fail_submit;
567
568         /* Deliver multiple fences back to the userspace */
569         if (fences)
570                 for (i = 0; i < num_syncpt_incrs; ++i) {
571                         u32 fence = job->sp[i].fence;
572                         err = copy_to_user(fences, &fence, sizeof(u32));
573                         if (err)
574                                 break;
575                         fences++;
576                 }
577
578         args->fence = job->sp[job->hwctx_syncpt_idx].fence;
579
580         nvhost_job_put(job);
581
582         return 0;
583
584 fail_submit:
585         nvhost_job_unpin(job);
586 fail:
587         nvhost_job_put(job);
588         kfree(local_waitbases);
589         return err;
590 }
591
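/*
 * SET_CTXSWITCH ioctl: replace the channel's hardware context with one whose
 * save/restore command buffers are supplied by userspace. Only a single
 * cmdbuf, reloc and syncpoint increment per save/restore is accepted, and
 * they must use the module's first syncpoint and waitbase.
 */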
592 static int nvhost_ioctl_channel_set_ctxswitch(
593                 struct nvhost_channel_userctx *ctx,
594                 struct nvhost_set_ctxswitch_args *args)
595 {
596         struct nvhost_cmdbuf cmdbuf_save;
597         struct nvhost_cmdbuf cmdbuf_restore;
598         struct nvhost_syncpt_incr save_incr, restore_incr;
599         u32 save_waitbase, restore_waitbase;
600         struct nvhost_reloc reloc;
601         struct nvhost_hwctx_handler *ctxhandler = NULL;
602         struct nvhost_hwctx *nhwctx = NULL;
603         struct user_hwctx *hwctx;
604         struct nvhost_device_data *pdata = platform_get_drvdata(ctx->ch->dev);
605         int err;
606
607         /* Only channels with context support */
608         if (!ctx->hwctx)
609                 return -EFAULT;
610
611         /* Only a single cmdbuf, reloc and syncpt increment per save/restore is supported */
612         if (args->num_cmdbufs_save != 1
613                         || args->num_cmdbufs_restore != 1
614                         || args->num_save_incrs != 1
615                         || args->num_restore_incrs != 1
616                         || args->num_relocs != 1)
617                 return -EINVAL;
618
619         err = copy_from_user(&cmdbuf_save,
620                         args->cmdbuf_save, sizeof(cmdbuf_save));
621         if (err)
622                 goto fail;
623
624         err = copy_from_user(&cmdbuf_restore,
625                         args->cmdbuf_restore, sizeof(cmdbuf_restore));
626         if (err)
627                 goto fail;
628
629         err = copy_from_user(&reloc, args->relocs, sizeof(reloc));
630         if (err)
631                 goto fail;
632
633         err = copy_from_user(&save_incr,
634                         args->save_incrs, sizeof(save_incr));
635         if (err)
636                 goto fail;
637         err = copy_from_user(&save_waitbase,
638                         args->save_waitbases, sizeof(save_waitbase));
            if (err)
                    goto fail;
639
640         err = copy_from_user(&restore_incr,
641                         args->restore_incrs, sizeof(restore_incr));
642         if (err)
643                 goto fail;
644         err = copy_from_user(&restore_waitbase,
645                         args->restore_waitbases, sizeof(restore_waitbase));
            if (err)
                    goto fail;
646
647         if (save_incr.syncpt_id != pdata->syncpts[0]
648                         || restore_incr.syncpt_id != pdata->syncpts[0]
649                         || save_waitbase != pdata->waitbases[0]
650                         || restore_waitbase != pdata->waitbases[0]) {
651                 err = -EINVAL;
652                 goto fail;
653         }
654         ctxhandler = user_ctxhandler_init(save_incr.syncpt_id,
655                         save_waitbase, ctx->ch);
656         if (!ctxhandler) {
657                 err = -ENOMEM;
658                 goto fail;
659         }
660
661         nhwctx = ctxhandler->alloc(ctxhandler, ctx->ch);
662         if (!nhwctx) {
663                 err = -ENOMEM;
664                 goto fail_hwctx;
665         }
666         hwctx = to_user_hwctx(nhwctx);
667
668         trace_nvhost_ioctl_channel_set_ctxswitch(ctx->ch->dev->name, nhwctx,
669                         cmdbuf_save.mem, cmdbuf_save.offset, cmdbuf_save.words,
670                         cmdbuf_restore.mem, cmdbuf_restore.offset,
671                         cmdbuf_restore.words,
672                         pdata->syncpts[0], pdata->waitbases[0],
673                         save_incr.syncpt_incrs, restore_incr.syncpt_incrs);
674
675         nhwctx->memmgr = ctx->hwctx->memmgr;
676         err = user_hwctx_set_restore(hwctx, cmdbuf_restore.mem,
677                         cmdbuf_restore.offset, cmdbuf_restore.words);
678         if (err)
679                 goto fail_set_restore;
680
681         err = user_hwctx_set_save(hwctx, cmdbuf_save.mem,
682                         cmdbuf_save.offset, cmdbuf_save.words, &reloc);
683         if (err)
684                 goto fail_set_save;
685
686         hwctx->hwctx.save_incrs = save_incr.syncpt_incrs;
687         hwctx->hwctx.restore_incrs = restore_incr.syncpt_incrs;
688
689         /* Free old context */
690         ctx->hwctx->h->put(ctx->hwctx);
691         ctx->hwctx = nhwctx;
692
693         return 0;
694
695 fail_set_save:
696 fail_set_restore:
697         ctxhandler->put(&hwctx->hwctx);
698 fail_hwctx:
699         user_ctxhandler_free(ctxhandler);
700 fail:
701         return err;
702 }
703
704 static int nvhost_ioctl_channel_read_3d_reg(struct nvhost_channel_userctx *ctx,
705         struct nvhost_read_3d_reg_args *args)
706 {
707         return nvhost_channel_read_reg(ctx->ch, ctx->hwctx,
708                         args->offset, &args->value);
709 }
710
711 static int moduleid_to_index(struct platform_device *dev, u32 moduleid)
712 {
713         int i;
714         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
715
716         for (i = 0; i < NVHOST_MODULE_MAX_CLOCKS; i++) {
717                 if (pdata->clocks[i].moduleid == moduleid)
718                         return i;
719         }
720
721         /* Old user space may pass an arbitrary module id; fall back to
722          * clock zero in that case. */
723         return 0;
724 }
725
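/*
 * The moduleid argument packs both a module id and a clock attribute;
 * unpack them and forward the request to nvhost_module_set_rate(). A module
 * id of zero selects the device's first clock.
 */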
726 static int nvhost_ioctl_channel_set_rate(struct nvhost_channel_userctx *ctx,
727         struct nvhost_clk_rate_args *arg)
728 {
729         u32 moduleid = (arg->moduleid >> NVHOST_MODULE_ID_BIT_POS)
730                         & ((1 << NVHOST_MODULE_ID_BIT_WIDTH) - 1);
731         u32 attr = (arg->moduleid >> NVHOST_CLOCK_ATTR_BIT_POS)
732                         & ((1 << NVHOST_CLOCK_ATTR_BIT_WIDTH) - 1);
733         int index = moduleid ?
734                         moduleid_to_index(ctx->ch->dev, moduleid) : 0;
735
736         return nvhost_module_set_rate(ctx->ch->dev,
737                         ctx, arg->rate, index, attr);
738 }
739
740 static int nvhost_ioctl_channel_get_rate(struct nvhost_channel_userctx *ctx,
741         u32 moduleid, u32 *rate)
742 {
743         int index = moduleid ? moduleid_to_index(ctx->ch->dev, moduleid) : 0;
744
745         return nvhost_module_get_rate(ctx->ch->dev,
746                         (unsigned long *)rate, index);
747 }
748
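/*
 * MODULE_REGRDWR ioctl: read or write 'num_offsets' blocks of registers,
 * each 'block_size' bytes long, batching the accesses in 64-word chunks
 * through the on-stack staging buffer.
 */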
749 static int nvhost_ioctl_channel_module_regrdwr(
750         struct nvhost_channel_userctx *ctx,
751         struct nvhost_ctrl_module_regrdwr_args *args)
752 {
753         u32 num_offsets = args->num_offsets;
754         u32 *offsets = args->offsets;
755         u32 *values = args->values;
756         u32 vals[64];
757         struct platform_device *ndev;
758
759         trace_nvhost_ioctl_channel_module_regrdwr(args->id,
760                 args->num_offsets, args->write);
761
762         /* Check that there is something to read and that block size is
763          * u32 aligned */
764         if (num_offsets == 0 || args->block_size & 3)
765                 return -EINVAL;
766
767         ndev = ctx->ch->dev;
768
769         while (num_offsets--) {
770                 int err;
771                 u32 offs;
772                 int remaining = args->block_size >> 2;
773
774                 if (get_user(offs, offsets))
775                         return -EFAULT;
776
777                 offsets++;
778                 while (remaining) {
779                         int batch = min(remaining, 64);
780                         if (args->write) {
781                                 if (copy_from_user(vals, values,
782                                                 batch * sizeof(u32)))
783                                         return -EFAULT;
784
785                                 err = nvhost_write_module_regs(ndev,
786                                         offs, batch, vals);
787                                 if (err)
788                                         return err;
789                         } else {
790                                 err = nvhost_read_module_regs(ndev,
791                                                 offs, batch, vals);
792                                 if (err)
793                                         return err;
794
795                                 if (copy_to_user(values, vals,
796                                                 batch * sizeof(u32)))
797                                         return -EFAULT;
798                         }
799
800                         remaining -= batch;
801                         offs += batch * sizeof(u32);
802                         values += batch;
803                 }
804         }
805
806         return 0;
807 }
808
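/*
 * Collapse a list of ids (syncpoints, waitbases or module mutexes) into a
 * bitmask for the GET_* ioctls below; iteration stops at the first zero or
 * out-of-range entry.
 */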
809 static u32 create_mask(u32 *words, int num)
810 {
811         int i;
812         u32 word = 0;
813         for (i = 0; i < num && words[i] && words[i] < 32; i++)
814                 word |= BIT(words[i]);
815
816         return word;
817 }
818
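/*
 * Channel ioctl dispatcher. Arguments are staged in an on-stack buffer:
 * copied in for _IOC_WRITE commands, handed to the per-command handler, and
 * copied back to userspace for _IOC_READ commands on success.
 */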
819 static long nvhost_channelctl(struct file *filp,
820         unsigned int cmd, unsigned long arg)
821 {
822         struct nvhost_channel_userctx *priv = filp->private_data;
823         u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
824         int err = 0;
825
826         if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
827                 (_IOC_NR(cmd) == 0) ||
828                 (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST) ||
829                 (_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE))
830                 return -EFAULT;
831
832         if (_IOC_DIR(cmd) & _IOC_WRITE) {
833                 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
834                         return -EFAULT;
835         }
836
837         switch (cmd) {
838         case NVHOST_IOCTL_CHANNEL_FLUSH:
839                 err = nvhost_ioctl_channel_flush(priv, (void *)buf, 0);
840                 break;
841         case NVHOST_IOCTL_CHANNEL_NULL_KICKOFF:
842                 err = nvhost_ioctl_channel_flush(priv, (void *)buf, 1);
843                 break;
844         case NVHOST_IOCTL_CHANNEL_SUBMIT_EXT:
845         {
846                 struct nvhost_submit_hdr_ext *hdr;
847
848                 if (priv->hdr.num_relocs ||
849                     priv->num_relocshifts ||
850                     priv->hdr.num_cmdbufs ||
851                     priv->hdr.num_waitchks) {
852                         reset_submit(priv);
853                         dev_err(&priv->ch->dev->dev,
854                                 "channel submit out of sync\n");
855                         err = -EIO;
856                         break;
857                 }
858
859                 hdr = (struct nvhost_submit_hdr_ext *)buf;
860                 if (hdr->submit_version > NVHOST_SUBMIT_VERSION_MAX_SUPPORTED) {
861                         dev_err(&priv->ch->dev->dev,
862                                 "submit version %d > max supported %d\n",
863                                 hdr->submit_version,
864                                 NVHOST_SUBMIT_VERSION_MAX_SUPPORTED);
865                         err = -EINVAL;
866                         break;
867                 }
868                 memcpy(&priv->hdr, hdr, sizeof(struct nvhost_submit_hdr_ext));
869                 err = set_submit(priv);
870                 trace_nvhost_ioctl_channel_submit(priv->ch->dev->name,
871                         priv->hdr.submit_version,
872                         priv->hdr.num_cmdbufs, priv->hdr.num_relocs,
873                         priv->hdr.num_waitchks,
874                         priv->hdr.syncpt_id, priv->hdr.syncpt_incrs);
875                 break;
876         }
877         case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
878         {
879                 struct nvhost_device_data *pdata = \
880                         platform_get_drvdata(priv->ch->dev);
881                 ((struct nvhost_get_param_args *)buf)->value =
882                         create_mask(pdata->syncpts, NVHOST_MODULE_MAX_SYNCPTS);
883                 break;
884         }
885         case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINT:
886         {
887                 struct nvhost_device_data *pdata = \
888                         platform_get_drvdata(priv->ch->dev);
889                 struct nvhost_get_param_arg *arg =
890                         (struct nvhost_get_param_arg *)buf;
891                 if (arg->param >= NVHOST_MODULE_MAX_SYNCPTS
892                                 || !pdata->syncpts[arg->param])
893                         return -EINVAL;
894                 arg->value = pdata->syncpts[arg->param];
895                 break;
896         }
897         case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
898         {
899                 struct nvhost_device_data *pdata = \
900                         platform_get_drvdata(priv->ch->dev);
901                 ((struct nvhost_get_param_args *)buf)->value =
902                         create_mask(pdata->waitbases,
903                                         NVHOST_MODULE_MAX_WAITBASES);
904                 break;
905         }
906         case NVHOST_IOCTL_CHANNEL_GET_WAITBASE:
907         {
908                 struct nvhost_device_data *pdata = \
909                         platform_get_drvdata(priv->ch->dev);
910                 struct nvhost_get_param_arg *arg =
911                         (struct nvhost_get_param_arg *)buf;
912                 if (arg->param >= NVHOST_MODULE_MAX_WAITBASES
913                                 || !pdata->waitbases[arg->param])
914                         return -EINVAL;
915                 arg->value = pdata->waitbases[arg->param];
916                 break;
917         }
918         case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
919         {
920                 struct nvhost_device_data *pdata = \
921                         platform_get_drvdata(priv->ch->dev);
922                 ((struct nvhost_get_param_args *)buf)->value =
923                         create_mask(pdata->modulemutexes,
924                                         NVHOST_MODULE_MAX_MODMUTEXES);
925                 break;
926         }
927         case NVHOST_IOCTL_CHANNEL_GET_MODMUTEX:
928         {
929                 struct nvhost_device_data *pdata = \
930                         platform_get_drvdata(priv->ch->dev);
931                 struct nvhost_get_param_arg *arg =
932                         (struct nvhost_get_param_arg *)buf;
933                 if (arg->param >= NVHOST_MODULE_MAX_MODMUTEXES
934                                 || !pdata->modulemutexes[arg->param])
935                         return -EINVAL;
936                 arg->value = pdata->modulemutexes[arg->param];
937                 break;
938         }
939         case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
940         {
941                 int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
942                 struct mem_mgr *new_client = nvhost_memmgr_get_mgr_file(fd);
943
944                 if (IS_ERR(new_client)) {
945                         err = PTR_ERR(new_client);
946                         break;
947                 }
948
949                 if (priv->memmgr)
950                         nvhost_memmgr_put_mgr(priv->memmgr);
951
952                 priv->memmgr = new_client;
953
954                 if (priv->hwctx)
955                         priv->hwctx->memmgr = new_client;
956
957                 break;
958         }
959         case NVHOST_IOCTL_CHANNEL_READ_3D_REG:
960                 err = nvhost_ioctl_channel_read_3d_reg(priv, (void *)buf);
961                 break;
962         case NVHOST_IOCTL_CHANNEL_GET_CLK_RATE:
963         {
964                 struct nvhost_clk_rate_args *arg =
965                                 (struct nvhost_clk_rate_args *)buf;
966
967                 err = nvhost_ioctl_channel_get_rate(priv,
968                                 arg->moduleid, &arg->rate);
969                 break;
970         }
971         case NVHOST_IOCTL_CHANNEL_SET_CLK_RATE:
972         {
973                 struct nvhost_clk_rate_args *arg =
974                                 (struct nvhost_clk_rate_args *)buf;
975
976                 err = nvhost_ioctl_channel_set_rate(priv, arg);
977                 break;
978         }
979         case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
980                 priv->timeout =
981                         (u32)((struct nvhost_set_timeout_args *)buf)->timeout;
982                 dev_dbg(&priv->ch->dev->dev,
983                         "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
984                         __func__, priv->timeout, priv);
985                 break;
986         case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT:
987                 ((struct nvhost_get_param_args *)buf)->value =
988                                 priv->hwctx ? priv->hwctx->has_timedout : 0;
989                 break;
990         case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
991                 priv->priority =
992                         (u32)((struct nvhost_set_priority_args *)buf)->priority;
993                 break;
994         case NVHOST_IOCTL_CHANNEL_MODULE_REGRDWR:
995                 err = nvhost_ioctl_channel_module_regrdwr(priv, (void *)buf);
996                 break;
997         case NVHOST_IOCTL_CHANNEL_SUBMIT:
998                 err = nvhost_ioctl_channel_submit(priv, (void *)buf);
999                 break;
1000         case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT_EX:
1001                 priv->timeout = (u32)
1002                         ((struct nvhost_set_timeout_ex_args *)buf)->timeout;
1003                 priv->timeout_debug_dump = !((u32)
1004                         ((struct nvhost_set_timeout_ex_args *)buf)->flags &
1005                         (1 << NVHOST_TIMEOUT_FLAG_DISABLE_DUMP));
1006                 dev_dbg(&priv->ch->dev->dev,
1007                         "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
1008                         __func__, priv->timeout, priv);
1009                 break;
1010         case NVHOST_IOCTL_CHANNEL_SET_CTXSWITCH:
1011                 err = nvhost_ioctl_channel_set_ctxswitch(priv, (void *)buf);
1012                 break;
1013         default:
1014                 err = -ENOTTY;
1015                 break;
1016         }
1017
1018         if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
1019                 err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
1020
1021         return err;
1022 }
1023
1024 static const struct file_operations nvhost_channelops = {
1025         .owner = THIS_MODULE,
1026         .release = nvhost_channelrelease,
1027         .open = nvhost_channelopen,
1028         .write = nvhost_channelwrite,
1029         .unlocked_ioctl = nvhost_channelctl
1030 };
1031
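/*
 * Create the userspace interface for a client device: a character device
 * and a device node named after the client, using the channel fops above.
 */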
1032 int nvhost_client_user_init(struct platform_device *dev)
1033 {
1034         int err;
             dev_t devno;
1035         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1036
1037         struct nvhost_channel *ch = pdata->channel;
1038         err = alloc_chrdev_region(&devno, 0, 1, IFACE_NAME);
1039         if (err < 0) {
1040                 dev_err(&dev->dev, "failed to allocate devno\n");
1041                 goto fail;
1042         }
1043
1044         cdev_init(&ch->cdev, &nvhost_channelops);
1045         ch->cdev.owner = THIS_MODULE;
1046
1047         err = cdev_add(&ch->cdev, devno, 1);
1048         if (err < 0) {
1049                 dev_err(&dev->dev,
1050                         "failed to add chan %i cdev\n", pdata->index);
1051                 goto fail;
1052         }
1053         ch->node = device_create(nvhost_get_host(dev)->nvhost_class,
1054                         NULL, devno, NULL,
1055                         IFACE_NAME "-%s", dev_name(&dev->dev));
1056         if (IS_ERR(ch->node)) {
1057                 err = PTR_ERR(ch->node);
1058                 dev_err(&dev->dev,
1059                         "failed to create %s channel device\n",
1060                         dev_name(&dev->dev));
1061                 goto fail;
1062         }
1063
1064         return 0;
1065 fail:
1066         return err;
1067 }
1068
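/*
 * Bring up a client device: allocate and initialize its channel, create the
 * user interface, set up power management, debugfs and (optionally) scaling,
 * register the device, and reset the unit's syncpoint values.
 */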
1069 int nvhost_client_device_init(struct platform_device *dev)
1070 {
1071         int err;
1072         struct nvhost_master *nvhost_master = nvhost_get_host(dev);
1073         struct nvhost_channel *ch;
1074         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1075
1076         ch = nvhost_alloc_channel(dev);
1077         if (ch == NULL)
1078                 return -ENODEV;
1079
1080         /* store the pointer to this device for channel */
1081         ch->dev = dev;
1082
1083         err = nvhost_channel_init(ch, nvhost_master, pdata->index);
1084         if (err)
1085                 goto fail;
1086
1087         err = nvhost_client_user_init(dev);
1088         if (err)
1089                 goto fail;
1090
1091         err = nvhost_module_init(dev);
1092         if (err)
1093                 goto fail;
1094
1095         if (tickctrl_op().init_channel)
1096                 tickctrl_op().init_channel(dev);
1097
1098         err = nvhost_device_list_add(dev);
1099         if (err)
1100                 goto fail;
1101
1102         if (pdata->scaling_init)
1103                 pdata->scaling_init(dev);
1104
1105         nvhost_device_debug_init(dev);
1106
1107         /* reset syncpoint values for this unit */
1108         nvhost_module_busy(nvhost_master->dev);
1109         nvhost_syncpt_reset_client(dev);
1110         nvhost_module_idle(nvhost_master->dev);
1111
1112         dev_info(&dev->dev, "initialized\n");
1113
1114         return 0;
1115
1116 fail:
1117         /* Add clean-up */
1118         nvhost_free_channel(ch);
1119         return err;
1120 }
1121
1122 int nvhost_client_device_suspend(struct platform_device *dev)
1123 {
1124         int ret = 0;
1125         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1126
1127         ret = nvhost_channel_suspend(pdata->channel);
1128         if (ret)
1129                 return ret;
1130
1131         dev_info(&dev->dev, "suspend status: %d\n", ret);
1132
1133         return ret;
1134 }
1135
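/*
 * Map every IORESOURCE_MEM resource of the device and store the mappings in
 * pdata->aperture[] for the register access helpers above.
 */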
1136 int nvhost_client_device_get_resources(struct platform_device *dev)
1137 {
1138         int i;
1139         void __iomem *regs = NULL;
1140         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1141
1142         for (i = 0; i < dev->num_resources; i++) {
1143                 struct resource *r = NULL;
1144
1145                 r = platform_get_resource(dev, IORESOURCE_MEM, i);
1146                 /* We've run out of mem resources */
1147                 if (!r)
1148                         break;
1149
1150                 regs = devm_request_and_ioremap(&dev->dev, r);
1151                 if (!regs)
1152                         goto fail;
1153
1154                 pdata->aperture[i] = regs;
1155         }
1156
1157         return 0;
1158
1159 fail:
1160         dev_err(&dev->dev, "failed to get register memory\n");
1161
1162         return -ENXIO;
1163 }
1164
1165 /* A simple wrapper around request_firmware() that prefixes 'fw_name'
1166  * with a SoC-specific directory when one is available.
1167  * The caller is responsible for calling release_firmware later.
1168  */
1169 const struct firmware *
1170 nvhost_client_request_firmware(struct platform_device *dev, const char *fw_name)
1171 {
1172         struct nvhost_chip_support *op = nvhost_get_chip_ops();
1173         const struct firmware *fw;
1174         char *fw_path = NULL;
1175         int path_len, err;
1176
1177         if (!fw_name)
1178                 return NULL;
1179
1180         if (op->soc_name) {
1181                 path_len = strlen(fw_name) + strlen(op->soc_name);
1182                 path_len += 2; /* for the path separator and zero terminator */
1183
1184                 fw_path = kzalloc(sizeof(*fw_path) * path_len,
1185                                      GFP_KERNEL);
1186                 if (!fw_path)
1187                         return NULL;
1188
1189                 sprintf(fw_path, "%s/%s", op->soc_name, fw_name);
1190                 fw_name = fw_path;
1191         }
1192
1193         err = request_firmware(&fw, fw_name, &dev->dev);
1194         kfree(fw_path);
1195         if (err) {
1196                 dev_err(&dev->dev, "failed to get firmware\n");
1197                 return NULL;
1198         }
1199
1200         /* note: caller must release_firmware */
1201         return fw;
1202 }