clocks: tegra12: Use static CPU-EMC co-relation
[linux-3.10.git] / drivers / iommu / hv_tegra-smmu-lib.c
1 /*
2  * IVC based Library for SMMU services.
3  *
4  * Copyright (c) 2014-2015, NVIDIA CORPORATION.  All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  */
15
16 #include <linux/platform_device.h>
17 #include <linux/resource.h>
18 #include <linux/of.h>
19 #include <linux/sched.h>
20 #include <linux/wait.h>
21 #include <linux/tegra-ivc.h>
22 #include <linux/spinlock.h>
23 #include <linux/hardirq.h>
24 #include <asm/io.h>
25
26 #include "hv_tegra-smmu-lib.h"
27
/* Single global comm-device state shared by all channels; assigned once in
 * tegra_hv_smmu_comm_init() and read by every library entry point. */
struct tegra_hv_smmu_comm_dev *saved_smmu_comm_dev;

static void ivc_rx(struct tegra_hv_ivc_cookie *ivck);
/* Non-zero selects the polling receive path in ivc_recv_sync(); the
 * wait-queue path below it is present but unused. */
static int ivc_poll = 1;
void print_server_error(const struct device *dev, int err);
33
34 static int ivc_send(struct tegra_hv_smmu_comm_chan *comm_chan,
35                         struct smmu_ivc_msg *msg, int size)
36 {
37         unsigned long flags = 0;
38         int ret = 0;
39
40         if (!comm_chan || !comm_chan->ivck || !msg || !size)
41                 return -EINVAL;
42
43         spin_lock_irqsave(&saved_smmu_comm_dev->ivck_tx_lock, flags);
44
45         if (!tegra_hv_ivc_can_write(comm_chan->ivck)) {
46                 ret = -EBUSY;
47                 goto fail;
48         }
49
50         ret = tegra_hv_ivc_write(comm_chan->ivck, msg, size);
51         /* Assumption here is server will not reset the ivc channel
52          * for a active guest.
53          * If we have reached here that means ivc chanel went to
54          * established state.
55          */
56         BUG_ON(ret == -ECONNRESET);
57
58         if (ret != size) {
59                 ret = -EIO;
60                 goto fail;
61         }
62
63 fail:
64         spin_unlock_irqrestore(&saved_smmu_comm_dev->ivck_tx_lock, flags);
65         return ret;
66 }
67
/*
 * ivc_recv_sync() - wait for the server's response on this channel and
 * copy it into @msg.
 *
 * Must be entered with rx_state already RX_PENDING (set by ivc_send_recv()
 * before the send) or RX_AVAIL (response already landed via ivc_rx()).
 * Returns @size on success, -ETIMEDOUT on the (unused) wait-queue path,
 * or -EINVAL on bad arguments.  Always resets rx_state to RX_INIT on exit
 * so the next request on this channel starts clean.
 */
static int ivc_recv_sync(struct tegra_hv_smmu_comm_chan *comm_chan,
			struct smmu_ivc_msg *msg, int size)
{
	int err = 0;

	if (!comm_chan || !comm_chan->ivck || !msg || !size)
		return -EINVAL;

	/* Make sure some response is pending */
	BUG_ON((comm_chan->rx_state != RX_PENDING) &&
			(comm_chan->rx_state != RX_AVAIL));

	/* Poll for response from server
	 * This is not the best way as reponse from server
	 * can get delayed and we are wasting cpu cycles.
	 *
	 * Linux drivers can call dma_map/unmap calls from
	 * atomic contexts and it's not possible to block
	 * from those contexts and reason for using polling
	 *
	 * This will change once hypervisor will support
	 * guest timestealing approach for IVC
	 */

	if (ivc_poll) {
		/* Busy-loop: ivc_rx() drains the queue and flips rx_state
		 * to RX_AVAIL when our response arrives.  NOTE(review):
		 * if the response frame fails validation in ivc_rx(),
		 * rx_state never becomes RX_AVAIL and this loop spins
		 * forever — confirm the server guarantees a valid reply. */
		while (comm_chan->rx_state != RX_AVAIL)
			ivc_rx(comm_chan->ivck);
	} else {
		/* Implementation not used */
		err = wait_event_timeout(comm_chan->wait,
			comm_chan->rx_state == RX_AVAIL,
			msecs_to_jiffies(comm_chan->timeout));

		if (!err) {
			err = -ETIMEDOUT;
			goto fail;
		}
	}

	/* Success: report the full size and hand the cached response back. */
	err = size;
	memcpy(msg, &comm_chan->rx_msg, size);
fail:
	comm_chan->rx_state = RX_INIT;
	return err;
}
114
115 /* Send request and wait for response */
116 int ivc_send_recv(enum smmu_msg_t msg,
117                         struct tegra_hv_smmu_comm_chan *comm_chan,
118                         struct smmu_ivc_msg *tx_msg,
119                         struct smmu_ivc_msg *rx_msg)
120 {
121         int err = -EINVAL;
122         unsigned long flags;
123         struct device *dev;
124
125         if (!comm_chan || !tx_msg || !rx_msg)
126                 return err;
127
128         dev = comm_chan->hv_comm_dev->dev;
129
130         /* Serialize requests per ASID */
131         spin_lock_irqsave(&comm_chan->lock, flags);
132
133         /* No outstanding for this ASID */
134         BUG_ON(comm_chan->rx_state != RX_INIT);
135
136         tx_msg->msg = msg;
137         tx_msg->comm_chan_id = comm_chan->id;
138         tx_msg->s_marker = 0xDEADBEAF;
139         tx_msg->e_marker = 0xBEAFDEAD;
140         comm_chan->rx_state = RX_PENDING;
141         err = ivc_send(comm_chan, tx_msg, sizeof(struct smmu_ivc_msg));
142         if (err < 0) {
143                 dev_err(dev, "ivc_send failed err %d\n", err);
144                 goto fail;
145         }
146
147         err = ivc_recv_sync(comm_chan, rx_msg, sizeof(struct smmu_ivc_msg));
148         if (err < 0) {
149                 dev_err(dev, "ivc_recv failed err %d\n", err);
150                 goto fail;
151         }
152
153         /* Return the server error code to the caller
154          * Using positive error codes for server status
155          * Using negative error codes for IVC comm errors
156          */
157         if (rx_msg->err) {
158                 dev_err(dev, "server error %d", rx_msg->err);
159                 print_server_error(dev, rx_msg->err);
160         }
161         err = rx_msg->err;
162 fail:
163         spin_unlock_irqrestore(&comm_chan->lock, flags);
164         return err;
165 }
166
167
168 void ivc_rx(struct tegra_hv_ivc_cookie *ivck)
169 {
170         unsigned long flags;
171
172         if (!ivck || !saved_smmu_comm_dev)
173                 BUG();
174
175         spin_lock_irqsave(&saved_smmu_comm_dev->ivck_rx_lock, flags);
176
177         if (tegra_hv_ivc_can_read(ivck)) {
178                 /* Message available */
179                 struct tegra_hv_smmu_comm_chan *comm_chan = NULL;
180                 int comm_chan_id;
181                 int ret = 0;
182                 struct smmu_ivc_msg rx_msg;
183
184                 memset(&rx_msg, 0, sizeof(struct smmu_ivc_msg));
185                 ret = tegra_hv_ivc_read(ivck, &rx_msg,
186                                         sizeof(struct smmu_ivc_msg));
187                 /* Assumption here is server will not reset the ivc channel
188                  * for a active guest.
189                  * If we have reached here that means ivc chanel went to
190                  * established state.
191                  */
192                 BUG_ON(ret == -ECONNRESET);
193
194                 if (ret != sizeof(struct smmu_ivc_msg)) {
195                         dev_err(saved_smmu_comm_dev->dev,
196                                 "IVC read failure (msg size error)\n");
197                         goto fail;
198                 }
199
200                 if ((rx_msg.s_marker != 0xDEADBEAF) ||
201                                         (rx_msg.e_marker != 0xBEAFDEAD)) {
202                         dev_err(saved_smmu_comm_dev->dev,
203                                 "IVC read failure (invalid markers)\n");
204                         goto fail;
205                 }
206
207                 comm_chan_id = rx_msg.comm_chan_id;
208
209                 comm_chan = saved_smmu_comm_dev->hv_comm_chan[comm_chan_id];
210                 if (!comm_chan || comm_chan->id != comm_chan_id) {
211                         dev_err(saved_smmu_comm_dev->dev,
212                                 "Invalid channel from server %d\n",
213                                                         comm_chan_id);
214                         goto fail;
215                 }
216
217                 if (comm_chan->rx_state != RX_PENDING) {
218                         dev_err(saved_smmu_comm_dev->dev,
219                                 "Spurious message from server asid %d\n",
220                                                                 comm_chan_id);
221                         goto fail;
222                 }
223
224                 /* Copy the message to consumer*/
225                 memcpy(&comm_chan->rx_msg, &rx_msg,
226                                         sizeof(struct smmu_ivc_msg));
227                 comm_chan->rx_state = RX_AVAIL;
228                 /* Not used */
229                 wake_up(&comm_chan->wait);
230         }
231 fail:
232         spin_unlock_irqrestore(&saved_smmu_comm_dev->ivck_rx_lock, flags);
233         return;
234 }
235
/* IVC notification callbacks; not used with the polling based
 * implementation (reserve below passes NULL instead of &ivc_ops). */
static const struct tegra_hv_ivc_ops ivc_ops = { ivc_rx, NULL };
238
239 int tegra_hv_smmu_comm_chan_alloc(void)
240 {
241         struct tegra_hv_smmu_comm_chan *comm_chan = NULL;
242         unsigned long flags;
243         int err = 0;
244         struct tegra_hv_smmu_comm_dev *comm_dev = saved_smmu_comm_dev;
245         int chan_id;
246
247         if (!comm_dev || !comm_dev->ivck)
248                 return -EINVAL;
249
250         spin_lock_irqsave(&comm_dev->lock, flags);
251
252         comm_chan = devm_kzalloc(comm_dev->dev, sizeof(*comm_chan),
253                                                 GFP_KERNEL);
254         if (!comm_chan) {
255                 err = -ENOMEM;
256                 goto out;
257         }
258
259         comm_chan->ivck = comm_dev->ivck;
260         /* Find a free channel number */
261         for (chan_id = 0; chan_id < MAX_COMM_CHANS; chan_id++) {
262                 if (comm_dev->hv_comm_chan[chan_id] == NULL)
263                         break;
264         }
265
266         if (chan_id >= MAX_COMM_CHANS) {
267                 err = -ENOMEM;
268                 goto fail_cleanup;
269         }
270
271         comm_chan->id = chan_id;
272         init_waitqueue_head(&comm_chan->wait);
273         comm_chan->timeout = 250; /* Not used in polling */
274         comm_chan->rx_state = RX_INIT;
275         comm_chan->hv_comm_dev = comm_dev;
276         spin_lock_init(&comm_chan->lock);
277
278         /* Already provisioned */
279         if (comm_dev->hv_comm_chan[comm_chan->id]) {
280                 err = -EINVAL;
281                 goto fail_cleanup;
282         }
283
284         comm_dev->hv_comm_chan[comm_chan->id] = comm_chan;
285         spin_unlock_irqrestore(&comm_dev->lock, flags);
286         return comm_chan->id;
287 fail_cleanup:
288         devm_kfree(comm_dev->dev, comm_chan);
289 out:
290         spin_unlock_irqrestore(&comm_dev->lock, flags);
291         return err;
292 }
293
294 /* hook to domain destroy */
295 void tegra_hv_smmu_comm_chan_free(int comm_chan_id)
296 {
297         unsigned long flags;
298         struct tegra_hv_smmu_comm_dev *comm_dev = saved_smmu_comm_dev;
299         struct tegra_hv_smmu_comm_chan *comm_chan =
300                                 comm_dev->hv_comm_chan[comm_chan_id];
301
302         spin_lock_irqsave(&comm_dev->lock, flags);
303
304         if (comm_chan)
305                 devm_kfree(comm_dev->dev, comm_chan);
306         else
307                 dev_err(comm_dev->dev, "Trying to free unallocated channel\n");
308
309         comm_dev->hv_comm_chan[comm_chan_id] = NULL;
310
311         spin_unlock_irqrestore(&comm_dev->lock, flags);
312 }
313
314 int tegra_hv_smmu_comm_init(struct device *dev)
315 {
316         int err, ivc_queue, ivm_id;
317         struct device_node *dn, *hv_dn;
318         struct tegra_hv_ivc_cookie *ivck;
319         struct tegra_hv_ivm_cookie *ivm;
320         struct tegra_hv_smmu_comm_dev *smmu_comm_dev;
321
322         dn = dev->of_node;
323         if (dn == NULL) {
324                 dev_err(dev, "No OF data\n");
325                 return -EINVAL;
326         }
327
328         hv_dn = of_parse_phandle(dn, "ivc_queue", 0);
329         if (hv_dn == NULL) {
330                 dev_err(dev, "Failed to parse phandle of ivc prop\n");
331                 return -EINVAL;
332         }
333
334         err = of_property_read_u32_index(dn, "ivc_queue", 1, &ivc_queue);
335         if (err != 0) {
336                 dev_err(dev, "Failed to read IVC property ID\n");
337                 of_node_put(hv_dn);
338                 return -EINVAL;
339         }
340
341         ivck = tegra_hv_ivc_reserve(hv_dn, ivc_queue, NULL /*&ivc_ops*/);
342         if (IS_ERR_OR_NULL(ivck)) {
343                 dev_err(dev, "Failed to reserve ivc queue %d\n", ivc_queue);
344                 return -EINVAL;
345         }
346
347         err = of_property_read_u32_index(dn, "mempool_id", 1, &ivm_id);
348         if (err != 0) {
349                 dev_err(dev, "Failed to read ivc mempool property\n");
350                 err = -EINVAL;
351                 goto fail_reserve;
352         }
353
354         ivm = tegra_hv_mempool_reserve(hv_dn, ivm_id);
355         if (IS_ERR_OR_NULL(ivm)) {
356                 dev_err(dev, "Failed to reserve mempool id %d\n", ivm_id);
357                 err = -EINVAL;
358                 goto fail_reserve;
359         }
360
361         smmu_comm_dev = devm_kzalloc(dev, sizeof(*smmu_comm_dev), GFP_KERNEL);
362         if (!smmu_comm_dev) {
363                 err = -ENOMEM;
364                 goto fail_mempool_reserve;
365         }
366
367         smmu_comm_dev->ivck = ivck;
368         smmu_comm_dev->ivm = ivm;
369         smmu_comm_dev->dev = dev;
370
371         smmu_comm_dev->virt_ivm_base = ioremap_cached(ivm->ipa, ivm->size);
372
373         /* set ivc channel to invalid state */
374         tegra_hv_ivc_channel_reset(ivck);
375
376         /* Poll to enter established state
377          * Sync -> Established
378          * Client driver is polling based so we can't move forward till
379          * ivc queue communication path is established.
380          */
381         while (tegra_hv_ivc_channel_notified(ivck))
382                 ;
383
384         spin_lock_init(&smmu_comm_dev->ivck_rx_lock);
385         spin_lock_init(&smmu_comm_dev->ivck_tx_lock);
386         spin_lock_init(&smmu_comm_dev->lock);
387         saved_smmu_comm_dev = smmu_comm_dev;
388         return 0;
389
390 fail_mempool_reserve:
391         tegra_hv_mempool_unreserve(ivm);
392 fail_reserve:
393         tegra_hv_ivc_unreserve(ivck);
394         return err;
395 }
396
397 void print_server_error(const struct device *dev, int err)
398 {
399         switch (err) {
400         case SMMU_ERR_SERVER_STATE:
401                 dev_err(dev, "invalid server state\n");
402                 break;
403         case SMMU_ERR_PERMISSION:
404                 dev_err(dev, "permission denied\n");
405                 break;
406         case SMMU_ERR_ARGS:
407                 dev_err(dev, "invalid args passed to server\n");
408                 break;
409         case SMMU_ERR_REQ:
410                 dev_err(dev, "invalid request to server\n");
411                 break;
412         case SMMU_ERR_UNSUPPORTED_REQ:
413                 dev_err(dev, "unsupported message to server\n");
414                 break;
415         default:
416                 dev_err(dev, "unknown error\n");
417                 return;
418         }
419         return;
420 }
421
422 /* get mempool base address used for storing sg entries
423  */
424 int libsmmu_get_mempool_params(void **base, int *size)
425 {
426         struct tegra_hv_smmu_comm_dev *comm_dev = saved_smmu_comm_dev;
427
428         *base = comm_dev->virt_ivm_base;
429         *size = comm_dev->ivm->size;
430
431         return 0;
432 }
433
434 /* get the max number of supported asid's from server
435  * Asid range (0 - num_asids)
436  */
437 int libsmmu_get_num_asids(int comm_chan_id, unsigned int *num_asids)
438 {
439         struct smmu_info *info = NULL;
440         int err = 0;
441         struct smmu_ivc_msg tx_msg, rx_msg;
442         struct tegra_hv_smmu_comm_dev *comm_dev = saved_smmu_comm_dev;
443         struct tegra_hv_smmu_comm_chan *comm_chan = NULL;
444         const struct device *dev = NULL;
445
446         if (!comm_dev)
447                 return -EINVAL;
448
449         comm_chan = comm_dev->hv_comm_chan[comm_chan_id];
450         if (!comm_chan || comm_chan->id != comm_chan_id)
451                 return -EINVAL;
452
453         dev = comm_chan->hv_comm_dev->dev;
454
455         memset(&tx_msg, 0, sizeof(struct smmu_ivc_msg));
456         memset(&rx_msg, 0, sizeof(struct smmu_ivc_msg));
457
458         err = ivc_send_recv(SMMU_INFO, comm_chan, &tx_msg, &rx_msg);
459         if (err)
460                 return -EIO;
461
462         info = &rx_msg.sinfo;
463         *num_asids = info->as_pool.end - info->as_pool.start;
464
465         return err;
466 }
467
468 int libsmmu_get_dma_window(int comm_chan_id, u64 *base, size_t *size)
469 {
470         struct smmu_info *info;
471         int err;
472         struct smmu_ivc_msg tx_msg, rx_msg;
473         struct tegra_hv_smmu_comm_dev *comm_dev = saved_smmu_comm_dev;
474         struct tegra_hv_smmu_comm_chan *comm_chan = NULL;
475         const struct device *dev = NULL;
476
477         if (!comm_dev)
478                 return -EINVAL;
479
480         comm_chan = comm_dev->hv_comm_chan[comm_chan_id];
481         if (!comm_chan || comm_chan->id != comm_chan_id)
482                 return -EINVAL;
483
484         dev = comm_chan->hv_comm_dev->dev;
485
486         memset(&tx_msg, 0, sizeof(struct smmu_ivc_msg));
487         memset(&rx_msg, 0, sizeof(struct smmu_ivc_msg));
488
489         err = ivc_send_recv(SMMU_INFO, comm_chan, &tx_msg, &rx_msg);
490         if (err)
491                 return -EIO;
492
493         info = &rx_msg.sinfo;
494         *base = info->window.start;
495         *size = info->window.end;
496
497         return err;
498 }
499
500 /* connect to server and reset all mappings to default */
501 int libsmmu_connect(int comm_chan_id)
502 {
503         int err;
504         struct smmu_ivc_msg tx_msg, rx_msg;
505         struct tegra_hv_smmu_comm_dev *comm_dev = saved_smmu_comm_dev;
506         struct tegra_hv_smmu_comm_chan *comm_chan = NULL;
507         const struct device *dev = NULL;
508
509         if (!comm_dev)
510                 return -EINVAL;
511
512         comm_chan = comm_dev->hv_comm_chan[comm_chan_id];
513         if (!comm_chan || comm_chan->id != comm_chan_id)
514                 return -EINVAL;
515
516         dev = comm_chan->hv_comm_dev->dev;
517
518         memset(&tx_msg, 0, sizeof(struct smmu_ivc_msg));
519         memset(&rx_msg, 0, sizeof(struct smmu_ivc_msg));
520
521         err = ivc_send_recv(CONNECT, comm_chan, &tx_msg, &rx_msg);
522         if (err)
523                 return -EIO;
524
525         return err;
526 }
527
528 /* get mask of all the hwgroups this guest owns */
529 int libsmmu_get_swgids(int comm_chan_id, u64 *swgids)
530 {
531         struct smmu_info *info;
532         int err;
533         struct smmu_ivc_msg tx_msg, rx_msg;
534         struct tegra_hv_smmu_comm_dev *comm_dev = saved_smmu_comm_dev;
535         struct tegra_hv_smmu_comm_chan *comm_chan = NULL;
536         const struct device *dev = NULL;
537
538         if (!comm_dev)
539                 return -EINVAL;
540
541         comm_chan = comm_dev->hv_comm_chan[comm_chan_id];
542         if (!comm_chan || comm_chan->id != comm_chan_id)
543                 return -EINVAL;
544
545         dev = comm_chan->hv_comm_dev->dev;
546
547         memset(&tx_msg, 0, sizeof(struct smmu_ivc_msg));
548         memset(&rx_msg, 0, sizeof(struct smmu_ivc_msg));
549
550         err = ivc_send_recv(SMMU_INFO, comm_chan, &tx_msg, &rx_msg);
551         if (err)
552                 return -EIO;
553
554         info = &rx_msg.sinfo;
555         *swgids = info->swgids;
556         return err;
557 }
558
559 /* Detach the hwgroup from default address space and attach
560  * it to address space (asid) specified
561  */
562 int libsmmu_attach_hwgrp(int comm_chan_id, u32 hwgrp, u32 asid)
563 {
564         int err;
565         struct smmu_ivc_msg tx_msg, rx_msg;
566         struct drv_ctxt *dctxt = &tx_msg.dctxt;
567         struct tegra_hv_smmu_comm_dev *comm_dev = saved_smmu_comm_dev;
568         struct tegra_hv_smmu_comm_chan *comm_chan = NULL;
569         const struct device *dev = NULL;
570
571         if (!comm_dev)
572                 return -EINVAL;
573
574         comm_chan = comm_dev->hv_comm_chan[comm_chan_id];
575         if (!comm_chan || comm_chan->id != comm_chan_id)
576                 return -EINVAL;
577
578         dev = comm_chan->hv_comm_dev->dev;
579
580         memset(&tx_msg, 0, sizeof(struct smmu_ivc_msg));
581         memset(&rx_msg, 0, sizeof(struct smmu_ivc_msg));
582
583         /* server driver data */
584         dctxt->asid = asid;
585         dctxt->hwgrp = hwgrp;
586
587         err = ivc_send_recv(ATTACH, comm_chan, &tx_msg, &rx_msg);
588         if (err)
589                 return -EIO;
590
591         return err;
592 }
593
594 /* Detach the hwgroup from the guest specified address space
595  * and attach back to default address space (asid)
596  */
597 int libsmmu_detach_hwgrp(int comm_chan_id, u32 hwgrp)
598 {
599         int err;
600         struct smmu_ivc_msg tx_msg, rx_msg;
601         struct drv_ctxt *dctxt = &tx_msg.dctxt;
602         struct tegra_hv_smmu_comm_dev *comm_dev = saved_smmu_comm_dev;
603         struct tegra_hv_smmu_comm_chan *comm_chan = NULL;
604         const struct device *dev = NULL;
605
606         if (!comm_dev)
607                 return -EINVAL;
608
609         comm_chan = comm_dev->hv_comm_chan[comm_chan_id];
610         if (!comm_chan || comm_chan->id != comm_chan_id)
611                 return -EINVAL;
612
613         dev = comm_chan->hv_comm_dev->dev;
614
615         memset(&tx_msg, 0, sizeof(struct smmu_ivc_msg));
616         memset(&rx_msg, 0, sizeof(struct smmu_ivc_msg));
617
618         dctxt->hwgrp = hwgrp;
619         err = ivc_send_recv(DETACH, comm_chan, &tx_msg, &rx_msg);
620         if (err)
621                 return -EIO;
622
623         return err;
624 }
625
626 /* Map section
627  * Create iova -> pa tranlations
628  * Guest specified ipa is converted to pa
629  */
630 int libsmmu_map_large_page(int comm_chan_id, u32 asid, u64 iova, u64 ipa,
631                                                                 int attr)
632 {
633         int err;
634         struct smmu_ivc_msg tx_msg, rx_msg;
635         struct drv_ctxt *dctxt = &tx_msg.dctxt;
636         struct tegra_hv_smmu_comm_dev *comm_dev = saved_smmu_comm_dev;
637         struct tegra_hv_smmu_comm_chan *comm_chan = NULL;
638         const struct device *dev = NULL;
639
640         if (!comm_dev)
641                 return -EINVAL;
642
643         comm_chan = comm_dev->hv_comm_chan[comm_chan_id];
644         if (!comm_chan || comm_chan->id != comm_chan_id)
645                 return -EINVAL;
646
647         dev = comm_chan->hv_comm_dev->dev;
648
649         memset(&tx_msg, 0, sizeof(struct smmu_ivc_msg));
650         memset(&rx_msg, 0, sizeof(struct smmu_ivc_msg));
651
652         dctxt->asid = asid;
653         dctxt->iova = iova;
654         dctxt->ipa = ipa;
655         dctxt->attr = attr;
656
657         err = ivc_send_recv(MAP_LARGE_PAGE, comm_chan, &tx_msg, &rx_msg);
658         if (err)
659                 return -EIO;
660
661         BUG_ON(rx_msg.dctxt.asid != tx_msg.dctxt.asid);
662         return err;
663 }
664
665 /* Map 4K page
666  * Create iova -> pa tranlations
667  * Guest specified ipa is converted to pa
668  */
669 int libsmmu_map_page(int comm_chan_id, u32 asid, u64 iova,
670                                 int num_ent, int attr)
671 {
672         int err;
673         struct smmu_ivc_msg tx_msg, rx_msg;
674         struct drv_ctxt *dctxt = &tx_msg.dctxt;
675         struct tegra_hv_smmu_comm_dev *comm_dev = saved_smmu_comm_dev;
676         struct tegra_hv_smmu_comm_chan *comm_chan = NULL;
677         const struct device *dev = NULL;
678
679         if (!comm_dev)
680                 return -EINVAL;
681
682         comm_chan = comm_dev->hv_comm_chan[comm_chan_id];
683         if (!comm_chan || comm_chan->id != comm_chan_id)
684                 return -EINVAL;
685
686         dev = comm_chan->hv_comm_dev->dev;
687
688         memset(&tx_msg, 0, sizeof(struct smmu_ivc_msg));
689         memset(&rx_msg, 0, sizeof(struct smmu_ivc_msg));
690
691         dctxt->asid = asid;
692         dctxt->iova = iova;
693         dctxt->ipa = 0;
694         /* count of number of entries in mempool */
695         dctxt->count = num_ent;
696         dctxt->attr = attr;
697
698         err = ivc_send_recv(MAP_PAGE, comm_chan, &tx_msg, &rx_msg);
699         if (err)
700                 return -EIO;
701
702         BUG_ON(rx_msg.dctxt.asid != tx_msg.dctxt.asid);
703         return err;
704 }
705
706 /* Unmap specified number of bytes starting from iova */
707 int libsmmu_unmap(int comm_chan_id, u32 asid, u64 iova, u64 bytes)
708 {
709         int err;
710         struct smmu_ivc_msg tx_msg, rx_msg;
711         struct drv_ctxt *dctxt = &tx_msg.dctxt;
712         struct tegra_hv_smmu_comm_dev *comm_dev = saved_smmu_comm_dev;
713         struct tegra_hv_smmu_comm_chan *comm_chan = NULL;
714         const struct device *dev = NULL;
715
716         if (!comm_dev)
717                 return -EINVAL;
718
719         comm_chan = comm_dev->hv_comm_chan[comm_chan_id];
720         if (!comm_chan || comm_chan->id != comm_chan_id)
721                 return -EINVAL;
722
723         dev = comm_chan->hv_comm_dev->dev;
724
725         memset(&tx_msg, 0, sizeof(struct smmu_ivc_msg));
726         memset(&rx_msg, 0, sizeof(struct smmu_ivc_msg));
727
728         dctxt->asid = asid;
729         dctxt->iova = iova;
730         dctxt->ipa = bytes;
731
732         err = ivc_send_recv(UNMAP_PAGE, comm_chan, &tx_msg, &rx_msg);
733         if (err)
734                 return -EIO;
735
736         BUG_ON(rx_msg.dctxt.asid != tx_msg.dctxt.asid);
737         return bytes;
738 }
739
740 /* get ipa for the specified iova */
741 int libsmmu_iova_to_phys(int comm_chan_id, u32 asid, u64 iova, u64 *ipa)
742 {
743         int err;
744         struct smmu_ivc_msg tx_msg, rx_msg;
745         struct drv_ctxt *dctxt = &tx_msg.dctxt;
746         struct tegra_hv_smmu_comm_dev *comm_dev = saved_smmu_comm_dev;
747         struct tegra_hv_smmu_comm_chan *comm_chan = NULL;
748         const struct device *dev = NULL;
749
750         if (!comm_dev)
751                 return -EINVAL;
752
753         comm_chan = comm_dev->hv_comm_chan[comm_chan_id];
754         if (!comm_chan || comm_chan->id != comm_chan_id)
755                 return -EINVAL;
756
757         dev = comm_chan->hv_comm_dev->dev;
758
759         memset(&tx_msg, 0, sizeof(struct smmu_ivc_msg));
760         memset(&rx_msg, 0, sizeof(struct smmu_ivc_msg));
761
762         dctxt->asid = asid;
763         dctxt->iova = iova;
764
765         err = ivc_send_recv(IPA, comm_chan, &tx_msg, &rx_msg);
766         if (err)
767                 return -EIO;
768
769         *ipa = rx_msg.dctxt.ipa;
770         return err;
771 }
772
773 int libsmmu_debug_op(int comm_chan_id, u32 op, u64 op_data_in, u64 *op_data_out)
774 {
775         int err;
776         struct smmu_ivc_msg tx_msg, rx_msg;
777         struct drv_ctxt *dctxt = &tx_msg.dctxt;
778         struct tegra_hv_smmu_comm_dev *comm_dev = saved_smmu_comm_dev;
779         struct tegra_hv_smmu_comm_chan *comm_chan = NULL;
780         const struct device *dev = NULL;
781
782         if (!comm_dev)
783                 return -EINVAL;
784
785         comm_chan = comm_dev->hv_comm_chan[comm_chan_id];
786         if (!comm_chan || comm_chan->id != comm_chan_id)
787                 return -EINVAL;
788
789         dev = comm_chan->hv_comm_dev->dev;
790
791         memset(&tx_msg, 0, sizeof(struct smmu_ivc_msg));
792         memset(&rx_msg, 0, sizeof(struct smmu_ivc_msg));
793
794         /* Hack asid member for debug operation
795          * Hack iova member for debug data
796          */
797         dctxt->asid = op;
798         dctxt->iova = op_data_in;
799
800         err = ivc_send_recv(DEBUG_OP, comm_chan, &tx_msg, &rx_msg);
801         if (err)
802                 return -EIO;
803
804         /* Hack ipa member for debug data out */
805         *op_data_out = rx_msg.dctxt.ipa;
806         return err;
807 }