tegra: hv: Tegra Hypervisor manager and IVC
Pantelis Antoniou [Fri, 18 Apr 2014 08:12:38 +0000 (11:12 +0300)]
Implement a Tegra hypervisor manager and inter-virtual-comm
framework.

For details please see Documentation/tegra_hv/tegra_hv.txt

Bug 1417482

Change-Id: I9cad031441f87a822bc344ddc15b50995a87ced6
Signed-off-by: Pantelis Antoniou <pantoniou@nvidia.com>
Signed-off-by: Aingara Paramakuru <aparamakuru@nvidia.com>
Signed-off-by: Vlad Buzov <vbuzov@nvidia.com>
Reviewed-on: http://git-master/r/428393
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alexander Van Brunt <avanbrunt@nvidia.com>

14 files changed:
Documentation/devicetree/bindings/tegra_hv/tegra_hv.txt [new file with mode: 0644]
Documentation/tegra_hv/tegra_hv.txt [new file with mode: 0644]
drivers/virt/Kconfig
drivers/virt/Makefile
drivers/virt/tegra/Kconfig [new file with mode: 0644]
drivers/virt/tegra/Makefile [new file with mode: 0644]
drivers/virt/tegra/cbuf.c [new file with mode: 0644]
drivers/virt/tegra/cbuf.h [new file with mode: 0644]
drivers/virt/tegra/hyp_syscall.S [new file with mode: 0644]
drivers/virt/tegra/sys.c [new file with mode: 0644]
drivers/virt/tegra/syscalls.h [new file with mode: 0644]
drivers/virt/tegra/tegra_hv.c [new file with mode: 0644]
include/linux/tegra-ivc-config.h [new file with mode: 0644]
include/linux/tegra-ivc.h [new file with mode: 0644]

diff --git a/Documentation/devicetree/bindings/tegra_hv/tegra_hv.txt b/Documentation/devicetree/bindings/tegra_hv/tegra_hv.txt
new file mode 100644 (file)
index 0000000..9cc89a7
--- /dev/null
@@ -0,0 +1,78 @@
+* Tegra Hypervisor manager (tegra_hv)
+
+Required properties:
+- compatible: Should be "nvidia,tegra-hv".
+
+Optional properties:
+- server-to-peers: Array of guest IDs of the peers this guest is server to.
+Note that if you use loopbacks at least your guest ID should be included.
+
+Required sub-node:
+- queues: configuration about this guest
+
+  Required properties:
+  - #address-cells: Should be <1>
+  - #size-cells: Should be <0>
+
+  Any other sub-nodes contain each ivc channel configuration; Properties
+  required are:
+    - reg: ID of the channel (single cell address)
+    - peers: Tuple of guest IDs this channel pertains to. If none of them is
+    the running guest's ID, then the ivc channel entry is ignored.
+    - nframes: Number of queue entries
+    - frame-size: Size of each frame in bytes. Note that it's good practice to
+    keep this aligned to 16.
+
+Example:
+       tegra_hv: hyp {
+               compatible = "nvidia,tegra-hv";
+               status = "okay";
+
+               server-to-peers = <0 1>;
+
+               queues {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       /* guest 0 <-> guest 1 (frame based) */
+                       ivc0 {
+                               reg = <0>;
+                               peers = <0 1>;
+                               nframes = <2>;
+                               frame-size = <50>;
+                       };
+
+                       /* guest 0 <-> guest 1 (comm) */
+                       ivc1 {
+                               reg = <1>;
+                               peers = <0 1>;
+                               nframes = <4>;
+                               frame-size = <16>;
+                       };
+
+                       /* guest 0 <-> guest 1 (net) */
+                       ivc2 {
+                               reg = <2>;
+                               peers = <0 1>;
+                               nframes = <32>;
+                               frame-size = <1536>;
+                       };
+
+                       /* guest 0 <-> guest 0 (loop) */
+                       ivc5 {
+                               reg = <5>;
+                               peers = <0 0>;
+                               nframes = <4>;
+                               frame-size = <16>;
+                       };
+
+                       /* guest 0 <-> guest 0 (loop) */
+                       ivc6 {
+                               reg = <6>;
+                               peers = <0 0>;
+                               nframes = <4>;
+                               frame-size = <16>;
+                       };
+
+               };
+       };
diff --git a/Documentation/tegra_hv/tegra_hv.txt b/Documentation/tegra_hv/tegra_hv.txt
new file mode 100644 (file)
index 0000000..454fae1
--- /dev/null
@@ -0,0 +1,223 @@
+The Tegra Hypervisor driver exposes a number of APIs both for user-space
+communication and kernel space drivers. The purpose of the APIs is to
+facilitate safe and relatively efficient communication between software
+entities running on different guests of the hypervisor.
+
+In kernel IVC API
+-----------------
+
+The steps required for an in-kernel user are:
+
+1. Call tegra_hv_ivc_reserve() to request an IVC channel
+and reserve it for your own use:
+
+2. Use the fields of the IVC cookie returned as required.
+For instance you can allocate buffers and/or install interrupt
+handlers.
+
+3. Use the transfer API to read/write data from the channel, and/or
+query the state of it.
+
+4. Finally, call tegra_hv_ivc_unreserve() to release the channel.
+
+Structures
+----------
+
+struct tegra_hv_ivc_cookie: - is the cookie returned from the call to
+                             tegra_hv_ivc_reserve()
+
+       struct tegra_hv_ivc_cookie {
+               int irq;                /* interrupt this channel uses (both for rx/tx) */
+               int peer_vmid;          /* guest id of the other end */
+               int nframes;            /* number of frames in the queue */
+               int frame_size;         /* frame size in bytes */
+       };
+
+struct tegra_hv_ivc_ops - structure pointer to pass to tegra_hv_ivc_reserve(),
+                       when you don't want to perform IRQ installation in your driver.
+
+       struct tegra_hv_ivc_ops {
+               /* called when data are received */
+               void (*rx_rdy)(struct tegra_hv_ivc_cookie *ivck);
+               /* called when space is available to write data */
+               void (*tx_rdy)(struct tegra_hv_ivc_cookie *ivck);
+       };
+
+Functions
+---------
+
+/**
+ * tegra_hv_ivc_reserve - Reserve an IVC queue for use
+ * @dn:                Device node pointer to the queue in the DT
+ *             If NULL, then operate on first HV device
+ * @queue_id   Id number of the queue to use.
+ * @ops                Ops structure or NULL
+ *
+ * Reserves the queue for use
+ *
+ * Returns a pointer to the ivc_dev to use or an ERR_PTR.
+ * Note that returning EPROBE_DEFER means that the ivc driver
+ * hasn't loaded yet and you should try again later in the
+ * boot sequence.
+ */
+struct tegra_hv_ivc_cookie *tegra_hv_ivc_reserve(
+               struct device_node *dn, int id,
+               const struct tegra_hv_ivc_ops *ops);
+
+/**
+ * tegra_hv_ivc_unreserve - Unreserve an IVC queue used
+ * @ivck       IVC cookie
+ *
+ * Unreserves the IVC channel
+ *
+ * Returns 0 on success and an error code otherwise
+ */
+int tegra_hv_ivc_unreserve(struct tegra_hv_ivc_cookie *ivck);
+
+/**
+ * ivc_hv_ivc_write - Writes a frame to the IVC queue
+ * @ivck       IVC cookie of the queue
+ * @buf                Pointer to the data to write
+ * @size       Size of the data to write
+ *
+Write a number of bytes (as a single frame) to the queue.
+ *
+ * Returns size on success and an error code otherwise
+ */
+int tegra_hv_ivc_write(struct tegra_hv_ivc_cookie *ivck, const void *buf,
+               int size);
+
+/**
+ * ivc_hv_ivc_read - Reads a frame from the IVC queue
+ * @ivck       IVC cookie of the queue
+ * @buf                Pointer to the data to read
+ * @size       max size of the data to read
+ *
+ * Reads a number of bytes (as a single frame) from the queue.
+ *
+ * Returns size on success and an error code otherwise
+ */
+int tegra_hv_ivc_read(struct tegra_hv_ivc_cookie *ivck, void *buf, int size);
+
+/**
+ * ivc_hv_ivc_can_read - Test whether data are available
+ * @ivck       IVC cookie of the queue
+ *
+ * Test whether data to read are available
+ *
+ * Returns 1 if data are available in the rx queue, 0 if not
+ */
+int tegra_hv_ivc_can_read(struct tegra_hv_ivc_cookie *ivck);
+
+/**
+ * ivc_hv_ivc_can_write - Test whether data can be written
+ * @ivck       IVC cookie of the queue
+ *
+ * Test whether data can be written
+ *
+ * Returns 1 if data can be written to the tx queue, 0 if not
+ */
+int tegra_hv_ivc_can_write(struct tegra_hv_ivc_cookie *ivck);
+
+/**
+ * ivc_hv_ivc_tx_empty - Test whether the tx queue is empty
+ * @ivck       IVC cookie of the queue
+ *
+ * Test whether the tx queue is completely empty
+ *
+ * Returns 1 if the queue is empty, zero otherwise
+ */
+int tegra_hv_ivc_tx_empty(struct tegra_hv_ivc_cookie *ivck);
+
+/**
+ * ivc_hv_ivc_loopback - Sets (or clears) loopback mode
+ * @ivck       IVC cookie of the queue
+ * @mode       Set loopback on/off (1 = on, 0 = off)
+ *
+ * Sets or clears loopback mode accordingly.
+ *
+ * When loopback is active any writes are ignored, while
+ * reads do not return data.
+ * Incoming data are copied immediately to the tx queue.
+ *
+ * Returns 0 on success, a negative error code otherwise
+ */
+int tegra_hv_ivc_set_loopback(struct tegra_hv_ivc_cookie *ivck, int mode);
+
+/* perform fast loopback */
+int tegra_hv_ivc_perform_loopback(struct tegra_hv_ivc_cookie *ivck);
+
+/* debugging aid */
+int tegra_hv_ivc_dump(struct tegra_hv_ivc_cookie *ivck);
+
+/**
+ * ivc_hv_ivc_read_peek - Peek (copying) data from a received frame
+ * @ivck       IVC cookie of the queue
+ * @buf                Buffer to receive the data
+ * @off                Offset in the frame
+ * @count      Count of bytes to copy
+ *
+ * Peek data from a received frame, copying to buf, without removing
+ * the frame from the queue.
+ *
+ * Returns 0 on success, a negative error code otherwise
+ */
+int tegra_hv_ivc_read_peek(struct tegra_hv_ivc_cookie *ivck,
+               void *buf, int off, int count);
+
+/**
+ * ivc_hv_ivc_read_get_next_frame - Peek at the next frame to receive
+ * @ivck       IVC cookie of the queue
+ *
+ * Peek at the next frame to be received, without removing it from
+ * the queue.
+ *
+ * Returns a pointer to the frame, or an error encoded pointer.
+ */
+void *tegra_hv_ivc_read_get_next_frame(struct tegra_hv_ivc_cookie *ivck);
+
+/**
+ * ivc_hv_ivc_read_advance - Advance the read queue
+ * @ivck       IVC cookie of the queue
+ *
+ * Advance the read queue
+ *
+ * Returns 0, or a negative error value if failed.
+ */
+int tegra_hv_ivc_read_advance(struct tegra_hv_ivc_cookie *ivck);
+
+/**
+ * ivc_hv_ivc_write_poke - Poke data to a frame to be transmitted
+ * @ivck       IVC cookie of the queue
+ * @buf                Buffer to the data
+ * @off                Offset in the frame
+ * @count      Count of bytes to copy
+ *
+ * Copy data to a transmit frame, copying from buf, without advancing
+ * the transmit queue.
+ *
+ * Returns 0 on success, a negative error code otherwise
+ */
+int tegra_hv_ivc_write_poke(struct tegra_hv_ivc_cookie *ivck,
+               const void *buf, int off, int count);
+
+/**
+ * ivc_hv_ivc_write_get_next_frame - Poke at the next frame to transmit
+ * @ivck       IVC cookie of the queue
+ *
+ * Get access to the next frame.
+ *
+ * Returns a pointer to the frame, or an error encoded pointer.
+ */
+void *tegra_hv_ivc_write_get_next_frame(struct tegra_hv_ivc_cookie *ivck);
+
+/**
+ * ivc_hv_ivc_write_advance - Advance the write queue
+ * @ivck       IVC cookie of the queue
+ *
+ * Advance the write queue
+ *
+ * Returns 0, or a negative error value if failed.
+ */
+int tegra_hv_ivc_write_advance(struct tegra_hv_ivc_cookie *ivck);
+
index 99ebdde..7f8f2ed 100644 (file)
@@ -30,4 +30,6 @@ config FSL_HV_MANAGER
           4) A kernel interface for receiving callbacks when a managed
             partition shuts down.
 
+source "drivers/virt/tegra/Kconfig"
+
 endif
index c47f04d..d70adc2 100644 (file)
@@ -3,3 +3,4 @@
 #
 
 obj-$(CONFIG_FSL_HV_MANAGER)   += fsl_hypervisor.o
+obj-$(CONFIG_TEGRA_VIRTUALIZATION) += tegra/
diff --git a/drivers/virt/tegra/Kconfig b/drivers/virt/tegra/Kconfig
new file mode 100644 (file)
index 0000000..68469c8
--- /dev/null
@@ -0,0 +1,20 @@
+config TEGRA_VIRTUALIZATION
+       bool "Tegra Virtualization support"
+       depends on ARCH_TEGRA_12x_SOC || ARCH_ARM64
+       select VIRTUALIZATION
+       default n
+       help
+         Enable Tegra Virtualization support; turning this option on
+         enables features like the tegra virtualization manager and
+         all others that depend on it.
+
+         Select y to enable virtualization features on the tegra platforms
+         that support it.
+
+config TEGRA_HV_MANAGER
+       tristate "Nvidia Tegra hypervisor management & IVC"
+       depends on TEGRA_VIRTUALIZATION
+       help
+         The NVIDIA hypervisor management driver provides IVC services (for now)
+
+         Select Y to enable
diff --git a/drivers/virt/tegra/Makefile b/drivers/virt/tegra/Makefile
new file mode 100644 (file)
index 0000000..ccf7214
--- /dev/null
@@ -0,0 +1,6 @@
+#
+# Makefile for Hypervisor interface
+#
+
+obj-$(CONFIG_TEGRA_VIRTUALIZATION)     += hyp_syscall.o sys.o cbuf.o
+obj-$(CONFIG_TEGRA_HV_MANAGER)         += tegra_hv.o
diff --git a/drivers/virt/tegra/cbuf.c b/drivers/virt/tegra/cbuf.c
new file mode 100644 (file)
index 0000000..a128012
--- /dev/null
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Circular buffer with 1 empty slot
+ *
+ * Traits:
+ * - It requires no locking across reader - writer
+ *
+ * Cons:
+ * - Wastes one empty slot
+ * - Does copying
+ *
+ * [0] <-R <-W
+ * [1]         Empty
+ * [2]
+ *
+ * [0]
+ * [1] <-W     Full
+ * [2] <-R
+ * [3]
+ *
+ * [0]
+ * [1] Full
+ * [2] <-W
+ * [3] <-R
+ *
+ *
+ * [0] <-R
+ * [1]         Full
+ * [2]
+ * [3] <-W
+ *
+ * This file is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of NVIDIA CORPORATION nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NVIDIA CORPORATION OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/string.h>
+#include <linux/errno.h>
+
+#include "cbuf.h"
+
+int cbuf_write(struct cbuf *cb, void *data)
+{
+       /* Check if full */
+       if (cbuf_is_full(cb))
+               return -ENOMEM;
+
+       /* Write */
+       memcpy(cbuf_write_data_ptr(cb), data, cb->struct_size);
+
+       cbuf_wmb();
+
+       /* Check if need to wrap */
+       if (cb->w_pos == cb->end_idx)
+               cb->w_pos = 0;  /* Wrap position */
+       else
+               cb->w_pos++;    /* Go to next empty pos */
+
+       return 0;
+}
+
+int cbuf_read(struct cbuf *cb, void *data)
+{
+       /* Check if empty */
+       if (cbuf_is_empty(cb))
+               return -ENOMEM;
+
+       /* Read */
+       memcpy(data, cbuf_read_data_ptr(cb), cb->struct_size);
+
+       /* Update */
+       if (cb->r_pos == cb->end_idx)
+               cb->r_pos = 0;
+       else
+               cb->r_pos++;
+
+       return 0;
+}
+
diff --git a/drivers/virt/tegra/cbuf.h b/drivers/virt/tegra/cbuf.h
new file mode 100644 (file)
index 0000000..86dec91
--- /dev/null
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Circular buffer implementation
+ *
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of NVIDIA CORPORATION nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NVIDIA CORPORATION OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __CBUF_H__
+#define __CBUF_H__
+
+#include <asm/barrier.h>
+
+struct cbuf {
+       int end_idx;    /* Array size - 1 */
+       int struct_size;
+       int w_pos;      /* Always points at empty pos */
+       int r_pos;      /* May point at empty/full. is_empty should tell */
+       char buf[];     /* Special buffer pointer, shmem is same,
+                          but different address across VMs */
+};
+
+/* barriers; make them work even on uP */
+#ifdef CONFIG_SMP
+static inline void cbuf_mb(void)
+{
+       smp_mb();
+}
+static inline void cbuf_rmb(void)
+{
+       smp_rmb();
+}
+static inline void cbuf_wmb(void)
+{
+       smp_wmb();
+}
+#else
+static inline void cbuf_mb(void)
+{
+       mb();
+}
+static inline void cbuf_rmb(void)
+{
+       rmb();
+}
+static inline void cbuf_wmb(void)
+{
+       wmb();
+}
+#endif
+
+static inline struct cbuf *cbuf_init(void *buf, int struct_size, int elems)
+{
+       struct cbuf *cb = buf;  /* Start of buffer */
+       cb->end_idx = elems - 1;
+       cb->struct_size = struct_size;
+       cb->w_pos = 0;
+       cb->r_pos = 0;
+
+       return cb;
+}
+
+static inline int cbuf_is_empty(struct cbuf *cbuf)
+{
+       cbuf_rmb();
+       return cbuf->w_pos == cbuf->r_pos;
+}
+
+static inline int cbuf_is_full(struct cbuf *cbuf)
+{
+       cbuf_rmb();
+
+       /* Full when r=0, w=end_idx */
+       if (cbuf->w_pos - cbuf->end_idx == cbuf->r_pos)
+               return 1;
+       /* Full when w=r-1 */
+       else if (cbuf->w_pos == cbuf->r_pos - 1)
+               return 1;
+       else
+               return 0;
+}
+
+/* assumes check for empty earlier */
+static inline void __iomem *cbuf_read_data_ptr(struct cbuf *cb)
+{
+       cbuf_rmb();
+
+       return &cb->buf[cb->r_pos * cb->struct_size];
+}
+
+/* assumes check for full earlier */
+static inline void __iomem *cbuf_write_data_ptr(struct cbuf *cb)
+{
+       cbuf_rmb();
+
+       return &cb->buf[cb->w_pos * cb->struct_size];
+}
+
+static inline void cbuf_advance_w_pos(struct cbuf *cb)
+{
+       /* Check if need to wrap */
+       if (cb->w_pos >= cb->end_idx)
+               cb->w_pos = 0;  /* Wrap position */
+       else
+               cb->w_pos++;    /* Go to next empty pos */
+
+       cbuf_wmb();
+}
+
+static inline void cbuf_advance_r_pos(struct cbuf *cb)
+{
+       /* Check if need to wrap */
+       if (cb->r_pos >= cb->end_idx)
+               cb->r_pos = 0;
+       else
+               cb->r_pos++;
+
+       cbuf_wmb();
+}
+
+int cbuf_write(struct cbuf *cb, void *data);
+int cbuf_read(struct cbuf *cb, void *data);
+
+#endif /*__CBUF_H__ */
diff --git a/drivers/virt/tegra/hyp_syscall.S b/drivers/virt/tegra/hyp_syscall.S
new file mode 100644 (file)
index 0000000..c13e010
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2014 NVIDIA Corporation. All rights reserved.
+ *
+ * Hypervisor related routines
+ *
+ * This file is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of NVIDIA CORPORATION nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NVIDIA CORPORATION OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/hwcap.h>
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
+
+#include "syscalls.h"
+
+#.arch_extension virt
+
+/*
+ * Old toolchains cannot assemble this so we use a macro.
+ */
+.macro hvc nr
+.if \nr > 15
+.error "Unsupported hvc call number, too high for this macro"
+.endif
+.word 0xe140007\nr
+.endm
+/*
+ * Declares a system call entry exit function with
+ * HVC value, a pointer and number of words sent/received.
+ */
+.macro declare_syscall name, syscall_nr
+       ENTRY(hvc_\name)
+               stmfd   sp!, {r0-r12}   @ sp->|r0-r12|
+               hvc     \syscall_nr     @ HVC Call
+               stmfd   sp!, {r0}       @ Store result |->res|r0-r12|
+               ldr     r0, [sp, #4]    @ Load struct ptr from stack
+               stmia   r0, {r1-r12}    @ Fill structure
+               ldmfd   sp!, {r0}       @ Restore result in r0 |->r0-r12|
+               add     sp, sp, #16     @ Unwind until r4
+               ldmfd   sp!, {r4-r12}   @ Restore regs to preserve.
+               mov     pc, lr
+       ENDPROC(hvc_\name)
+.endm
+
+/* Those who need to read data use this */
+declare_syscall read_gid HVC_NR_READ_GID
+declare_syscall read_nguests HVC_NR_READ_NGUESTS
+declare_syscall read_ivc_info HVC_NR_READ_IVC
+
+/* TODO: Define calls with no read in a way that does less reg. read/writes */
+/*
+ * r0 = irqnr
+ * r1 = vmid
+ *
+ * Return:
+ * r0 = return value
+ */
+ENTRY(hvc_raise_irq)
+       hvc     HVC_NR_RAISE_IRQ        @ HVC Call
+       mov     pc, lr
+ENDPROC(hvc_raise_irq)
+
diff --git a/drivers/virt/tegra/sys.c b/drivers/virt/tegra/sys.c
new file mode 100644 (file)
index 0000000..42b0a41
--- /dev/null
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * System calls to hypervisor
+ *
+ * This file is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of NVIDIA CORPORATION nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NVIDIA CORPORATION OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "syscalls.h"
+
+/*
+ * NOTE:
+ * Sysregs struct is for reads from HV only.
+ *
+ * To pass data to HV, use real registers [r0-rX] to HVC call.
+ */
+
+int hyp_read_gid(unsigned int *gid)
+{
+       int sysregs[12];
+       int ret;
+
+       ret = hvc_read_gid(sysregs);
+       *gid = *((unsigned int *)&sysregs[0]);
+       return ret;
+}
+
+int hyp_read_nguests(unsigned int *nguests)
+{
+       int sysregs[12];
+       int ret;
+
+       ret = hvc_read_nguests(sysregs);
+       *nguests = *((unsigned int *)&sysregs[0]);
+       return ret;
+}
+
+int hyp_read_ivc_info(struct hyp_ivc_info *data, int guestid)
+{
+       int sysregs[12];
+       int ret;
+
+       ret = hvc_read_ivc_info(sysregs, guestid);
+       *data = *((struct hyp_ivc_info *)&sysregs[0]);
+       return ret;
+}
+
+int hyp_raise_irq(unsigned int irq, unsigned int vmid)
+{
+       return hvc_raise_irq(irq, vmid);
+}
diff --git a/drivers/virt/tegra/syscalls.h b/drivers/virt/tegra/syscalls.h
new file mode 100644 (file)
index 0000000..212ddf1
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Hypervisor interfaces
+ *
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of NVIDIA CORPORATION nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NVIDIA CORPORATION OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __VMM_SYSCALLS_H__
+#define __VMM_SYSCALLS_H__
+
+#define HVC_NR_READ_STAT       1
+#define HVC_NR_READ_IVC                2
+#define HVC_NR_READ_GID                3
+#define HVC_NR_RAISE_IRQ       4
+#define HVC_NR_READ_NGUESTS    5
+
+#define GUEST_PRIMARY          0
+#define GUEST_IVC_SERVER       0
+
+#ifndef __ASSEMBLY__
+
+/*
+ * IVC communication information for each unique guest pair.
+ */
+struct hyp_ivc_info {
+       unsigned long base;     /* Base of private shared memory segments */
+       unsigned long size;     /* Size of shared memory segments */
+       unsigned long virq_base;        /* Base of virtual interrupts */
+       unsigned int  virq_total;       /* Number of irqs allocated per guest */
+};
+
+int hyp_read_gid(unsigned int *gid);
+int hyp_read_nguests(unsigned int *nguests);
+int hyp_read_ivc_info(struct hyp_ivc_info *data, int guestid);
+int hyp_raise_irq(unsigned int irq, unsigned int vmid);
+
+/* ASM prototypes */
+extern int hvc_read_gid(void *);
+extern int hvc_read_ivc_info(void *, int guestid);
+extern int hvc_read_nguests(void *);
+extern int hvc_raise_irq(unsigned int irq, unsigned int vmid);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __VMM_SYSCALLS_H__ */
diff --git a/drivers/virt/tegra/tegra_hv.c b/drivers/virt/tegra/tegra_hv.c
new file mode 100644 (file)
index 0000000..b22168f
--- /dev/null
@@ -0,0 +1,2034 @@
+/*
+ * Tegra Hypervisor manager + IVC
+ *
+ * Implements both a kernel level interface as well as a
+ * simple character level interface.
+ *
+ * Copyright (C) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/miscdevice.h>
+#include <linux/ioport.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/sched.h>
+#include <linux/poll.h>
+#include <linux/irqreturn.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+
+#include <linux/tegra-soc.h>
+#include <linux/tegra-ivc.h>
+#include <linux/tegra-ivc-config.h>
+
+#include "syscalls.h"
+#include "cbuf.h"
+
+/* very simple checksum over the shared data */
+static u32 tegra_hv_server_data_sum(const struct tegra_hv_shared_data *shd)
+{
+       const u32 *word = (const u32 *)shd;
+       u32 total = 0;
+       int remaining;
+
+       /* accumulate the configuration area one 32-bit word at a time */
+       for (remaining = tegra_hv_server_data_size(shd->nr_queues);
+                       remaining > 0; remaining -= sizeof(u32))
+               total += *word++;
+
+       /* negate so that re-summing the whole area yields zero */
+       return (u32)-total;
+}
+
+/* shared data are valid only when the magic signature is present and
+ * the whole area's checksum is zero */
+static int tegra_hv_server_data_valid(const struct tegra_hv_shared_data *shd)
+{
+       if (shd->magic != TEGRA_HV_SHD_MAGIC)
+               return 0;
+       return tegra_hv_server_data_sum(shd) == 0;
+}
+
+struct tegra_hv_data;
+
+/* Per-queue IVC channel state; one per /dev/ivcN character device. */
+struct ivc_dev {
+       struct tegra_hv_data    *hvd;
+       int                     minor;
+       struct list_head        list;
+       dev_t                   dev;
+       struct cdev             cdev;
+       struct device           *device;
+       char                    name[32];
+
+       /* channel configuration */
+       struct tegra_hv_queue_data *qd;
+       struct cbuf             *rx_cbuf;
+       struct cbuf             *tx_cbuf;
+       int                     other_guestid;
+       int                     irq;
+
+       /* operation mode */
+       spinlock_t              lock;           /* for accessing state */
+       wait_queue_head_t       wq;             /* wait queue for file mode */
+       unsigned int            opened:1;       /* set when opened */
+       unsigned int            reserved:1;     /* set when reserved */
+       unsigned int            loopback:1;     /* loopback active */
+
+       int                     local_loopback; /* -1 no loopback */
+       struct cbuf             *loopback_rx_cbuf;
+       struct cbuf             *loopback_tx_cbuf;
+       int                     loopback_irq;
+
+       /* in kernel cookies */
+       const struct tegra_hv_ivc_ops *cookie_ops;
+       struct tegra_hv_ivc_cookie cookie;
+};
+
+/* recover the enclosing ivc_dev from the in-kernel cookie handed out */
+#define cookie_to_ivc_dev(_cookie) \
+       container_of(_cookie, struct ivc_dev, cookie)
+
+#define NGUESTS_MAX            8
+
+/* Describe all info needed to do IVC to one particular guest */
+struct guest_ivc_info {
+       struct hyp_ivc_info res;
+       void __iomem *shmem;                    /* IO remapped shmem */
+       struct tegra_hv_shared_data *shd;
+       unsigned int server:1;          /* set when we're server */
+       unsigned int valid:1;           /* set on valid guest */
+       int max_qid;                    /* max queue id discovered */
+};
+
+/* Driver-instance state; one per tegra_hv platform device. */
+struct tegra_hv_data {
+       struct list_head list;
+       struct platform_device *pdev;
+       int guestid;
+       int nguests;
+       struct guest_ivc_info guest_ivc_info[NGUESTS_MAX];
+       unsigned int emulation:1;
+       unsigned int server:1;
+
+       /* ivc data */
+       struct mutex ivc_devs_lock;
+       struct list_head ivc_devs;
+       struct class *ivc_class;
+       int ivc_major;
+       int ivc_max_qid;
+       dev_t ivc_dev;
+};
+
+static struct list_head hv_list = LIST_HEAD_INIT(hv_list);
+static DEFINE_SPINLOCK(hv_list_lock);
+
+static inline int tegra_hv_get_queue_irq(struct tegra_hv_data *hvd,
+               int guestid, int id)
+{
+       struct guest_ivc_info *givci;
+
+       /* check if valid guest */
+       if ((unsigned int)guestid >= hvd->nguests)
+               return -1;
+
+       givci = &hvd->guest_ivc_info[guestid];
+
+       /* if non-valid guest fail */
+       if (!givci->valid)
+               return -1;
+
+       /* check if valid queue id */
+       if ((unsigned int)id >= givci->res.virq_total)
+               return -1;
+
+       /* simple mapping */
+       return givci->res.virq_base + id;
+}
+
+/* Select the ring we transmit into. */
+static struct cbuf *ivc_tx_cbuf(struct ivc_dev *ivc)
+{
+       /* on loopback we write to the rx queue of the other side */
+       struct cbuf *lb = ivc->loopback_rx_cbuf;
+
+       return lb ? lb : ivc->tx_cbuf;
+}
+
+/* true when no frame is pending in our receive ring */
+static int ivc_rx_empty(struct ivc_dev *ivc)
+{
+       return cbuf_is_empty(ivc->rx_cbuf);
+}
+
+/* true when the transmit ring (loopback-aware) is drained */
+static int ivc_tx_empty(struct ivc_dev *ivc)
+{
+       struct cbuf *cb = ivc_tx_cbuf(ivc);
+       return cbuf_is_empty(cb);
+}
+
+/* true when no room is left in the transmit ring (loopback-aware) */
+static int ivc_tx_full(struct ivc_dev *ivc)
+{
+       struct cbuf *cb = ivc_tx_cbuf(ivc);
+       return cbuf_is_full(cb);
+}
+
+/*
+ * Notify the peer of this channel that ring state changed.
+ *
+ * Real hypervisor: issue a hypercall that raises either the peer's irq
+ * or, for an irq-based local loopback, our own loopback irq.
+ * Emulation: no hypervisor exists, so directly wake the ivc_dev that is
+ * the local-loopback destination and fire its in-kernel callbacks.
+ */
+static void ivc_raise_irq(struct ivc_dev *ivc)
+{
+       struct ivc_dev *ivcd;
+       struct tegra_hv_ivc_cookie *ivckd;
+
+       if (ivc->hvd->emulation) {
+               /* only local loopback works under emulation */
+               if (ivc->local_loopback == -1)
+                       return;
+
+               /* not really efficient, but emulation is a special case */
+               mutex_lock(&ivc->hvd->ivc_devs_lock);
+               list_for_each_entry(ivcd, &ivc->hvd->ivc_devs, list) {
+                       if (ivcd->qd->id == ivc->local_loopback) {
+                               ivckd = &ivcd->cookie;
+                               goto found;
+                       }
+               }
+               ivcd = NULL;
+               ivckd = NULL;
+found:
+               if (ivcd != NULL)
+                       wake_up_interruptible_all(&ivcd->wq);
+               mutex_unlock(&ivc->hvd->ivc_devs_lock);
+
+               /* NOTE: callbacks are inherently racy */
+               if (ivcd != NULL && ivcd->cookie_ops) {
+                       /* there are data in the queue, callback */
+                       if (ivcd->cookie_ops->rx_rdy && !ivc_rx_empty(ivcd))
+                               ivcd->cookie_ops->rx_rdy(ivckd);
+
+                       /* there is space in the queue to write, callback */
+                       if (ivcd->cookie_ops->tx_rdy && !ivc_tx_full(ivcd))
+                               ivcd->cookie_ops->tx_rdy(ivckd);
+               }
+
+               return;
+       }
+
+       /* loopback_irq != 0 means irq-based local loopback: kick ourselves */
+       if (!ivc->loopback_irq)
+               hyp_raise_irq(ivc->irq, ivc->other_guestid);
+       else
+               hyp_raise_irq(ivc->loopback_irq, ivc->hvd->guestid);
+}
+
+/* Scratch copy of one "queues" DT child node's properties. */
+struct parsed_queue_data {
+       u32 reg;                /* queue id */
+       u32 peers[2];           /* the two guest ids sharing this queue */
+       u32 nframes;            /* frames per direction */
+       u32 frame_size;         /* bytes per frame */
+       u32 size;               /* total bytes per direction */
+};
+
+/*
+ * Parse (or, when pqd == NULL, merely validate) one queue DT node.
+ *
+ * Returns 0 when the node is well-formed AND this guest participates in
+ * it AND target_guestid is one of its peers; -EINVAL otherwise.  Error
+ * messages are only printed in parse mode (pqd != NULL).
+ */
+static int tegra_hv_dt_parse_queue_node(struct tegra_hv_data *hvd,
+               struct device_node *dn, struct parsed_queue_data *pqd,
+               int target_guestid, int server)
+{
+       struct platform_device *pdev = hvd->pdev;
+       struct device *dev = &pdev->dev;
+       struct parsed_queue_data tpqd;
+
+       /* if pqd is NULL perform validity check only (still need structure) */
+       if (pqd == NULL)
+               pqd = &tpqd;
+       memset(pqd, 0, sizeof(*pqd));
+
+       /* required properties */
+       if (of_property_read_u32(dn, "reg", &pqd->reg) != 0) {
+               if (pqd != &tpqd)
+                       dev_err(dev, "@%s reg property missing\n",
+                                       dn->full_name);
+               return -EINVAL;
+       }
+
+       if (of_property_read_u32_array(dn, "peers", pqd->peers, 2) != 0) {
+               if (pqd != &tpqd)
+                       dev_err(dev, "@%s peers property missing\n",
+                                       dn->full_name);
+               return -EINVAL;
+       }
+
+       /*
+       * Due to security implications only a star topology is supported.
+       * What this means is that the server IVC is at the center of the
+       * star, and communication is allowed between the server and
+       * the clients at the tips of the star.
+       *
+       * The only exception is when this is a local loopback for another
+       * peer.
+       */
+
+       /* we must be one of the two peers */
+       if (hvd->guestid != pqd->peers[0] && hvd->guestid != pqd->peers[1]) {
+               if (pqd != &tpqd)
+                       dev_warn(dev, "@%s (%d not in <%d %d>)\n",
+                               dn->full_name, hvd->guestid,
+                               pqd->peers[0], pqd->peers[1]);
+               return -EINVAL;
+       }
+
+       /* we should ignore peers that are not present */
+       if (pqd->peers[0] >= (u32)hvd->nguests ||
+               pqd->peers[1] >= (u32)hvd->nguests) {
+               if (pqd != &tpqd)
+                       dev_warn(dev, "@%s (one of <%d %d> greater than %d)\n",
+                               dn->full_name,
+                               pqd->peers[0], pqd->peers[1],
+                               hvd->nguests - 1);
+               return -EINVAL;
+       }
+
+       /* the target must be one of the peers */
+       if (target_guestid != pqd->peers[0] && target_guestid != pqd->peers[1])
+               return -EINVAL;
+
+       /* finally if both the peers are us we only allow self */
+       if (target_guestid == hvd->guestid &&
+               (pqd->peers[0] != hvd->guestid ||
+                       pqd->peers[1] != hvd->guestid))
+                       return -EINVAL;
+
+       /* if we're not a server we don't need the following properties */
+       if (!server)
+               return 0;
+
+       /* read properties (note no error checking) */
+       of_property_read_u32(dn, "nframes", &pqd->nframes);
+       of_property_read_u32(dn, "frame-size", &pqd->frame_size);
+       of_property_read_u32(dn, "size", &pqd->size);
+
+       /* if no size property given, construct one */
+       if (pqd->size == 0)
+               pqd->size = pqd->nframes * pqd->frame_size;
+
+       /* not valid */
+       if (pqd->size == 0) {
+               if (pqd != &tpqd)
+                       dev_err(dev, "@%s size=0\n", dn->full_name);
+               return -EINVAL;
+       }
+
+       /* single byte (stream mode): each byte is its own "frame" */
+       if (pqd->nframes == 0 || pqd->frame_size == 0) {
+               pqd->nframes = pqd->size;
+               pqd->frame_size = 1;
+       }
+
+       return 0;
+}
+
+/*
+ * Parse the "queues" DT node for the given target guest and, when this
+ * guest is the IVC server for it, lay out and checksum the shared-memory
+ * configuration area (tegra_hv_shared_data followed by queue descriptors
+ * and the cbuf rings themselves).
+ *
+ * Returns 0 on success or when nothing applies to target_guestid;
+ * -EINVAL on malformed DT, -ENOMEM when the shared window is too small.
+ */
+static int tegra_hv_dt_parse(struct tegra_hv_data *hvd,
+               int target_guestid)
+{
+       struct platform_device *pdev = hvd->pdev;
+       struct device *dev = &pdev->dev;
+       struct device_node *qdn, *dn;
+       void *p;
+       int count, ret = 0, i, cfgsize;
+       struct parsed_queue_data pqd;
+       u32 total_size, server_to;
+       struct cbuf *tx_cbuf, *rx_cbuf;
+       struct tegra_hv_queue_data *qd;
+       struct guest_ivc_info *givci;
+
+       if ((unsigned int)target_guestid >= hvd->nguests)
+               return -EINVAL;
+
+       givci = &hvd->guest_ivc_info[target_guestid];
+
+       /* queues node must exist (on server or client both) */
+       qdn = of_get_child_by_name(dev->of_node, "queues");
+       if (qdn == NULL)
+               return -EINVAL;
+
+       /* find out if the target_guestid is in the server-to-peers property */
+       for (i = 0; of_property_read_u32_index(dev->of_node,
+                       "server-to-peers", i,
+                       &server_to) == 0; i++) {
+               if (server_to == target_guestid) {
+                       givci->server = 1;
+                       break;
+               }
+       }
+
+       dev_info(dev, "we are guest #%d and we are a %s to guest #%d\n",
+                       hvd->guestid,
+                       givci->server ? "server" : "client",
+                       target_guestid);
+
+       /* not a server, we don't configure the shared memory */
+       if (!givci->server)
+               return 0;
+
+       /* iterate over the child nodes to find count */
+       count = 0;
+       for_each_child_of_node(qdn, dn) {
+               /* only increase count for valid nodes */
+               if (tegra_hv_dt_parse_queue_node(hvd, dn,
+                                       NULL, target_guestid,
+                                       givci->server) == 0)
+                       count++;
+       }
+
+       if (count == 0) {
+               dev_warn(dev, "guest #%d, no queues found\n",
+                               target_guestid);
+               ret = 0;
+               goto out;
+       }
+       givci->valid = 1;       /* valid guest */
+
+       cfgsize = tegra_hv_server_data_size(count);
+       if (cfgsize >= givci->res.size) {
+               /*
+                * BUGFIX: this used to print pqd.size, which is still
+                * uninitialized at this point; report cfgsize instead.
+                */
+               dev_err(dev, "guest #%d, config size %d too large (>= %lu)\n",
+                               target_guestid, cfgsize, givci->res.size);
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       /* point right after the queue configuration data */
+       p = (void *)givci->shd + cfgsize;
+
+       dev_info(dev, "guest #%d, starting at 0x%08x - #%d queues\n",
+                       target_guestid, cfgsize, count);
+
+       i = 0;
+       for_each_child_of_node(qdn, dn) {
+
+               /* don't process bad nodes or with non-participating guestid */
+               if (tegra_hv_dt_parse_queue_node(hvd, dn, &pqd,
+                                       target_guestid, givci->server) != 0)
+                       continue;
+
+               /* make sure you don't overwrite the shared data */
+               total_size = pqd.size * 2;
+               if (pqd.frame_size != 1)
+                       total_size += pqd.nframes * sizeof(u32);
+
+               if (p + total_size >= (void *)givci->shd + givci->res.size) {
+                       dev_err(dev, "guest #%d, overflow of shared memory\n",
+                                       target_guestid);
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
+               qd = tegra_hv_shd_to_queue_data(givci->shd) + i;
+
+               qd->id = pqd.reg;
+               qd->peers[0] = pqd.peers[0];
+               qd->peers[1] = pqd.peers[1];
+               qd->size = pqd.size + sizeof(struct cbuf);
+               qd->nframes = pqd.nframes;
+               qd->frame_size = pqd.frame_size;
+               qd->offset = p - (void *)givci->shd;
+               qd->flags = 0;
+
+               if (pqd.frame_size == 1)
+                       qd->flags |= TQF_STREAM_MODE;
+
+               /* two rings back to back: tx then rx (from our viewpoint);
+                * stream mode carries an extra u32 per frame */
+               tx_cbuf = cbuf_init(p, pqd.frame_size, pqd.nframes);
+               p += pqd.size + sizeof(struct cbuf);
+               if ((qd->flags & TQF_STREAM_MODE) != 0)
+                       p += pqd.nframes * sizeof(u32);
+
+               rx_cbuf = cbuf_init(p, pqd.frame_size, pqd.nframes);
+               p += pqd.size + sizeof(struct cbuf);
+               if ((qd->flags & TQF_STREAM_MODE) != 0)
+                       p += pqd.nframes * sizeof(u32);
+
+               /* cast the ptrdiff_t offsets so they match the %x specifier */
+               dev_info(dev, "guest #%d, Q#%d: <%d %d> cbuf #0 @ 0x%06x, cbuf #1 @ 0x%06x\n",
+                               target_guestid, qd->id,
+                               qd->peers[0], qd->peers[1],
+                               (int)((void *)tx_cbuf - (void *)givci->shd),
+                               (int)((void *)rx_cbuf - (void *)givci->shd));
+
+               i++;
+       }
+
+       givci->shd->nr_queues = i;
+       givci->shd->flags = 0;
+       givci->shd->sum = 0;    /* initial */
+       givci->shd->magic = TEGRA_HV_SHD_MAGIC;
+
+       /* update checksum */
+       givci->shd->sum = tegra_hv_server_data_sum(givci->shd);
+
+       /* make the configuration visible to the peer before signalling */
+       cbuf_wmb();
+
+out:
+       of_node_put(qdn);
+       return ret;
+}
+
+/*
+ * Echo every pending rx frame back out the tx ring (loopback mode).
+ * Raises the channel irq once if anything moved; -EINVAL when the
+ * channel is not in loopback mode.
+ */
+static int ivc_perform_loopback(struct ivc_dev *ivc)
+{
+       struct cbuf *rx, *tx;
+       int count;
+
+       if (!ivc->loopback)
+               return -EINVAL;
+
+       rx = ivc->rx_cbuf;
+       tx = ivc->tx_cbuf;
+
+       /* both rings must have the same frame size for a raw copy */
+       BUG_ON(rx->struct_size != tx->struct_size);
+
+       /* got input and space is available */
+       count = 0;
+       while (!cbuf_is_empty(rx) && !cbuf_is_full(tx)) {
+
+               /* copy data directly from rx to tx */
+               memcpy(cbuf_write_data_ptr(tx), cbuf_read_data_ptr(rx),
+                               tx->struct_size);
+               cbuf_advance_r_pos(rx);
+               cbuf_advance_w_pos(tx);
+               count++;
+       }
+
+       /* only notify the peer when frames actually moved */
+       if (count > 0)
+               ivc_raise_irq(ivc);
+
+       return 0;
+}
+
+/* irq handler installed while a channel is in loopback mode */
+static irqreturn_t ivc_dev_loopback_irq_handler(int irq, void *data)
+{
+       /* bounce all pending frames back to the sender */
+       ivc_perform_loopback((struct ivc_dev *)data);
+       return IRQ_HANDLED;
+}
+
+/*
+ * Switch a channel in or out of loopback mode (mode is 0 or 1).
+ *
+ * The channel must be neither opened nor reserved.  Fails with -EINVAL
+ * on a bad mode or busy channel, or with the devm_request_irq() error.
+ *
+ * BUGFIX: devm_request_irq()/devm_free_irq() may sleep and therefore
+ * must not be called under the spinlock (the original code did exactly
+ * that).  We now claim the new mode under the lock and do the irq work
+ * afterwards, reverting on failure — the same pattern ivc_dev_open()
+ * uses for its irq request.
+ */
+static int ivc_set_loopback(struct ivc_dev *ivc, int mode)
+{
+       int ret;
+
+       if ((unsigned int)mode >= 2)
+               return -EINVAL;
+
+       spin_lock(&ivc->lock);
+
+       /* should not be opened, neither reserved */
+       if (ivc->opened || ivc->reserved) {
+               spin_unlock(&ivc->lock);
+               return -EINVAL;
+       }
+
+       /* same mode: nothing to do */
+       if (ivc->loopback == mode) {
+               spin_unlock(&ivc->lock);
+               return 0;
+       }
+
+       /* claim the new mode before dropping the lock */
+       ivc->loopback = mode;
+       spin_unlock(&ivc->lock);
+
+       if (!ivc->hvd->emulation) {
+               if (mode) {
+                       /* request our irq */
+                       ret = devm_request_irq(
+                                       ivc->device, ivc->irq,
+                                       ivc_dev_loopback_irq_handler,
+                                       0, dev_name(ivc->device), ivc);
+                       if (ret != 0) {
+                               /* revert to the previous mode */
+                               spin_lock(&ivc->lock);
+                               ivc->loopback = !mode;
+                               spin_unlock(&ivc->lock);
+                               return ret;
+                       }
+               } else
+                       devm_free_irq(ivc->device, ivc->irq, ivc);
+       }
+
+       return 0;
+}
+
+/* channel notification irq: wake every reader/writer/poller */
+static irqreturn_t ivc_dev_irq_handler(int irq, void *data)
+{
+       struct ivc_dev *ivcd = data;
+
+       /* simple implementation, just kick all waiters */
+       wake_up_interruptible_all(&ivcd->wq);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * open() for /dev/ivcN: take exclusive ownership of the channel and,
+ * on a real hypervisor, install the notification irq handler.
+ *
+ * -EBUSY when the channel is already opened, reserved by the kernel
+ * API, or in loopback mode.
+ */
+static int ivc_dev_open(struct inode *inode, struct file *filp)
+{
+       struct cdev *cdev = inode->i_cdev;
+       struct ivc_dev *ivc = container_of(cdev, struct ivc_dev, cdev);
+       int ret;
+
+       spin_lock(&ivc->lock);
+       if (ivc->opened || ivc->reserved || ivc->loopback) {
+               ret = -EBUSY;
+               goto out;
+       }
+
+       /* mark opened under the lock, then drop it: devm_request_irq()
+        * may sleep and cannot be called while holding a spinlock */
+       ivc->opened = 1;
+       spin_unlock(&ivc->lock);
+
+       if (!ivc->hvd->emulation) {
+               /* request our irq */
+               ret = devm_request_irq(
+                               ivc->device, ivc->irq,
+                               ivc_dev_irq_handler,
+                               0, dev_name(ivc->device), ivc);
+               if (ret < 0) {
+                       dev_err(ivc->device, "Failed to request irq %d\n",
+                                       ivc->irq);
+                       goto err_no_irq;
+               }
+       }
+
+       /* all done */
+       filp->private_data = ivc;
+
+       return 0;
+err_no_irq:
+
+       /* undo the opened claim */
+       spin_lock(&ivc->lock);
+       ivc->opened = 0;
+out:
+       spin_unlock(&ivc->lock);
+
+       filp->private_data = NULL;
+
+       return ret;
+}
+
+/* release() for /dev/ivcN: free the irq and give up the channel */
+static int ivc_dev_release(struct inode *inode, struct file *filp)
+{
+       struct ivc_dev *ivc = filp->private_data;
+
+       if (!ivc)
+               return 0;
+
+       /* irq was requested at open time (real hypervisor only) */
+       if (!ivc->hvd->emulation)
+               devm_free_irq(ivc->device, ivc->irq, ivc);
+
+       spin_lock(&ivc->lock);
+       filp->private_data = NULL;
+       ivc->opened = 0;
+       spin_unlock(&ivc->lock);
+
+       return 0;
+}
+
+/* no ioctls are implemented yet; reject everything with -ENOTTY */
+static long ivc_dev_ioctl(struct file *filp, unsigned int cmd,
+               unsigned long arg)
+{
+       switch (cmd) {
+       default:
+               return -ENOTTY;
+       }
+}
+
+/* Debug helper: log the channel configuration and both ring states. */
+static int ivc_dump(struct ivc_dev *ivc)
+{
+       struct tegra_hv_data *hvd = ivc->hvd;
+       struct platform_device *pdev = hvd->pdev;
+       struct device *dev = &pdev->dev;
+       struct cbuf *cb;
+
+       dev_info(dev, "IVC#%d: IRQ=%d nframes=%d frame_size=%d offset=%d\n",
+                       ivc->qd->id, ivc->irq,
+                       ivc->qd->nframes, ivc->qd->frame_size, ivc->qd->offset);
+
+       /* read/write positions of the receive ring */
+       cb = ivc->rx_cbuf;
+       dev_info(dev, " RXCB: end_idx=%d struct_size=%d w_pos=%d r_pos=%d\n",
+                       cb->end_idx, cb->struct_size, cb->w_pos, cb->r_pos);
+       /* read/write positions of the transmit ring */
+       cb = ivc->tx_cbuf;
+       dev_info(dev, " TXCB: end_idx=%d struct_size=%d w_pos=%d r_pos=%d\n",
+                       cb->end_idx, cb->struct_size, cb->w_pos, cb->r_pos);
+
+       return 0;
+}
+
+/*
+ * Consume one rx frame into a kernel buffer.
+ *
+ * Copies min(max_read, frame size) bytes and zero-fills any remainder
+ * of buf up to max_read.  Returns the number of frame bytes copied, or
+ * -ENOMEM when no frame is pending.  Raises the irq so the peer learns
+ * a slot was freed.
+ */
+static int ivc_rx_read(struct ivc_dev *ivc, void *buf, int max_read)
+{
+       struct cbuf *cb = ivc->rx_cbuf;
+       int ret, chunk, left;
+
+       if (cbuf_is_empty(cb))
+               return -ENOMEM;
+
+       if (max_read > cb->struct_size) {
+               chunk = cb->struct_size;
+               left = max_read - chunk;
+       } else {
+               chunk = max_read;
+               left = 0;
+       }
+
+       memcpy(buf, cbuf_read_data_ptr(cb), chunk);
+       /* zero the tail of the caller's buffer past the frame */
+       memset(buf + chunk, 0, left);
+
+       cbuf_advance_r_pos(cb);
+       ivc_raise_irq(ivc);
+
+       ret = chunk;
+
+       return ret;
+}
+
+/*
+ * Consume one rx frame into a userspace buffer (see ivc_rx_read()).
+ * Returns bytes copied, -ENOMEM when empty, -EFAULT on a bad user
+ * pointer; on -EFAULT the frame is NOT consumed.
+ */
+static int ivc_rx_read_user(struct ivc_dev *ivc, void __user *buf, int max_read)
+{
+       struct cbuf *cb = ivc->rx_cbuf;
+       int ret, chunk, left;
+
+       if (cbuf_is_empty(cb))
+               return -ENOMEM;
+
+       if (max_read > cb->struct_size) {
+               chunk = cb->struct_size;
+               left = max_read - chunk;
+       } else {
+               chunk = max_read;
+               left = 0;
+       }
+
+       if (copy_to_user(buf, cbuf_read_data_ptr(cb), chunk))
+               return -EFAULT;
+       /* zero the tail of the user buffer past the frame */
+       if (left > 0 && clear_user(buf + chunk, left))
+               return -EFAULT;
+
+       cbuf_advance_r_pos(cb);
+       ivc_raise_irq(ivc);
+
+       ret = chunk;
+
+       return ret;
+}
+
+/*
+ * Peek in the next rx buffer at offset off, for count bytes, without
+ * consuming the frame (no irq is raised).  Returns the number of bytes
+ * copied (clamped to the frame end), -EINVAL for an out-of-range
+ * offset, -ENOMEM when no frame is pending.
+ */
+static int ivc_rx_peek(struct ivc_dev *ivc, void *buf, int off, int count)
+{
+       struct cbuf *cb = ivc->rx_cbuf;
+       int chunk, rem;
+
+       if (cbuf_is_empty(cb))
+               return -ENOMEM;
+
+       /*
+        * BUGFIX: guard the offset; an off >= struct_size made rem
+        * negative, which memcpy() would treat as a huge size_t.
+        */
+       if (off < 0 || off >= cb->struct_size)
+               return -EINVAL;
+
+       /* get maximum available number of bytes */
+       rem = cb->struct_size - off;
+       chunk = count;
+
+       /* if request is for more than rem, return only rem */
+       if (chunk > rem)
+               chunk = rem;
+
+       memcpy(buf, cbuf_read_data_ptr(cb) + off, chunk);
+
+       /* note, no interrupt is generated */
+
+       return chunk;
+}
+
+/* directly peek at the next frame rx'ed; ERR_PTR(-ENOMEM) when empty */
+static void *ivc_rx_get_next_frame(struct ivc_dev *ivc)
+{
+       struct cbuf *rx = ivc->rx_cbuf;
+
+       return cbuf_is_empty(rx) ? ERR_PTR(-ENOMEM)
+                                : cbuf_read_data_ptr(rx);
+}
+
+/* advance the rx buffer */
+static int ivc_rx_advance(struct ivc_dev *ivc)
+{
+       struct cbuf *rx = ivc->rx_cbuf;
+
+       if (cbuf_is_empty(rx))
+               return -ENOMEM;
+
+       cbuf_advance_r_pos(rx);
+       /* tell the peer a frame slot was freed */
+       ivc_raise_irq(ivc);
+
+       return 0;
+}
+
+/*
+ * Write one frame from a kernel buffer.
+ *
+ * Copies min(size, frame size) bytes into the next tx frame, zero-fills
+ * the rest of the frame, then advances the ring and kicks the peer.
+ * Returns size on success (NOTE: even when size exceeds the frame size
+ * the excess is silently dropped — callers are expected to write at
+ * most frame_size bytes), -ENOMEM when the ring is full, -EINVAL in
+ * emulation mode without local loopback.
+ */
+static int ivc_tx_write(struct ivc_dev *ivc, const void *buf, int size)
+{
+       struct cbuf *cb;
+       void __iomem *p;
+       int left, chunk;    /* dead "ret" local removed */
+
+       /* in emulation mode, only local loopback modes are possible */
+       if (ivc->hvd->emulation && ivc->local_loopback == -1)
+               return -EINVAL;
+
+       cb = ivc_tx_cbuf(ivc);
+       if (cbuf_is_full(cb))
+               return -ENOMEM;
+
+       if (size > cb->struct_size) {
+               chunk = cb->struct_size;
+               left = size - cb->struct_size;
+       } else {
+               chunk = size;
+               left = cb->struct_size - chunk;
+       }
+
+       /* NOTE(review): p is __iomem yet written with plain memcpy/memset;
+        * this matches the rest of the file but should use memcpy_toio —
+        * TODO confirm cbuf memory really is ioremapped */
+       p = cbuf_write_data_ptr(cb);
+
+       memcpy(p, buf, chunk);
+       /* zero-pad the remainder of the frame */
+       memset(p + chunk, 0, left);
+
+       cbuf_advance_w_pos(cb);
+       ivc_raise_irq(ivc);
+
+       return size;
+}
+
+/*
+ * Write one frame from a userspace buffer (see ivc_tx_write()).
+ * Returns size on success, -ENOMEM when the ring is full, -EFAULT on a
+ * bad user pointer (frame not advanced), -EINVAL in emulation mode
+ * without local loopback.
+ */
+static int ivc_tx_write_user(struct ivc_dev *ivc,
+               const void __user *buf, int size)
+{
+       struct cbuf *cb;
+       void __iomem *p;
+       int left, chunk;    /* dead "ret" local removed */
+
+       /* in emulation mode, only local loopback modes are possible */
+       if (ivc->hvd->emulation && ivc->local_loopback == -1)
+               return -EINVAL;
+
+       cb = ivc_tx_cbuf(ivc);
+       if (cbuf_is_full(cb))
+               return -ENOMEM;
+
+       if (size > cb->struct_size) {
+               chunk = cb->struct_size;
+               left = size - cb->struct_size;
+       } else {
+               chunk = size;
+               left = cb->struct_size - chunk;
+       }
+
+       p = cbuf_write_data_ptr(cb);
+       if (copy_from_user(p, buf, chunk))
+               return -EFAULT;
+       /* zero-pad the remainder of the frame */
+       if (left > 0)
+               memset(p + chunk, 0, left);
+
+       cbuf_advance_w_pos(cb);
+       ivc_raise_irq(ivc);
+
+       return size;
+}
+
+/*
+ * Poke in the next tx buffer at offset off, for count bytes, without
+ * advancing the ring (no irq is raised).  Returns the number of bytes
+ * written (clamped to the frame end), -EINVAL for an out-of-range
+ * offset, -ENOMEM when the ring is full.
+ */
+static int ivc_tx_poke(struct ivc_dev *ivc, const void *buf, int off, int count)
+{
+       struct cbuf *cb;
+       int rem, chunk;
+
+       /* in emulation mode, only local loopback modes are possible */
+       if (ivc->hvd->emulation && ivc->local_loopback == -1)
+               return -EINVAL;
+
+       cb = ivc_tx_cbuf(ivc);
+       if (cbuf_is_full(cb))
+               return -ENOMEM;
+
+       /* guard the offset, mirroring ivc_rx_peek() */
+       if (off < 0 || off >= cb->struct_size)
+               return -EINVAL;
+
+       /*
+        * BUGFIX: the space left past off is struct_size - off; the
+        * original computed struct_size + off, which let a large off
+        * write past the end of the frame.
+        */
+       rem = cb->struct_size - off;
+       chunk = count;
+       if (chunk > rem)
+               chunk = rem;
+
+       memcpy(cbuf_write_data_ptr(cb) + off, buf, chunk);
+
+       return chunk;
+}
+
+/* directly poke at the next frame to be tx'ed */
+static void *ivc_tx_get_next_frame(struct ivc_dev *ivc)
+{
+       struct cbuf *tx;
+
+       /* in emulation mode, only local loopback modes are possible */
+       if (ivc->hvd->emulation && ivc->local_loopback == -1)
+               return ERR_PTR(-EINVAL);
+
+       tx = ivc_tx_cbuf(ivc);
+
+       return cbuf_is_full(tx) ? ERR_PTR(-ENOMEM)
+                               : cbuf_write_data_ptr(tx);
+}
+
+/* advance the tx buffer */
+static int ivc_tx_advance(struct ivc_dev *ivc)
+{
+       struct cbuf *tx;
+
+       /* in emulation mode, only local loopback modes are possible */
+       if (ivc->hvd->emulation && ivc->local_loopback == -1)
+               return -EINVAL;
+
+       tx = ivc_tx_cbuf(ivc);
+       if (cbuf_is_full(tx))
+               return -ENOMEM;
+
+       cbuf_advance_w_pos(tx);
+       /* tell the peer a frame is ready */
+       ivc_raise_irq(ivc);
+
+       return 0;
+}
+
+/*
+ * read() for /dev/ivcN.
+ *
+ * Blocks (unless O_NONBLOCK) until at least one frame is pending, then
+ * drains up to count bytes, one frame per iteration.  Returns bytes
+ * read, or the error code when nothing was read at all.
+ */
+static ssize_t ivc_dev_read(struct file *filp, char __user *buf,
+               size_t count, loff_t *ppos)
+{
+       struct ivc_dev *ivc = filp->private_data;
+       int left = count, ret = 0, chunk;
+
+       if (ivc_rx_empty(ivc)) {
+               if (filp->f_flags & O_NONBLOCK)
+                       return -EAGAIN;
+
+               ret = wait_event_interruptible(ivc->wq, !ivc_rx_empty(ivc));
+               if (ret) {
+                       /* -ERESTARTSYS (signal) is expected, don't log it */
+                       if (ret != -ERESTARTSYS)
+                               dev_err(ivc->device,
+                                       "wait_event_interruptible %d\n", ret);
+                       return ret;
+               }
+       }
+
+       while (left > 0 && !ivc_rx_empty(ivc)) {
+
+               /* at most one frame per iteration */
+               chunk = ivc->qd->frame_size;
+               if (chunk > left)
+                       chunk = left;
+               ret = ivc_rx_read_user(ivc, buf, chunk);
+               if (ret < 0)
+                       break;
+
+               buf += chunk;
+               left -= chunk;
+       }
+
+       /* nothing consumed: propagate the last error */
+       if (left >= count)
+               return ret;
+
+       return count - left;
+}
+
+/*
+ * write() for /dev/ivcN.
+ *
+ * Sends count bytes as a sequence of frames, blocking (unless
+ * O_NONBLOCK) whenever the tx ring is full.  A short final chunk is
+ * sent as a zero-padded frame.  Returns bytes written, or the error
+ * code when nothing was written at all.
+ */
+static ssize_t ivc_dev_write(struct file *filp, const char __user *buf,
+               size_t count, loff_t *pos)
+{
+       struct ivc_dev *ivc = filp->private_data;
+       ssize_t done;
+       size_t left, chunk;
+       int ret = 0;
+
+       done = 0;
+       while (done < count) {
+
+               left = count - done;
+
+               /* at most one frame per iteration */
+               if (left < ivc->qd->frame_size)
+                       chunk = left;
+               else
+                       chunk = ivc->qd->frame_size;
+
+               /* is queue full? */
+               if (ivc_tx_full(ivc)) {
+
+                       /* check non-blocking mode */
+                       if (filp->f_flags & O_NONBLOCK) {
+                               ret = -EAGAIN;
+                               break;
+                       }
+
+                       ret = wait_event_interruptible(ivc->wq,
+                                       !ivc_tx_full(ivc));
+                       if (ret) {
+                               /* -ERESTARTSYS (signal) is expected */
+                               if (ret != -ERESTARTSYS)
+                                       dev_err(ivc->device,
+                                               "wait_event_interruptible %d\n",
+                                                       ret);
+                               break;
+                       }
+               }
+
+               ret = ivc_tx_write_user(ivc, buf, chunk);
+               if (ret < 0)
+                       break;
+
+               buf += chunk;
+
+               done += chunk;
+               *pos += chunk;
+       }
+
+
+       /* nothing sent: propagate the last error */
+       if (done == 0)
+               return ret;
+
+       return done;
+}
+
+/* poll() for /dev/ivcN: readable when a frame is pending, writable
+ * when the tx ring has room; no exception events */
+static unsigned int ivc_dev_poll(struct file *filp, poll_table *wait)
+{
+       struct ivc_dev *ivc = filp->private_data;
+       unsigned int events = 0;
+
+       poll_wait(filp, &ivc->wq, wait);
+
+       if (!ivc_rx_empty(ivc))
+               events |= POLLIN | POLLRDNORM;
+
+       if (!ivc_tx_full(ivc))
+               events |= POLLOUT | POLLWRNORM;
+
+       return events;
+}
+
+/* character-device operations backing each /dev/ivcN node */
+static const struct file_operations ivc_fops = {
+       .owner          = THIS_MODULE,
+       .open           = ivc_dev_open,
+       .release        = ivc_dev_release,
+       .unlocked_ioctl = ivc_dev_ioctl,
+       .llseek         = noop_llseek,
+       .read           = ivc_dev_read,
+       .write          = ivc_dev_write,
+       .poll           = ivc_dev_poll,
+};
+
+/* sysfs: queue id */
+static ssize_t id_show(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+       struct ivc_dev *idev = dev_get_drvdata(dev);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", idev->qd->id);
+}
+
+/* sysfs: bytes per frame */
+static ssize_t frame_size_show(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+       struct ivc_dev *idev = dev_get_drvdata(dev);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", idev->qd->frame_size);
+}
+
+/* sysfs: frames per direction */
+static ssize_t nframes_show(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+       struct ivc_dev *idev = dev_get_drvdata(dev);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", idev->qd->nframes);
+}
+
+/* sysfs: 1 while the character device is open */
+static ssize_t opened_show(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+       struct ivc_dev *idev = dev_get_drvdata(dev);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", idev->opened);
+}
+
+/* sysfs: 1 while the channel is reserved by the in-kernel API */
+static ssize_t reserved_show(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+       struct ivc_dev *idev = dev_get_drvdata(dev);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", idev->reserved);
+}
+
+/* sysfs: report the guest id of this channel's remote peer */
+static ssize_t peer_show(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+       const struct ivc_dev *ivc = dev_get_drvdata(dev);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", ivc->other_guestid);
+}
+
+/* sysfs: report the current loopback mode of the channel */
+static ssize_t loopback_show(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+       const struct ivc_dev *ivc = dev_get_drvdata(dev);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", (int)ivc->loopback);
+}
+
+/*
+ * sysfs: set the loopback mode of the channel.
+ *
+ * Parses a decimal mode value from @buf and forwards it to
+ * ivc_set_loopback().  Returns @len on success, -EINVAL for unparsable
+ * input, or the error from ivc_set_loopback().
+ */
+static ssize_t loopback_store(struct device *dev, struct device_attribute *attr,
+                      const char *buf, size_t len)
+{
+       struct ivc_dev *ivc = dev_get_drvdata(dev);
+       long int mode;
+       int ret;
+
+       ret = kstrtol(buf, 10, &mode);
+       if (ret)
+               return -EINVAL;
+
+       ret = ivc_set_loopback(ivc, (int)mode);
+       if (ret != 0)
+               return ret;
+       return len;
+}
+
+/* sysfs: report the peer queue id of a local loopback, -1 when unbound */
+static ssize_t local_loopback_show(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+       struct ivc_dev *ivc = dev_get_drvdata(dev);
+       int peer_id;
+
+       /* read under the lock for a consistent snapshot */
+       spin_lock(&ivc->lock);
+       peer_id = ivc->local_loopback;
+       spin_unlock(&ivc->lock);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", peer_id);
+}
+
+/*
+ * sysfs: bind/unbind this channel to another local channel.
+ *
+ * Writing a queue id cross-links the two channels' cbufs and irqs as a
+ * local loopback; writing -1 removes the current binding.  Returns @len
+ * on success, -EINVAL for unparsable input or an attempt to bind to
+ * self, -ENODEV when the target queue id does not exist.
+ */
+static ssize_t local_loopback_store(struct device *dev,
+                      struct device_attribute *attr,
+                      const char *buf, size_t len)
+{
+       struct ivc_dev *ivc = dev_get_drvdata(dev);
+       struct ivc_dev *ivcd;
+       struct tegra_hv_data *hvd = ivc->hvd;
+       long int dest;
+       int to_find;
+       int ret;
+
+       ret = kstrtol(buf, 10, &dest);
+       if (ret)
+               return -EINVAL;
+
+       /* setting to the same value is a nop */
+       if (dest == ivc->local_loopback)
+               return len;
+
+       /* can't bind self */
+       if (dest == ivc->qd->id)
+               return -EINVAL;
+
+       /* binding: look up the requested id; unbinding: the current peer */
+       to_find = dest != -1 ? dest : ivc->local_loopback;
+
+       /* locate the partner channel on the device list */
+       mutex_lock(&hvd->ivc_devs_lock);
+       list_for_each_entry(ivcd, &hvd->ivc_devs, list) {
+               if (ivcd->qd->id == to_find)
+                       goto found;
+       }
+       mutex_unlock(&hvd->ivc_devs_lock);
+
+       return -ENODEV;
+
+found:
+       /* lock low to high id to avoid deadlocks */
+       if (ivcd->qd->id < ivc->qd->id) {
+               spin_lock(&ivcd->lock);
+               spin_lock(&ivc->lock);
+       } else {
+               spin_lock(&ivc->lock);
+               spin_lock(&ivcd->lock);
+       }
+
+       if (dest != -1) {
+               /* link them together */
+               ivc->loopback_rx_cbuf = ivcd->rx_cbuf;
+               ivc->loopback_tx_cbuf = ivcd->tx_cbuf;
+               ivc->loopback_irq = ivcd->irq;
+               ivc->local_loopback = ivcd->qd->id;
+
+               ivcd->loopback_rx_cbuf = ivc->rx_cbuf;
+               ivcd->loopback_tx_cbuf = ivc->tx_cbuf;
+               ivcd->loopback_irq = ivc->irq;
+               ivcd->local_loopback = ivc->qd->id;
+       } else {
+               /* unlink them */
+               ivc->loopback_rx_cbuf = NULL;
+               ivc->loopback_tx_cbuf = NULL;
+               ivc->loopback_irq = 0;
+               ivc->local_loopback = -1;
+
+               ivcd->loopback_rx_cbuf = NULL;
+               ivcd->loopback_tx_cbuf = NULL;
+               ivcd->loopback_irq = 0;
+               ivcd->local_loopback = -1;
+       }
+
+       /* unlock high to low */
+       if (ivcd->qd->id < ivc->qd->id) {
+               spin_unlock(&ivc->lock);
+               spin_unlock(&ivcd->lock);
+       } else {
+               spin_unlock(&ivcd->lock);
+               spin_unlock(&ivc->lock);
+       }
+       mutex_unlock(&hvd->ivc_devs_lock);
+
+       return len;
+}
+
+/* per-device sysfs attributes, installed via the "ivc" class */
+static struct device_attribute ivc_dev_attrs[] = {
+       __ATTR_RO(id),
+       __ATTR_RO(frame_size),
+       __ATTR_RO(nframes),
+       __ATTR_RO(opened),
+       __ATTR_RO(reserved),
+       __ATTR_RO(peer),
+       __ATTR(loopback, 0644, loopback_show, loopback_store),
+       __ATTR(local_loopback, 0644, local_loopback_show, local_loopback_store),
+       __ATTR_NULL
+};
+
+/*
+ * Create the character device for one IVC queue and wire up its cbufs.
+ *
+ * Returns 0 on success or a negative errno.  On failure no state is
+ * left behind: the cdev is removed and the ivc structure freed.
+ */
+static int tegra_hv_add_ivc(struct tegra_hv_data *hvd,
+               struct tegra_hv_queue_data *qd)
+{
+       struct platform_device *pdev = hvd->pdev;
+       struct device *dev = &pdev->dev;
+       struct ivc_dev *ivc;
+       struct guest_ivc_info *givci;
+       int ret;
+
+       /* sanity check: this guest must be one of the two peers */
+       if (qd->peers[0] != hvd->guestid && qd->peers[1] != hvd->guestid)
+               return -EINVAL;
+
+       ivc = kzalloc(sizeof(*ivc), GFP_KERNEL);
+       if (ivc == NULL) {
+               dev_err(dev, "Failed to allocate ivc structure\n");
+               return -ENOMEM;
+       }
+       ivc->hvd = hvd;
+       ivc->qd = qd;
+
+       spin_lock_init(&ivc->lock);
+       init_waitqueue_head(&ivc->wq);
+
+       ivc->minor = qd->id;
+       ivc->dev = MKDEV(hvd->ivc_major, ivc->minor);
+       cdev_init(&ivc->cdev, &ivc_fops);
+       snprintf(ivc->name, sizeof(ivc->name) - 1, "ivc%d", qd->id);
+       ret = cdev_add(&ivc->cdev, ivc->dev, 1);
+       if (ret != 0) {
+               dev_err(dev, "cdev_add() failed\n");
+               goto out_free;
+       }
+       /* parent is this hvd dev */
+       ivc->device = device_create(hvd->ivc_class, dev, ivc->dev, ivc,
+                       ivc->name);
+       if (IS_ERR(ivc->device)) {
+               dev_err(dev, "device_create() failed for %s\n", ivc->name);
+               /* propagate the error instead of falling through with 0 */
+               ret = PTR_ERR(ivc->device);
+               goto out_clear_cdev;
+       }
+       /* point to ivc */
+       dev_set_drvdata(ivc->device, ivc);
+
+       /*
+        * cbufs are already initialized
+        * we only have to assign rx/tx
+        * peer[0] it's rx, tx
+        * peer[1] it's tx, rx
+        * both queues are of the same size
+        */
+       if (hvd->guestid == qd->peers[0]) {
+               ivc->other_guestid = qd->peers[1];
+               givci = &hvd->guest_ivc_info[ivc->other_guestid];
+               ivc->rx_cbuf = (void *)givci->shd + qd->offset;
+               ivc->tx_cbuf = (void *)ivc->rx_cbuf + qd->size;
+       } else {
+               ivc->other_guestid = qd->peers[0];
+               givci = &hvd->guest_ivc_info[ivc->other_guestid];
+               ivc->tx_cbuf = (void *)givci->shd + qd->offset;
+               ivc->rx_cbuf = (void *)ivc->tx_cbuf + qd->size;
+       }
+
+       if (!hvd->emulation) {
+               /* IRQ# of this IVC channel */
+               ivc->irq = tegra_hv_get_queue_irq(hvd, ivc->other_guestid,
+                               ivc->qd->id);
+       }
+
+       /* no loopback yet */
+       ivc->local_loopback = -1;
+
+       /* add to the list */
+       mutex_lock(&hvd->ivc_devs_lock);
+       list_add_tail(&ivc->list, &hvd->ivc_devs);
+       mutex_unlock(&hvd->ivc_devs_lock);
+
+       dev_info(dev, "added %s\n", ivc->name);
+
+       return 0;
+out_clear_cdev:
+       cdev_del(&ivc->cdev);
+out_free:
+       kfree(ivc);
+       return ret;
+}
+
+/*
+ * Reserve the channel with queue id @id for internal kernel use.
+ * Returns the channel on success, NULL if the id does not exist or
+ * the channel is open, already reserved, or in loopback mode.
+ */
+struct ivc_dev *ivc_reserve(struct tegra_hv_data *hvd, int id)
+{
+       struct ivc_dev *itr, *reserved_ivc = NULL;
+
+       mutex_lock(&hvd->ivc_devs_lock);
+       list_for_each_entry(itr, &hvd->ivc_devs, list) {
+               if (itr->qd->id != id)
+                       continue;
+
+               /* claim it only when nobody else is using it */
+               spin_lock(&itr->lock);
+               if (!itr->opened && !itr->reserved && !itr->loopback) {
+                       itr->reserved = 1;
+                       reserved_ivc = itr;
+               }
+               spin_unlock(&itr->lock);
+
+               if (reserved_ivc)
+                       break;
+       }
+       mutex_unlock(&hvd->ivc_devs_lock);
+
+       return reserved_ivc;
+}
+
+/*
+ * Drop a reservation taken with ivc_reserve().
+ *
+ * Returns 0 on success, -EINVAL for a NULL channel, -ENODEV if the
+ * channel is open or was not actually reserved.
+ */
+int ivc_unreserve(struct ivc_dev *ivc)
+{
+       int found;
+
+       if (ivc == NULL)
+               return -EINVAL;
+
+       /* note: removed unused local 'hvd' */
+       found = 0;
+       spin_lock(&ivc->lock);
+       if (!ivc->opened && ivc->reserved) {
+               ivc->reserved = 0;
+               found = 1;
+       }
+       spin_unlock(&ivc->lock);
+
+       return found ? 0 : -ENODEV;
+}
+
+/* Tear down every IVC device: remove cdevs, destroy devices, free memory. */
+static void tegra_hv_ivc_cleanup(struct tegra_hv_data *hvd)
+{
+       struct ivc_dev *ivc, *ivcn;
+
+       mutex_lock(&hvd->ivc_devs_lock);
+       list_for_each_entry_safe(ivc, ivcn, &hvd->ivc_devs, list) {
+               list_del(&ivc->list);
+
+               if (ivc->device) {
+                       cdev_del(&ivc->cdev);
+                       /*
+                        * device_create() is paired with device_destroy();
+                        * device_del() alone leaks the device reference.
+                        */
+                       device_destroy(hvd->ivc_class, ivc->dev);
+               }
+               kfree(ivc);
+       }
+       mutex_unlock(&hvd->ivc_devs_lock);
+}
+
+/*
+ * Validate the shared-memory configuration published for @target_guestid
+ * and record the highest queue id seen (feeds hvd->ivc_max_qid).
+ *
+ * On the client side this polls for up to 8 seconds waiting for the
+ * server to publish a valid signature.  Returns 0 on success or when
+ * no configuration exists for a client; -EINVAL for a bad guest id or
+ * when a server finds no configuration (fatal on the server side).
+ */
+static int tegra_hv_prepare_to_instantiate(struct tegra_hv_data *hvd,
+               int target_guestid)
+{
+       struct platform_device *pdev = hvd->pdev;
+       struct device *dev = &pdev->dev;
+       struct guest_ivc_info *givci;
+       struct tegra_hv_shared_data *shd;
+       struct tegra_hv_queue_data *qd;
+       unsigned long timeout;
+       int i;
+
+       if ((unsigned int)target_guestid >= hvd->nguests)
+               return -EINVAL;
+
+       givci = &hvd->guest_ivc_info[target_guestid];
+
+       /* non-valid guest on server just return */
+       if (givci->server && !givci->valid)
+               return 0;
+
+       shd = givci->shd;
+
+       if (!givci->server) {
+               dev_info(dev, "slave; waiting for valid signature from %d\n",
+                               target_guestid);
+               /* 8 second timeout */
+               timeout = jiffies + 8 * HZ;
+               while (time_before(jiffies, timeout)) {
+                       /* read barrier before inspecting shared memory */
+                       cbuf_rmb();
+                       if (tegra_hv_server_data_valid(shd))
+                               break;
+                       msleep(20);
+               }
+       }
+
+       cbuf_rmb();
+       if (!tegra_hv_server_data_valid(shd)) {
+               if (!givci->server) {
+                       dev_warn(dev, "guest #%d, no configuration found\n",
+                                       target_guestid);
+                       return 0;
+               }
+               /* on server this is fatal */
+               dev_err(dev, "server guest #%d, no configuration found\n",
+                               target_guestid);
+               return -EINVAL;
+       }
+
+       /* client, data found, set valid flag */
+       if (!givci->server)
+               givci->valid = 1;
+
+       /* find maximum queue id for this quest */
+       givci->max_qid = 0;
+       for (i = 0; i < shd->nr_queues; i++) {
+               qd = tegra_hv_shd_to_queue_data(shd) + i;
+               if (qd->id > givci->max_qid)
+                       givci->max_qid = qd->id;
+       }
+       if (givci->max_qid > hvd->ivc_max_qid)
+               hvd->ivc_max_qid = givci->max_qid;
+
+       dev_info(dev, "guest #%d, config found; #%d total queues, max qid %d\n",
+                       target_guestid, shd->nr_queues, givci->max_qid);
+
+       return 0;
+}
+
+/* Create IVC devices for every queue shared with @target_guestid. */
+static int tegra_hv_instantiate(struct tegra_hv_data *hvd, int target_guestid)
+{
+       struct platform_device *pdev = hvd->pdev;
+       struct device *dev = &pdev->dev;
+       struct tegra_hv_queue_data *qd;
+       struct guest_ivc_info *givci;
+       struct tegra_hv_shared_data *shd;
+       int ret, i, our_count;
+
+       if ((unsigned int)target_guestid >= hvd->nguests)
+               return -EINVAL;
+
+       givci = &hvd->guest_ivc_info[target_guestid];
+       if (!givci->valid)
+               return 0;
+
+       shd = givci->shd;
+
+       /* walk the queue table and set up an ivc device for every queue
+        * that links this guest with the target guest */
+       our_count = 0;
+       for (i = 0; i < shd->nr_queues; i++) {
+               qd = tegra_hv_shd_to_queue_data(shd) + i;
+
+               /* both this guest and the target must be peers */
+               if ((qd->peers[0] != hvd->guestid &&
+                               qd->peers[1] != hvd->guestid) ||
+                   (qd->peers[0] != target_guestid &&
+                               qd->peers[1] != target_guestid))
+                       continue;
+
+               /* add the IVC device */
+               ret = tegra_hv_add_ivc(hvd, qd);
+               if (ret != 0) {
+                       dev_err(dev, "tegra_hv_add_ivc() failed\n");
+                       tegra_hv_ivc_cleanup(hvd);
+                       return ret;
+               }
+               our_count++;
+       }
+
+       dev_info(dev, "guest #%d, #%d IVC devices\n",
+                       target_guestid, our_count);
+
+       return 0;
+}
+
+/*
+ * Apply the "local-loopbacks" DT property: each pair of queue ids in
+ * the property gets its cbufs and irqs cross-linked so traffic written
+ * to one channel appears on the other.  Always returns 0.
+ */
+static int tegra_hv_perform_local_loopback(struct tegra_hv_data *hvd)
+{
+       struct platform_device *pdev = hvd->pdev;
+       struct device *dev = &pdev->dev;
+       struct ivc_dev *ivc, *ivct[2];
+       u32 loop[2];
+       int i;
+
+       /* iterate over the loopback map */
+       mutex_lock(&hvd->ivc_devs_lock);
+       for (i = 0;
+               of_property_read_u32_index(dev->of_node,
+                       "local-loopbacks", i * 2, loop) == 0 &&
+               of_property_read_u32_index(dev->of_node,
+                       "local-loopbacks", i * 2 + 1, loop + 1) == 0; i++) {
+
+               ivct[0] = ivct[1] = NULL;
+
+               /* resolve both queue ids to ivc devices */
+               list_for_each_entry(ivc, &hvd->ivc_devs, list) {
+                       if (ivc->qd->id == loop[0])
+                               ivct[0] = ivc;
+                       if (ivc->qd->id == loop[1])
+                               ivct[1] = ivc;
+               }
+
+               /* this is normal for non-guest loopback, so no message */
+               if (ivct[0] == NULL || ivct[1] == NULL)
+                       continue;
+
+               /* link them together */
+               ivct[0]->loopback_rx_cbuf = ivct[1]->rx_cbuf;
+               ivct[0]->loopback_tx_cbuf = ivct[1]->tx_cbuf;
+               ivct[0]->loopback_irq = ivct[1]->irq;
+               ivct[0]->local_loopback = ivct[1]->qd->id;
+
+               ivct[1]->loopback_rx_cbuf = ivct[0]->rx_cbuf;
+               ivct[1]->loopback_tx_cbuf = ivct[0]->tx_cbuf;
+               ivct[1]->loopback_irq = ivct[0]->irq;
+               ivct[1]->local_loopback = ivct[0]->qd->id;
+
+               /* message typo fixed: "looback" -> "loopback" */
+               dev_info(dev, "Local loopback on %d <-> %d\n",
+                               ivct[0]->qd->id, ivct[1]->qd->id);
+       }
+
+       mutex_unlock(&hvd->ivc_devs_lock);
+
+       return 0;
+}
+
+/*
+ * Probe: discover guests (or fall back to a single-guest emulation mode
+ * when not running under the hypervisor), map each guest's shared
+ * memory, parse DT queue configuration, create the "ivc" class and all
+ * per-queue character devices, and apply DT-configured local loopbacks.
+ *
+ * Returns 0 on success or a negative errno; all partially-created
+ * state is rolled back on failure.
+ */
+static int tegra_hv_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct tegra_hv_data *hvd;
+       struct guest_ivc_info *givci;
+       int i, ret;
+
+       if (dev->of_node == NULL) {
+               dev_err(dev, "driver requires DT data\n");
+               return -EINVAL;
+       }
+
+       hvd = devm_kzalloc(dev, sizeof(*hvd), GFP_KERNEL);
+       if (hvd == NULL) {
+               dev_err(dev, "Failed to allocate hvd structure\n");
+               return -ENOMEM;
+       }
+
+       INIT_LIST_HEAD(&hvd->list);
+
+       hvd->pdev = pdev;
+
+       /* we're emulating only when running on non-hyp mode */
+       hvd->emulation = !is_tegra_hypervisor_mode();
+
+       if (!hvd->emulation) {
+               ret = hyp_read_gid(&hvd->guestid);
+               if (ret != 0) {
+                       dev_err(dev, "Failed to read guest id\n");
+                       return -ENODEV;
+               }
+
+               /* Read total number of guests advertised to us */
+               ret = hyp_read_nguests(&hvd->nguests);
+               dev_info(dev, "Number of guests in the system: %d\n",
+                               hvd->nguests);
+
+               if (ret != 0) {
+                       dev_err(dev, "Failed to read guest id\n");
+                       return -ENODEV;
+               }
+
+               /* clamp so guest_ivc_info[] indexing stays in bounds */
+               if (hvd->nguests >= NGUESTS_MAX) {
+                       dev_err(dev, "nguests=%d > %d (clamping to %d)\n",
+                                       hvd->nguests, NGUESTS_MAX, NGUESTS_MAX);
+                       hvd->nguests = NGUESTS_MAX;
+               }
+
+       } else {
+               dev_info(dev, "No hypervisor mode; loopback only\n");
+       }
+
+       /* initialize the standard IVC stuff */
+       mutex_init(&hvd->ivc_devs_lock);
+       INIT_LIST_HEAD(&hvd->ivc_devs);
+
+       hvd->ivc_class = class_create(THIS_MODULE, "ivc");
+       if (IS_ERR(hvd->ivc_class)) {
+               dev_err(dev, "class_create() failed\n");
+               ret = PTR_ERR(hvd->ivc_class);
+               hvd->ivc_class = NULL;
+               return ret;
+       }
+       /* set class attributes */
+       hvd->ivc_class->dev_attrs = ivc_dev_attrs;
+
+       if (!hvd->emulation) {
+               /* For all advertised guests, read IVC comm info */
+               for (i = 0; i < hvd->nguests; i++) {
+                       givci = &hvd->guest_ivc_info[i];
+
+                       /* Get info for this guest */
+                       ret = hyp_read_ivc_info(&givci->res, i);
+                       if (ret != 0) {
+                               dev_warn(dev,
+                                       "No hyp shared memory for guest #%d\n",
+                                       i);
+                               continue;
+                       }
+
+                       /* Map segment for this guest  */
+                       givci->shmem = ioremap_cached(givci->res.base,
+                                               givci->res.size);
+                       if (givci->shmem == NULL) {
+                               dev_err(dev,
+                                       "guest #%d, shmem ioremap failed\n", i);
+
+                               /* iounmap all previous ones */
+                               while (--i >= 0) {
+                                       givci = &hvd->guest_ivc_info[i];
+                                       if (givci->shmem)
+                                               iounmap(givci->shmem);
+                               }
+                               ret = -ENOMEM;
+                               goto out_class_rel;
+                       }
+
+                       /* Also assign shd field for this guest */
+                       givci->shd = (void *)givci->shmem;
+               }
+
+       } else {
+               /* pretend we're guest #0 and nguests=1 */
+               hvd->guestid = 0;
+               hvd->nguests = 1;
+               givci = &hvd->guest_ivc_info[0];
+
+               memset(givci, 0, sizeof(*givci));
+               givci->res.size = SZ_1M;      /* TODO: Hardcoded at 1M */
+               givci->shd = kmalloc(givci->res.size, GFP_KERNEL);
+               if (givci->shd == NULL) {
+                       dev_err(dev, "Failed to allocate local memory\n");
+                       ret = -ENOMEM;
+                       goto out_class_rel;
+               }
+               givci->server = 1;
+
+               dev_warn(dev, "Non-hypervisor mode with 1MB of memory\n");
+       }
+
+       /* parse the DT data */
+       for (i = 0; i < hvd->nguests; i++) {
+               ret = tegra_hv_dt_parse(hvd, i);
+               if (ret != 0) {
+                       dev_err(dev, "Failed to parse DT data\n");
+                       goto out_unmap;
+               }
+       }
+
+       /* prepare to instantiate the IVC */
+       hvd->ivc_max_qid = 0;
+
+       for (i = 0; i < hvd->nguests; i++) {
+               ret = tegra_hv_prepare_to_instantiate(hvd, i);
+               if (ret != 0) {
+                       dev_err(dev, "guest #%d, failed to prepare\n", i);
+                       goto out_unmap;
+               }
+       }
+
+       /* have we found any IVCs? */
+       if (hvd->ivc_max_qid == 0) {
+               dev_err(dev, "No IVC channels\n");
+               ret = -ENODEV;
+               goto out_unmap;
+       }
+
+       dev_info(dev, "guestid=%d, total guests=%d\n",
+                       hvd->guestid, hvd->nguests);
+       for (i = 0; i < hvd->nguests; i++) {
+               givci = &hvd->guest_ivc_info[i];
+               dev_info(dev, "guest #%d, shmem: mem=0x%lx-0x%lx irq=%lu-%u %c%c\n",
+                               i, givci->res.base, givci->res.size,
+                               givci->res.virq_base, givci->res.virq_total,
+                               givci->valid ? 'V' : '-',
+                               givci->server ? 'S' : '-');
+       }
+
+       /* allocate the whole chardev range (minors 0..ivc_max_qid) */
+       ret = alloc_chrdev_region(&hvd->ivc_dev, 0, hvd->ivc_max_qid + 1,
+                                       "ivc");
+       if (ret < 0) {
+               dev_err(dev, "alloc_chrdev_region() failed\n");
+               goto out_unmap;
+       }
+       hvd->ivc_major = MAJOR(hvd->ivc_dev);
+
+       /* instantiate the IVC */
+       for (i = 0; i < hvd->nguests; i++) {
+               ret = tegra_hv_instantiate(hvd, i);
+               if (ret != 0) {
+                       dev_err(dev, "guest #%d, failed to instantiate\n", i);
+                       goto out_unreg_chr;
+               }
+       }
+
+       ret = tegra_hv_perform_local_loopback(hvd);
+       if (ret != 0) {
+               dev_err(dev, "Local loopback failed\n");
+               goto out_unreg_chr;
+       }
+
+       /* finally add it to the list */
+       spin_lock(&hv_list_lock);
+       platform_set_drvdata(pdev, hvd);
+       list_add_tail(&hvd->list, &hv_list);
+       spin_unlock(&hv_list_lock);
+
+       dev_info(dev, "initialized\n");
+
+       return 0;
+
+out_unreg_chr:
+       /* count must match alloc_chrdev_region() above (max_qid + 1) */
+       unregister_chrdev_region(hvd->ivc_dev, hvd->ivc_max_qid + 1);
+
+out_unmap:
+       if (!hvd->emulation) {
+               /* guests with no hyp shared memory have a NULL shmem */
+               for (i = 0; i < hvd->nguests; i++) {
+                       if (hvd->guest_ivc_info[i].shmem)
+                               iounmap(hvd->guest_ivc_info[i].shmem);
+               }
+       } else {
+               /* sanity checks */
+               BUG_ON(hvd->guestid != 0);
+               BUG_ON(hvd->nguests != 1);
+
+               /* free the locally allocated memory */
+               kfree(hvd->guest_ivc_info[0].shd);
+       }
+
+out_class_rel:
+       class_destroy(hvd->ivc_class);
+       return ret;
+}
+
+/* Remove: undo everything done in probe, in reverse order. */
+static int tegra_hv_release(struct platform_device *pdev)
+{
+       struct tegra_hv_data *hvd = platform_get_drvdata(pdev);
+       int i;
+
+       /* remove from the list */
+       spin_lock(&hv_list_lock);
+       list_del(&hvd->list);
+       spin_unlock(&hv_list_lock);
+
+       tegra_hv_ivc_cleanup(hvd);
+
+       /* count must match alloc_chrdev_region() in probe (max_qid + 1) */
+       unregister_chrdev_region(hvd->ivc_dev, hvd->ivc_max_qid + 1);
+
+       if (!hvd->emulation) {
+               /* Unmap mapped areas for all guests; guests without hyp
+                * shared memory have a NULL shmem */
+               for (i = 0; i < hvd->nguests; i++) {
+                       if (hvd->guest_ivc_info[i].shmem)
+                               iounmap(hvd->guest_ivc_info[i].shmem);
+               }
+       } else {
+               /* sanity checks */
+               BUG_ON(hvd->guestid != 0);
+               BUG_ON(hvd->nguests != 1);
+
+               /* free the locally allocated memory */
+               kfree(hvd->guest_ivc_info[0].shd);
+       }
+
+       class_destroy(hvd->ivc_class);
+
+       dev_info(&pdev->dev, "Released\n");
+
+       /* no need to devm_free or release the irq (done automatically) */
+       return 0;
+}
+
+/* DT match table: binds to "nvidia,tegra-hv" nodes */
+static const struct of_device_id tegra_hv_of_match[] = {
+       { .compatible = "nvidia,tegra-hv", },
+       {},
+};
+
+MODULE_DEVICE_TABLE(of, tegra_hv_of_match);
+
+/* platform driver glue; probe/release are the only entry points */
+static struct platform_driver tegra_hv_driver = {
+       .probe  = tegra_hv_probe,
+       .remove = tegra_hv_release,
+       .driver = {
+               .owner  = THIS_MODULE,
+               .name   = "tegra_hv",
+               .of_match_table = of_match_ptr(tegra_hv_of_match),
+       },
+};
+
+/*
+ * IRQ handler installed for cookie (in-kernel) users: forward queue
+ * state changes to the client's rx_rdy/tx_rdy callbacks.
+ */
+static irqreturn_t ivc_dev_cookie_irq_handler(int irq, void *data)
+{
+       struct ivc_dev *ivc = data;
+       struct tegra_hv_ivc_cookie *ivck = &ivc->cookie;
+       const struct tegra_hv_ivc_ops *ops = ivc->cookie_ops;
+
+       /* data available to read: notify the client */
+       if (ops->rx_rdy && !ivc_rx_empty(ivc))
+               ops->rx_rdy(ivck);
+
+       /* room available to write: notify the client */
+       if (ops->tx_rdy && !ivc_tx_full(ivc))
+               ops->tx_rdy(ivck);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Reserve IVC channel @id for in-kernel use and return its cookie.
+ *
+ * @dn may be NULL, in which case the first registered hv instance is
+ * used.  If @ops is given, ready callbacks are delivered immediately
+ * for any pending state and an irq handler is installed (unless in
+ * emulation mode).  Returns the cookie or an ERR_PTR.
+ */
+struct tegra_hv_ivc_cookie *tegra_hv_ivc_reserve(struct device_node *dn,
+               int id, const struct tegra_hv_ivc_ops *ops)
+{
+       struct platform_device *pdev = NULL;
+       struct tegra_hv_data *hvd = NULL;
+       struct ivc_dev *ivc = NULL;
+       struct tegra_hv_ivc_cookie *ivck = NULL;
+       int ret;
+
+       ret = -ENODEV;
+
+       if (dn == NULL) {
+               /* grab the first in the list */
+               spin_lock(&hv_list_lock);
+               if (!list_empty(&hv_list)) {
+                       hvd = list_entry(hv_list.next,
+                                       struct tegra_hv_data, list);
+                       pdev = hvd->pdev;
+                       get_device(&pdev->dev);
+               }
+               spin_unlock(&hv_list_lock);
+       } else
+               pdev = of_find_device_by_node(dn);
+
+       if (pdev != NULL)
+               hvd = platform_get_drvdata(pdev);
+
+       if (pdev == NULL || hvd == NULL) {
+               if (pdev != NULL && hvd == NULL) {
+                       pr_err("%s: Called too early (return EPROBE_DEFER)\n",
+                                       __func__);
+                       ret = -EPROBE_DEFER;
+               }
+               goto out;
+       }
+
+       ivc = ivc_reserve(hvd, id);
+       if (ivc == NULL)
+               goto out;
+
+       ivck = &ivc->cookie;
+       ivck->irq = ivc->irq;
+       ivck->peer_vmid = ivc->other_guestid;
+       ivck->nframes = ivc->qd->nframes;
+       ivck->frame_size = ivc->qd->frame_size;
+
+       ivc->cookie_ops = ops;
+
+       if (ivc->cookie_ops) {
+               /* there are data in the queue, callback */
+               if (ivc->cookie_ops->rx_rdy &&
+                               !ivc_rx_empty(ivc))
+                       ivc->cookie_ops->rx_rdy(ivck);
+
+               /* there is space in the queue to write, callback */
+               if (ivc->cookie_ops->tx_rdy &&
+                               !ivc_tx_full(ivc))
+                       ivc->cookie_ops->tx_rdy(ivck);
+
+               if (!ivc->hvd->emulation) {
+                       /* request our irq */
+                       ret = devm_request_irq(
+                                       ivc->device, ivc->irq,
+                                       ivc_dev_cookie_irq_handler,
+                                       0, dev_name(ivc->device), ivc);
+                       if (ret < 0) {
+                               /*
+                                * undo the reservation; returning the
+                                * cookie here would report bogus success
+                                */
+                               ivc->cookie_ops = NULL;
+                               ivc_unreserve(ivc);
+                               ivck = NULL;
+                               goto out;
+                       }
+               }
+       }
+
+out:
+       platform_device_put(pdev);
+
+       if (ivck == NULL)
+               return ERR_PTR(ret);
+
+       /* return pointer to the cookie */
+       return ivck;
+}
+EXPORT_SYMBOL(tegra_hv_ivc_reserve);
+
+/* Release a channel obtained with tegra_hv_ivc_reserve(). */
+int tegra_hv_ivc_unreserve(struct tegra_hv_ivc_cookie *ivck)
+{
+       struct ivc_dev *ivc;
+
+       if (ivck == NULL)
+               return -EINVAL;
+
+       ivc = cookie_to_ivc_dev(ivck);
+
+       /* release the irq grabbed at reserve time, if any */
+       if (ivc->cookie_ops != NULL && !ivc->hvd->emulation)
+               devm_free_irq(ivc->device, ivc->irq, ivc);
+
+       return ivc_unreserve(ivc);
+}
+EXPORT_SYMBOL(tegra_hv_ivc_unreserve);
+
+/* Write @size bytes from @buf to the channel's TX queue. */
+int tegra_hv_ivc_write(struct tegra_hv_ivc_cookie *ivck,
+               const void *buf, int size)
+{
+       return ivc_tx_write(cookie_to_ivc_dev(ivck), buf, size);
+}
+EXPORT_SYMBOL(tegra_hv_ivc_write);
+
+/* Read up to @size bytes from the channel's RX queue into @buf. */
+int tegra_hv_ivc_read(struct tegra_hv_ivc_cookie *ivck, void *buf, int size)
+{
+       return ivc_rx_read(cookie_to_ivc_dev(ivck), buf, size);
+}
+EXPORT_SYMBOL(tegra_hv_ivc_read);
+
+/* Return non-zero when the RX queue has data pending. */
+int tegra_hv_ivc_can_read(struct tegra_hv_ivc_cookie *ivck)
+{
+       return !ivc_rx_empty(cookie_to_ivc_dev(ivck));
+}
+EXPORT_SYMBOL(tegra_hv_ivc_can_read);
+
+/* Return non-zero when the TX queue has room for more data. */
+int tegra_hv_ivc_can_write(struct tegra_hv_ivc_cookie *ivck)
+{
+       return !ivc_tx_full(cookie_to_ivc_dev(ivck));
+}
+EXPORT_SYMBOL(tegra_hv_ivc_can_write);
+
+/* Return non-zero when the TX queue is completely drained. */
+int tegra_hv_ivc_tx_empty(struct tegra_hv_ivc_cookie *ivck)
+{
+       return ivc_tx_empty(cookie_to_ivc_dev(ivck));
+}
+EXPORT_SYMBOL(tegra_hv_ivc_tx_empty);
+
+/* Set the channel's loopback mode; forwards to ivc_set_loopback(). */
+int tegra_hv_ivc_set_loopback(struct tegra_hv_ivc_cookie *ivck, int mode)
+{
+       return ivc_set_loopback(cookie_to_ivc_dev(ivck), mode);
+}
+EXPORT_SYMBOL(tegra_hv_ivc_set_loopback);
+
+/* Run one loopback pass on the channel. */
+int tegra_hv_ivc_perform_loopback(struct tegra_hv_ivc_cookie *ivck)
+{
+       struct ivc_dev *ivc = cookie_to_ivc_dev(ivck);
+
+       return ivc_perform_loopback(ivc);
+}
+EXPORT_SYMBOL(tegra_hv_ivc_perform_loopback);
+
+/* Dump the channel's state (debug aid). */
+int tegra_hv_ivc_dump(struct tegra_hv_ivc_cookie *ivck)
+{
+       struct ivc_dev *ivc = cookie_to_ivc_dev(ivck);
+
+       return ivc_dump(ivc);
+}
+EXPORT_SYMBOL(tegra_hv_ivc_dump);
+
+/* Peek @count bytes at offset @off of the current RX frame, no advance. */
+int tegra_hv_ivc_read_peek(struct tegra_hv_ivc_cookie *ivck,
+               void *buf, int off, int count)
+{
+       return ivc_rx_peek(cookie_to_ivc_dev(ivck), buf, off, count);
+}
+EXPORT_SYMBOL(tegra_hv_ivc_read_peek);
+
+/* Get a pointer to the next RX frame for zero-copy consumption. */
+void *tegra_hv_ivc_read_get_next_frame(struct tegra_hv_ivc_cookie *ivck)
+{
+       struct ivc_dev *ivc = cookie_to_ivc_dev(ivck);
+
+       return ivc_rx_get_next_frame(ivc);
+}
+EXPORT_SYMBOL(tegra_hv_ivc_read_get_next_frame);
+
+/* Consume the current RX frame, advancing the read pointer. */
+int tegra_hv_ivc_read_advance(struct tegra_hv_ivc_cookie *ivck)
+{
+       struct ivc_dev *ivc = cookie_to_ivc_dev(ivck);
+
+       return ivc_rx_advance(ivc);
+}
+EXPORT_SYMBOL(tegra_hv_ivc_read_advance);
+
+/* Write @count bytes at offset @off of the current TX frame, no advance. */
+int tegra_hv_ivc_write_poke(struct tegra_hv_ivc_cookie *ivck,
+               const void *buf, int off, int count)
+{
+       return ivc_tx_poke(cookie_to_ivc_dev(ivck), buf, off, count);
+}
+EXPORT_SYMBOL(tegra_hv_ivc_write_poke);
+
+/* Get a pointer to the next TX frame for zero-copy production. */
+void *tegra_hv_ivc_write_get_next_frame(struct tegra_hv_ivc_cookie *ivck)
+{
+       struct ivc_dev *ivc = cookie_to_ivc_dev(ivck);
+
+       return ivc_tx_get_next_frame(ivc);
+}
+EXPORT_SYMBOL(tegra_hv_ivc_write_get_next_frame);
+
+/* Publish the current TX frame, advancing the write pointer. */
+int tegra_hv_ivc_write_advance(struct tegra_hv_ivc_cookie *ivck)
+{
+       struct ivc_dev *ivc = cookie_to_ivc_dev(ivck);
+
+       return ivc_tx_advance(ivc);
+}
+EXPORT_SYMBOL(tegra_hv_ivc_write_advance);
+
+/* module entry: register the platform driver (runs at subsys_initcall) */
+static int __init tegra_hv_mod_init(void)
+{
+       return platform_driver_register(&tegra_hv_driver);
+}
+
+/* module exit: unregister the platform driver */
+static void __exit tegra_hv_mod_exit(void)
+{
+       platform_driver_unregister(&tegra_hv_driver);
+}
+
+subsys_initcall(tegra_hv_mod_init);
+module_exit(tegra_hv_mod_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/include/linux/tegra-ivc-config.h b/include/linux/tegra-ivc-config.h
new file mode 100644 (file)
index 0000000..47b91d0
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of NVIDIA CORPORATION nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NVIDIA CORPORATION OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __TEGRA_IVC_CONFIG_H
+#define __TEGRA_IVC_CONFIG_H
+
+#include <linux/types.h>
+
+/* 8 x 4 = 32 bytes */
+struct tegra_hv_queue_data {
+       uint32_t        id;     /* IVC id */
+       uint32_t        peers[2];
+       uint32_t        size;
+       uint32_t        nframes;
+       uint32_t        frame_size;
+       uint32_t        offset;
+       uint32_t        flags;
+};
+
+/* Queue flags */
+#define TQF_STREAM_MODE                (1 << 0)
+#define TQF_PAGE_ALIGNED       (1 << 1)
+
+struct tegra_hv_shared_data {
+       uint32_t        magic;
+       uint32_t        sum;
+       uint32_t        nr_queues;
+       uint32_t        flags;
+       /* Do not add fields below; queue_data[] trailing array follows in memory */
+       /* struct tegra_hv_queue_data   queue_data[]; */
+};
+
+#define TEGRA_HV_SHD_MAGIC     0xf00fbaaf
+
+#define tegra_hv_shd_to_queue_data(_shd) \
+       ((struct tegra_hv_queue_data *) \
+               ((void *)(_shd) + sizeof(struct tegra_hv_shared_data)))
+
+
+#define tegra_hv_server_data_size(nr_queues) \
+       (sizeof(struct tegra_hv_shared_data) + \
+        sizeof(struct tegra_hv_queue_data) * (nr_queues))
+
+#endif
diff --git a/include/linux/tegra-ivc.h b/include/linux/tegra-ivc.h
new file mode 100644 (file)
index 0000000..0327897
--- /dev/null
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of NVIDIA CORPORATION nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NVIDIA CORPORATION OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __TEGRA_IVC_H
+#define __TEGRA_IVC_H
+
+struct device_node;
+
+/* in kernel interfaces */
+
+struct tegra_hv_ivc_ops;
+
+struct tegra_hv_ivc_cookie {
+       /* some fields that might be useful */
+       int irq;
+       int peer_vmid;
+       int nframes;
+       int frame_size;
+};
+
+struct tegra_hv_ivc_ops {
+       /* called when data are received */
+       void (*rx_rdy)(struct tegra_hv_ivc_cookie *ivck);
+       /* called when space is available to write data */
+       void (*tx_rdy)(struct tegra_hv_ivc_cookie *ivck);
+};
+
+/**
+ * tegra_hv_ivc_reserve - Reserve an IVC queue for use
+ * @dn:                Device node pointer to the queue in the DT
+ *             If NULL, then operate on first HV device
+ * @id         Id number of the queue to use.
+ * @ops                Ops structure or NULL
+ *
+ * Reserves the queue for use
+ *
+ * Returns a pointer to the ivc_dev to use or an ERR_PTR.
+ * Note that returning EPROBE_DEFER means that the ivc driver
+ * hasn't loaded yet and you should try again later in the
+ * boot sequence.
+ */
+struct tegra_hv_ivc_cookie *tegra_hv_ivc_reserve(
+               struct device_node *dn, int id,
+               const struct tegra_hv_ivc_ops *ops);
+
+/**
+ * tegra_hv_ivc_unreserve - Unreserve an IVC queue used
+ * @ivck       IVC cookie
+ *
+ * Unreserves the IVC channel
+ *
+ * Returns 0 on success and an error code otherwise
+ */
+int tegra_hv_ivc_unreserve(struct tegra_hv_ivc_cookie *ivck);
+
+/**
+ * tegra_hv_ivc_write - Writes a frame to the IVC queue
+ * @ivck       IVC cookie of the queue
+ * @buf                Pointer to the data to write
+ * @size       Size of the data to write
+ *
+ * Write a number of bytes (as a single frame) to the queue.
+ *
+ * Returns size on success and an error code otherwise
+ */
+int tegra_hv_ivc_write(struct tegra_hv_ivc_cookie *ivck, const void *buf,
+               int size);
+
+/**
+ * tegra_hv_ivc_read - Reads a frame from the IVC queue
+ * @ivck       IVC cookie of the queue
+ * @buf                Pointer to the data to read
+ * @size       max size of the data to read
+ *
+ * Reads a number of bytes (as a single frame) from the queue.
+ *
+ * Returns size on success and an error code otherwise
+ */
+int tegra_hv_ivc_read(struct tegra_hv_ivc_cookie *ivck, void *buf, int size);
+
+/**
+ * tegra_hv_ivc_can_read - Test whether data are available
+ * @ivck       IVC cookie of the queue
+ *
+ * Test whether data to read are available
+ *
+ * Returns 1 if data are available in the rx queue, 0 if not
+ */
+int tegra_hv_ivc_can_read(struct tegra_hv_ivc_cookie *ivck);
+
+/**
+ * tegra_hv_ivc_can_write - Test whether data can be written
+ * @ivck       IVC cookie of the queue
+ *
+ * Test whether data can be written
+ *
+ * Returns 1 if data can be written to the tx queue, 0 if not
+ */
+int tegra_hv_ivc_can_write(struct tegra_hv_ivc_cookie *ivck);
+
+/**
+ * tegra_hv_ivc_tx_empty - Test whether the tx queue is empty
+ * @ivck       IVC cookie of the queue
+ *
+ * Test whether the tx queue is completely empty
+ *
+ * Returns 1 if the queue is empty, zero otherwise
+ */
+int tegra_hv_ivc_tx_empty(struct tegra_hv_ivc_cookie *ivck);
+
+/**
+ * tegra_hv_ivc_set_loopback - Sets (or clears) loopback mode
+ * @ivck       IVC cookie of the queue
+ * @mode       Set loopback on/off (1 = on, 0 = off)
+ *
+ * Sets or clears loopback mode accordingly.
+ *
+ * When loopback is active any writes are ignored, while
+ * reads do not return data.
+ * Incoming data are copied immediately to the tx queue.
+ *
+ * Returns 0 on success, a negative error code otherwise
+ */
+int tegra_hv_ivc_set_loopback(struct tegra_hv_ivc_cookie *ivck, int mode);
+
+/* perform fast loopback */
+int tegra_hv_ivc_perform_loopback(struct tegra_hv_ivc_cookie *ivck);
+
+/* debugging aid */
+int tegra_hv_ivc_dump(struct tegra_hv_ivc_cookie *ivck);
+
+/**
+ * tegra_hv_ivc_read_peek - Peek (copying) data from a received frame
+ * @ivck       IVC cookie of the queue
+ * @buf                Buffer to receive the data
+ * @off                Offset in the frame
+ * @count      Count of bytes to copy
+ *
+ * Peek data from a received frame, copying to buf, without removing
+ * the frame from the queue.
+ *
+ * Returns 0 on success, a negative error code otherwise
+ */
+int tegra_hv_ivc_read_peek(struct tegra_hv_ivc_cookie *ivck,
+               void *buf, int off, int count);
+
+/**
+ * tegra_hv_ivc_read_get_next_frame - Peek at the next frame to receive
+ * @ivck       IVC cookie of the queue
+ *
+ * Peek at the next frame to be received, without removing it from
+ * the queue.
+ *
+ * Returns a pointer to the frame, or an error encoded pointer.
+ */
+void *tegra_hv_ivc_read_get_next_frame(struct tegra_hv_ivc_cookie *ivck);
+
+/**
+ * tegra_hv_ivc_read_advance - Advance the read queue
+ * @ivck       IVC cookie of the queue
+ *
+ * Advance the read queue
+ *
+ * Returns 0, or a negative error value if failed.
+ */
+int tegra_hv_ivc_read_advance(struct tegra_hv_ivc_cookie *ivck);
+
+/**
+ * tegra_hv_ivc_write_poke - Poke data to a frame to be transmitted
+ * @ivck       IVC cookie of the queue
+ * @buf                Buffer to the data
+ * @off                Offset in the frame
+ * @count      Count of bytes to copy
+ *
+ * Copy data to a transmit frame, copying from buf, without advancing
+ * the transmit queue.
+ *
+ * Returns 0 on success, a negative error code otherwise
+ */
+int tegra_hv_ivc_write_poke(struct tegra_hv_ivc_cookie *ivck,
+               const void *buf, int off, int count);
+
+/**
+ * tegra_hv_ivc_write_get_next_frame - Get the next frame to transmit
+ * @ivck       IVC cookie of the queue
+ *
+ * Get access to the next frame.
+ *
+ * Returns a pointer to the frame, or an error encoded pointer.
+ */
+void *tegra_hv_ivc_write_get_next_frame(struct tegra_hv_ivc_cookie *ivck);
+
+/**
+ * tegra_hv_ivc_write_advance - Advance the write queue
+ * @ivck       IVC cookie of the queue
+ *
+ * Advance the write queue
+ *
+ * Returns 0, or a negative error value if failed.
+ */
+int tegra_hv_ivc_write_advance(struct tegra_hv_ivc_cookie *ivck);
+
+#endif