media: video: tegra: add support for the AVP media offload engine
Dima Zavin [Mon, 1 Nov 2010 21:24:21 +0000 (14:24 -0700)]
Change-Id: Ia72e022ad1217ffe75915465ca0e886b16d1a64a
Signed-off-by: Dima Zavin <dima@android.com>

drivers/media/video/tegra/Kconfig
drivers/media/video/tegra/Makefile
drivers/media/video/tegra/avp/Kconfig
drivers/media/video/tegra/avp/Makefile
drivers/media/video/tegra/avp/avp.c [new file with mode: 0644]
drivers/media/video/tegra/avp/avp.h [new file with mode: 0644]
drivers/media/video/tegra/avp/avp_msg.h [new file with mode: 0644]
drivers/media/video/tegra/avp/avp_svc.c [new file with mode: 0644]
drivers/media/video/tegra/avp/headavp.S [new file with mode: 0644]
drivers/media/video/tegra/avp/headavp.h [new file with mode: 0644]
include/linux/tegra_avp.h [new file with mode: 0644]

index ce31c4e..ae77e89 100644 (file)
@@ -1,3 +1,5 @@
+source "drivers/media/video/tegra/avp/Kconfig"
+
 config TEGRA_CAMERA
         bool "Enable support for tegra camera/isp hardware"
         depends on ARCH_TEGRA
@@ -6,6 +8,3 @@ config TEGRA_CAMERA
           Enables support for the Tegra camera interface
 
           If unsure, say Y
-
-
-source "drivers/media/video/tegra/avp/Kconfig"
index 4cb8e3c..68b5c42 100644 (file)
@@ -1,2 +1,2 @@
-obj-$(CONFIG_TEGRA_CAMERA)             += tegra_camera.o
 obj-y                          += avp/
+obj-$(CONFIG_TEGRA_CAMERA)             += tegra_camera.o
index dca58da..fdd2085 100644 (file)
@@ -13,3 +13,13 @@ config TEGRA_RPC
          the tegra multimedia framework.
 
          If unsure, say Y
+
+config TEGRA_AVP
+       bool "Enable support for the AVP multimedia offload engine"
+       depends on ARCH_TEGRA && TEGRA_RPC
+       default y
+       help
+         Enables support for the multimedia offload engine used by the
+         Tegra multimedia framework.
+
+         If unsure, say Y
index 438cbb7..6d8be11 100644 (file)
@@ -1,3 +1,6 @@
 obj-$(CONFIG_TEGRA_RPC)                += tegra_rpc.o
 obj-$(CONFIG_TEGRA_RPC)                += trpc_local.o
 obj-$(CONFIG_TEGRA_RPC)                += trpc_sema.o
+obj-$(CONFIG_TEGRA_AVP)                += avp.o
+obj-$(CONFIG_TEGRA_AVP)                += avp_svc.o
+obj-$(CONFIG_TEGRA_AVP)                += headavp.o
diff --git a/drivers/media/video/tegra/avp/avp.c b/drivers/media/video/tegra/avp/avp.c
new file mode 100644 (file)
index 0000000..de6034b
--- /dev/null
@@ -0,0 +1,1683 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioctl.h>
+#include <linux/irq.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/tegra_rpc.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+
+#include <mach/clk.h>
+#include <mach/io.h>
+#include <mach/iomap.h>
+#include <mach/nvmap.h>
+
+#include "../../../../video/tegra/nvmap/nvmap.h"
+
+#include "headavp.h"
+#include "avp_msg.h"
+#include "trpc.h"
+#include "avp.h"
+
+u32 avp_debug_mask = (AVP_DBG_TRACE_TRPC_CONN |
+                     AVP_DBG_TRACE_XPC_CONN |
+                     AVP_DBG_TRACE_LIB);
+module_param_named(debug_mask, avp_debug_mask, uint, S_IWUSR | S_IRUGO);
+
+#define TEGRA_AVP_NAME                 "tegra-avp"
+
+#define TEGRA_AVP_KERNEL_FW            "nvrm_avp.bin"
+
+#define TEGRA_AVP_RESET_VECTOR_ADDR    \
+               (IO_ADDRESS(TEGRA_EXCEPTION_VECTORS_BASE) + 0x200)
+
+#define TEGRA_AVP_RESUME_ADDR          IO_ADDRESS(TEGRA_IRAM_BASE)
+
+#define FLOW_CTRL_HALT_COP_EVENTS      IO_ADDRESS(TEGRA_FLOW_CTRL_BASE + 0x4)
+#define FLOW_MODE_STOP                 (0x2 << 29)
+#define FLOW_MODE_NONE                 0x0
+
+#define MBOX_FROM_AVP                  IO_ADDRESS(TEGRA_RES_SEMA_BASE + 0x10)
+#define MBOX_TO_AVP                    IO_ADDRESS(TEGRA_RES_SEMA_BASE + 0x20)
+
+/* Layout of the mailbox registers:
+ * bit 31      - pending message interrupt enable (mailbox full, i.e. valid=1)
+ * bit 30      - message cleared interrupt enable (mailbox empty, i.e. valid=0)
+ * bit 29      - message valid. peer clears this bit after reading msg
+ * bits 27:0   - message data
+ */
+/* use unsigned constants: (1 << 31) left-shifts into the sign bit of a
+ * signed int, which is undefined behavior in C */
+#define MBOX_MSG_PENDING_INT_EN                (1U << 31)
+#define MBOX_MSG_READ_INT_EN           (1U << 30)
+#define MBOX_MSG_VALID                 (1U << 29)
+
+#define AVP_MSG_MAX_CMD_LEN            16
+#define AVP_MSG_AREA_SIZE      (AVP_MSG_MAX_CMD_LEN + TEGRA_RPC_MAX_MSG_LEN)
+
+/* Driver-global state for the AVP offload engine.  A single instance is
+ * allocated at probe time and published through the 'tegra_avp' pointer. */
+struct avp_info {
+       struct clk                      *cop_clk;
+
+       int                             mbox_from_avp_pend_irq;
+
+       dma_addr_t                      msg_area_addr;
+       u32                             msg;
+       void                            *msg_to_avp;
+       void                            *msg_from_avp;
+       struct mutex                    to_avp_lock;
+       struct mutex                    from_avp_lock;
+
+       struct work_struct              recv_work;
+       struct workqueue_struct         *recv_wq;
+
+       struct trpc_node                *rpc_node;
+       struct miscdevice               misc_dev;
+       bool                            opened;
+       struct mutex                    open_lock;
+
+       /* state_lock guards the flags below and the 'endpoints' tree */
+       spinlock_t                      state_lock;
+       bool                            initialized;
+       bool                            shutdown;
+       bool                            suspending;
+       bool                            defer_remote;
+
+       struct mutex                    libs_lock;
+       struct list_head                libs;
+       struct nvmap_client             *nvmap_libs;
+
+       /* client for driver allocations, persistent */
+       struct nvmap_client             *nvmap_drv;
+       struct nvmap_handle_ref         *kernel_handle;
+       void                            *kernel_data;
+       unsigned long                   kernel_phys;
+
+       struct nvmap_handle_ref         *iram_backup_handle;
+       void                            *iram_backup_data;
+       unsigned long                   iram_backup_phys;
+       unsigned long                   resume_addr;
+
+       struct trpc_endpoint            *avp_ep;
+       struct rb_root                  endpoints;
+
+       struct avp_svc_info             *avp_svc;
+};
+
+/* Book-keeping for one CPU<->AVP port pairing.  loc_id is the cookie the
+ * CPU hands the AVP (currently the rinfo pointer value) and rem_id is the
+ * AVP-side port id.  Lifetime is managed by the kref 'ref'. */
+struct remote_info {
+       u32                             loc_id;
+       u32                             rem_id;
+       struct kref                     ref;
+
+       struct trpc_endpoint            *trpc_ep;
+       struct rb_node                  rb_node;
+};
+
+/* One loaded AVP-side library, tracked on avp->libs under libs_lock. */
+struct lib_item {
+       struct list_head                list;
+       u32                             handle;
+       char                            name[TEGRA_AVP_LIB_MAX_NAME];
+};
+
+static struct avp_info *tegra_avp;
+
+static int avp_trpc_send(struct trpc_endpoint *ep, void *buf, size_t len);
+static void avp_trpc_close(struct trpc_endpoint *ep);
+static void libs_cleanup(struct avp_info *avp);
+
+static struct trpc_ep_ops remote_ep_ops = {
+       .send   = avp_trpc_send,
+       .close  = avp_trpc_close,
+};
+
+/* Allocate a zeroed remote_info with one reference held by the caller.
+ * Returns NULL on allocation failure.  The 'avp' argument is currently
+ * unused. */
+static struct remote_info *rinfo_alloc(struct avp_info *avp)
+{
+       struct remote_info *rinfo;
+
+       rinfo = kzalloc(sizeof(struct remote_info), GFP_KERNEL);
+       if (!rinfo)
+               return NULL;
+       kref_init(&rinfo->ref);
+       return rinfo;
+}
+
+/* kref release callback: frees the remote_info when the last reference
+ * is dropped. */
+static void _rinfo_release(struct kref *ref)
+{
+       struct remote_info *rinfo = container_of(ref, struct remote_info, ref);
+       kfree(rinfo);
+}
+
+/* Take an additional reference on rinfo. */
+static inline void rinfo_get(struct remote_info *rinfo)
+{
+       kref_get(&rinfo->ref);
+}
+
+/* Drop a reference; rinfo is freed when the count reaches zero. */
+static inline void rinfo_put(struct remote_info *rinfo)
+{
+       kref_put(&rinfo->ref, _rinfo_release);
+}
+
+/* Insert rinfo into the endpoint rb-tree, keyed by loc_id, and take a
+ * reference owned by the tree.  Returns -EEXIST if the id is already
+ * present.  Callers hold avp->state_lock. */
+static int remote_insert(struct avp_info *avp, struct remote_info *rinfo)
+{
+       struct rb_node **p;
+       struct rb_node *parent;
+       struct remote_info *tmp;
+
+       p = &avp->endpoints.rb_node;
+       parent = NULL;
+       while (*p) {
+               parent = *p;
+               tmp = rb_entry(parent, struct remote_info, rb_node);
+
+               if (rinfo->loc_id < tmp->loc_id)
+                       p = &(*p)->rb_left;
+               else if (rinfo->loc_id > tmp->loc_id)
+                       p = &(*p)->rb_right;
+               else {
+                       pr_info("%s: avp endpoint id=%x (%s) already exists\n",
+                               __func__, rinfo->loc_id,
+                               trpc_name(rinfo->trpc_ep));
+                       return -EEXIST;
+               }
+       }
+       rb_link_node(&rinfo->rb_node, parent, p);
+       rb_insert_color(&rinfo->rb_node, &avp->endpoints);
+       rinfo_get(rinfo);
+       return 0;
+}
+
+/* Look up a remote_info by its local id.  Returns the entry (without
+ * taking a reference) or NULL.  Callers hold avp->state_lock. */
+static struct remote_info *remote_find(struct avp_info *avp, u32 local_id)
+{
+       struct rb_node *n = avp->endpoints.rb_node;
+       struct remote_info *rinfo;
+
+       while (n) {
+               rinfo = rb_entry(n, struct remote_info, rb_node);
+
+               if (local_id < rinfo->loc_id)
+                       n = n->rb_left;
+               else if (local_id > rinfo->loc_id)
+                       n = n->rb_right;
+               else
+                       return rinfo;
+       }
+       return NULL;
+}
+
+/* Unlink rinfo from the endpoint tree and drop the tree's reference.
+ * Callers hold avp->state_lock. */
+static void remote_remove(struct avp_info *avp, struct remote_info *rinfo)
+{
+       rb_erase(&rinfo->rb_node, &avp->endpoints);
+       rinfo_put(rinfo);
+}
+
+/* test whether or not the trpc endpoint provided is a valid AVP node
+ * endpoint: its private data must be a remote_info that is still present
+ * in the endpoint tree and still points back at this endpoint.  Returns
+ * the remote_info (no reference taken) or NULL.  Callers hold
+ * avp->state_lock. */
+static struct remote_info *validate_trpc_ep(struct avp_info *avp,
+                                           struct trpc_endpoint *ep)
+{
+       struct remote_info *tmp = trpc_priv(ep);
+       struct remote_info *rinfo;
+
+       if (!tmp)
+               return NULL;
+       rinfo = remote_find(avp, tmp->loc_id);
+       if (rinfo && rinfo == tmp && rinfo->trpc_ep == ep)
+               return rinfo;
+       return NULL;
+}
+
+/* Thin MMIO accessors for the mailbox registers. */
+static inline void mbox_writel(u32 val, void __iomem *mbox)
+{
+       writel(val, mbox);
+}
+
+static inline u32 mbox_readl(void __iomem *mbox)
+{
+       return readl(mbox);
+}
+
+/* Acknowledge an inbound message by writing an ack record into the
+ * from-AVP shared message area.  The AVP polls 'cmd', so 'arg' must be
+ * visible before it. */
+static inline void msg_ack_remote(struct avp_info *avp, u32 cmd, u32 arg)
+{
+       struct msg_ack *ack = avp->msg_from_avp;
+
+       /* must make sure the arg is there first */
+       ack->arg = arg;
+       wmb();
+       ack->cmd = cmd;
+       wmb();
+}
+
+/* Read the command word at the head of the from-AVP shared message area. */
+static inline u32 msg_recv_get_cmd(struct avp_info *avp)
+{
+       volatile u32 *cmd = avp->msg_from_avp;
+       rmb();
+       return *cmd;
+}
+
+/* Copy a command header (and optional payload) into the to-AVP shared
+ * message area, then ring the doorbell by writing the message-area
+ * address into the outbound mailbox.  Caller must own the message area
+ * (to_avp_lock) and have confirmed the previous message was acked. */
+static inline int __msg_write(struct avp_info *avp, void *hdr, size_t hdr_len,
+                             void *buf, size_t len)
+{
+       memcpy(avp->msg_to_avp, hdr, hdr_len);
+       if (buf && len)
+               memcpy(avp->msg_to_avp + hdr_len, buf, len);
+       mbox_writel(avp->msg, MBOX_TO_AVP);
+       return 0;
+}
+
+/* Wait (polling, up to 1s) for the AVP to ack the previous message by
+ * clearing the first word of the shared area, then write the new one.
+ * Returns 0 on success or -ETIMEDOUT if the previous message was never
+ * acked.  Callers hold avp->to_avp_lock. */
+static inline int msg_write(struct avp_info *avp, void *hdr, size_t hdr_len,
+                           void *buf, size_t len)
+{
+       /* rem_ack is a pointer into shared memory that the AVP modifies */
+       volatile u32 *rem_ack = avp->msg_to_avp;
+       unsigned long endtime = jiffies + HZ;
+
+       /* the other side ack's the message by clearing the first word,
+        * wait for it to do so */
+       rmb();
+       while (*rem_ack != 0 && time_before(jiffies, endtime)) {
+               usleep_range(100, 2000);
+               rmb();
+       }
+       if (*rem_ack != 0)
+               return -ETIMEDOUT;
+       __msg_write(avp, hdr, hdr_len, buf, len);
+       return 0;
+}
+
+/* Non-blocking check for an ack carrying command 'cmd' in the to-AVP
+ * area; copies the ack argument out through 'arg' if provided.  Returns
+ * -ENOENT when the ack is not (yet) present. */
+static inline int msg_check_ack(struct avp_info *avp, u32 cmd, u32 *arg)
+{
+       struct msg_ack ack;
+
+       rmb();
+       memcpy(&ack, avp->msg_to_avp, sizeof(ack));
+       if (ack.cmd != cmd)
+               return -ENOENT;
+       if (arg)
+               *arg = ack.arg;
+       return 0;
+}
+
+/* XXX: add timeout */
+/* Poll (for up to ~200ms) for an ack with command 'cmd'.  Note that the
+ * 1-5ms sleep happens after every check, including a successful one, and
+ * that the ack word is cleared unconditionally -- even on timeout.
+ * Callers hold avp->to_avp_lock. */
+static int msg_wait_ack_locked(struct avp_info *avp, u32 cmd, u32 *arg)
+{
+       /* rem_ack is a pointer into shared memory that the AVP modifies */
+       volatile u32 *rem_ack = avp->msg_to_avp;
+       unsigned long endtime = jiffies + HZ / 5;
+       int ret;
+
+       do {
+               ret = msg_check_ack(avp, cmd, arg);
+               usleep_range(1000, 5000);
+       } while (ret && time_before(jiffies, endtime));
+
+       /* clear out the ack */
+       *rem_ack = 0;
+       wmb();
+       return ret;
+}
+
+/* trpc endpoint .send hook: forward a message from a local endpoint to
+ * its paired AVP port by serializing a CMD_MESSAGE frame into the shared
+ * message area under to_avp_lock.  Returns 0 on success, -EBUSY while a
+ * suspend is in progress (except for the control endpoint), -ENODEV
+ * after shutdown, -ENOTTY for a stale/invalid endpoint, or msg_write()'s
+ * -ETIMEDOUT if the AVP never acked the previous message. */
+static int avp_trpc_send(struct trpc_endpoint *ep, void *buf, size_t len)
+{
+       struct avp_info *avp = tegra_avp;
+       struct remote_info *rinfo;
+       struct msg_port_data msg;
+       int ret;
+       unsigned long flags;
+
+       /* 'len' is a size_t; %zu is the matching format specifier */
+       DBG(AVP_DBG_TRACE_TRPC_MSG, "%s: ep=%p priv=%p buf=%p len=%zu\n",
+           __func__, ep, trpc_priv(ep), buf, len);
+
+       spin_lock_irqsave(&avp->state_lock, flags);
+       if (unlikely(avp->suspending && trpc_peer(ep) != avp->avp_ep)) {
+               ret = -EBUSY;
+               goto err_state_locked;
+       } else if (avp->shutdown) {
+               ret = -ENODEV;
+               goto err_state_locked;
+       }
+       /* make sure the endpoint is still one of ours before using it */
+       rinfo = validate_trpc_ep(avp, ep);
+       if (!rinfo) {
+               ret = -ENOTTY;
+               goto err_state_locked;
+       }
+       rinfo_get(rinfo);
+       spin_unlock_irqrestore(&avp->state_lock, flags);
+
+       msg.cmd = CMD_MESSAGE;
+       msg.port_id = rinfo->rem_id;
+       msg.msg_len = len;
+
+       mutex_lock(&avp->to_avp_lock);
+       ret = msg_write(avp, &msg, sizeof(msg), buf, len);
+       mutex_unlock(&avp->to_avp_lock);
+
+       DBG(AVP_DBG_TRACE_TRPC_MSG, "%s: msg sent for %s (%x->%x) (%d)\n",
+           __func__, trpc_name(ep), rinfo->loc_id, rinfo->rem_id, ret);
+       rinfo_put(rinfo);
+       return ret;
+
+err_state_locked:
+       spin_unlock_irqrestore(&avp->state_lock, flags);
+       return ret;
+}
+
+/* Tell the AVP to tear down its side of the port identified by the
+ * remote id 'port_id'.  Returns 0 or msg_write()'s -ETIMEDOUT. */
+static int _send_disconnect(struct avp_info *avp, u32 port_id)
+{
+       struct msg_disconnect msg;
+       int ret;
+
+       msg.cmd = CMD_DISCONNECT;
+       msg.port_id = port_id;
+
+       mutex_lock(&avp->to_avp_lock);
+       ret = msg_write(avp, &msg, sizeof(msg), NULL, 0);
+       mutex_unlock(&avp->to_avp_lock);
+
+       DBG(AVP_DBG_TRACE_XPC_CONN, "%s: sent disconnect msg for 0x%x\n",
+           __func__, port_id);
+       return ret;
+}
+
+/* Note: Assumes that the rinfo was previously successfully added to the
+ * endpoints rb_tree. The initial refcnt of 1 is inherited by the port when the
+ * trpc endpoint is created with the trpc_xxx functions. Thus, on close,
+ * we must drop that reference here.
+ * The avp->endpoints rb_tree keeps its own reference on rinfo objects.
+ *
+ * The try_connect function does not use this on error because it needs to
+ * split the close of trpc_ep port and the put.
+ */
+static inline void remote_close(struct remote_info *rinfo)
+{
+       trpc_close(rinfo->trpc_ep);
+       rinfo_put(rinfo);
+}
+
+/* trpc endpoint .close hook: remove the endpoint from the tree, notify
+ * the AVP with a disconnect message, and drop the port's reference.
+ * A no-op after shutdown or for endpoints we no longer track. */
+static void avp_trpc_close(struct trpc_endpoint *ep)
+{
+       struct avp_info *avp = tegra_avp;
+       struct remote_info *rinfo;
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&avp->state_lock, flags);
+       if (avp->shutdown) {
+               spin_unlock_irqrestore(&avp->state_lock, flags);
+               return;
+       }
+
+       rinfo = validate_trpc_ep(avp, ep);
+       if (!rinfo) {
+               pr_err("%s: tried to close invalid port '%s' endpoint (%p)\n",
+                      __func__, trpc_name(ep), ep);
+               spin_unlock_irqrestore(&avp->state_lock, flags);
+               return;
+       }
+       /* temporary reference so rinfo survives the tree removal below */
+       rinfo_get(rinfo);
+       remote_remove(avp, rinfo);
+       spin_unlock_irqrestore(&avp->state_lock, flags);
+
+       DBG(AVP_DBG_TRACE_TRPC_CONN, "%s: closing '%s' (%x)\n", __func__,
+           trpc_name(ep), rinfo->rem_id);
+
+       ret = _send_disconnect(avp, rinfo->rem_id);
+       if (ret)
+               pr_err("%s: error while closing remote port '%s' (%x)\n",
+                      __func__, trpc_name(ep), rinfo->rem_id);
+       remote_close(rinfo);
+       rinfo_put(rinfo);
+}
+
+/* takes and holds avp->from_avp_lock */
+/* Also sets defer_remote so the mailbox ISR punts incoming messages to
+ * the workqueue (which serializes on from_avp_lock) instead of handling
+ * them at irq context while the caller owns the receive path. */
+static void recv_msg_lock(struct avp_info *avp)
+{
+       unsigned long flags;
+
+       mutex_lock(&avp->from_avp_lock);
+       spin_lock_irqsave(&avp->state_lock, flags);
+       avp->defer_remote = true;
+       spin_unlock_irqrestore(&avp->state_lock, flags);
+}
+
+/* MUST be called with avp->from_avp_lock held */
+/* Clears defer_remote and releases the receive path taken by
+ * recv_msg_lock(). */
+static void recv_msg_unlock(struct avp_info *avp)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&avp->state_lock, flags);
+       avp->defer_remote = false;
+       spin_unlock_irqrestore(&avp->state_lock, flags);
+       mutex_unlock(&avp->from_avp_lock);
+}
+
+/* trpc node .try_connect hook: connect local endpoint 'from' to the
+ * same-named port on the AVP.  Sends CMD_CONNECT, waits for the remote
+ * port id in the ack, creates the local peer endpoint, and registers the
+ * pairing in the endpoint tree.  Returns 0 on success or a -errno
+ * (-EBUSY while suspending, -ECONNREFUSED on a NACK, etc). */
+static int avp_node_try_connect(struct trpc_node *node,
+                               struct trpc_node *src_node,
+                               struct trpc_endpoint *from)
+{
+       struct avp_info *avp = tegra_avp;
+       const char *port_name = trpc_name(from);
+       struct remote_info *rinfo;
+       struct msg_connect msg;
+       int ret;
+       unsigned long flags;
+       int len;
+
+       DBG(AVP_DBG_TRACE_TRPC_CONN, "%s: trying connect from %s\n", __func__,
+           port_name);
+
+       if (node != avp->rpc_node || node->priv != avp)
+               return -ENODEV;
+
+       len = strlen(port_name);
+       if (len > XPC_PORT_NAME_LEN) {
+               pr_err("%s: port name (%s) too long\n", __func__, port_name);
+               return -EINVAL;
+       }
+
+       ret = 0;
+       spin_lock_irqsave(&avp->state_lock, flags);
+       if (avp->suspending) {
+               ret = -EBUSY;
+       } else if (likely(src_node != avp->rpc_node)) {
+               /* only check for initialized when the source is not ourselves
+                * since we'll end up calling into here during initialization */
+               if (!avp->initialized)
+                       ret = -ENODEV;
+       } else if (strncmp(port_name, "RPC_AVP_PORT", XPC_PORT_NAME_LEN)) {
+               /* we only allow connections to ourselves for the cpu-to-avp
+                  port */
+               ret = -EINVAL;
+       }
+       spin_unlock_irqrestore(&avp->state_lock, flags);
+       if (ret)
+               return ret;
+
+       rinfo = rinfo_alloc(avp);
+       if (!rinfo) {
+               pr_err("%s: cannot alloc mem for rinfo\n", __func__);
+               ret = -ENOMEM;
+               goto err_alloc_rinfo;
+       }
+       /* local port id is simply the rinfo pointer value.
+        * NOTE(review): truncates on 64-bit kernels -- presumably fine for
+        * the 32-bit Tegra targets this drives, but worth confirming. */
+       rinfo->loc_id = (u32)rinfo;
+
+       msg.cmd = CMD_CONNECT;
+       msg.port_id = rinfo->loc_id;
+       memcpy(msg.name, port_name, len);
+       memset(msg.name + len, 0, XPC_PORT_NAME_LEN - len);
+
+       /* when trying to connect to remote, we need to block remote
+        * messages until we get our ack and can insert it into our lists.
+        * Otherwise, we can get a message from the other side for a port
+        * that we haven't finished setting up.
+        *
+        * 'defer_remote' will force the irq handler to not process messages
+        * at irq context but to schedule work to do so. The work function will
+        * take the from_avp_lock and everything should stay consistent.
+        */
+       recv_msg_lock(avp);
+       mutex_lock(&avp->to_avp_lock);
+       ret = msg_write(avp, &msg, sizeof(msg), NULL, 0);
+       if (ret) {
+               pr_err("%s: remote has not acked last message (%s)\n", __func__,
+                      port_name);
+               mutex_unlock(&avp->to_avp_lock);
+               goto err_msg_write;
+       }
+       ret = msg_wait_ack_locked(avp, CMD_RESPONSE, &rinfo->rem_id);
+       mutex_unlock(&avp->to_avp_lock);
+
+       if (ret) {
+               pr_err("%s: remote end won't respond for '%s'\n", __func__,
+                      port_name);
+               goto err_wait_ack;
+       }
+       /* a remote id of 0 in the ack means the AVP refused the connect */
+       if (!rinfo->rem_id) {
+               pr_err("%s: can't connect to '%s'\n", __func__, port_name);
+               ret = -ECONNREFUSED;
+               goto err_nack;
+       }
+
+       DBG(AVP_DBG_TRACE_TRPC_CONN, "%s: got conn ack '%s' (%x <-> %x)\n",
+           __func__, port_name, rinfo->loc_id, rinfo->rem_id);
+
+       rinfo->trpc_ep = trpc_create_peer(node, from, &remote_ep_ops,
+                                              rinfo);
+       if (!rinfo->trpc_ep) {
+               pr_err("%s: cannot create peer for %s\n", __func__, port_name);
+               ret = -EINVAL;
+               goto err_create_peer;
+       }
+
+       spin_lock_irqsave(&avp->state_lock, flags);
+       ret = remote_insert(avp, rinfo);
+       spin_unlock_irqrestore(&avp->state_lock, flags);
+       if (ret)
+               goto err_ep_insert;
+
+       recv_msg_unlock(avp);
+       return 0;
+
+err_ep_insert:
+       trpc_close(rinfo->trpc_ep);
+err_create_peer:
+       _send_disconnect(avp, rinfo->rem_id);
+err_nack:
+err_wait_ack:
+err_msg_write:
+       recv_msg_unlock(avp);
+       rinfo_put(rinfo);
+err_alloc_rinfo:
+       return ret;
+}
+
+/* Handle a CMD_DISCONNECT from the AVP: drop the matching local port
+ * (if still tracked) and always ack the remote.  Runs with the receive
+ * path serialized by the caller. */
+static void process_disconnect_locked(struct avp_info *avp,
+                                     struct msg_data *raw_msg)
+{
+       struct msg_disconnect *disconn_msg = (struct msg_disconnect *)raw_msg;
+       unsigned long flags;
+       struct remote_info *rinfo;
+
+       DBG(AVP_DBG_TRACE_XPC_CONN, "%s: got disconnect (%x)\n", __func__,
+           disconn_msg->port_id);
+
+       if (avp_debug_mask & AVP_DBG_TRACE_XPC_MSG)
+               print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, disconn_msg,
+                                    sizeof(struct msg_disconnect));
+
+       spin_lock_irqsave(&avp->state_lock, flags);
+       rinfo = remote_find(avp, disconn_msg->port_id);
+       if (!rinfo) {
+               spin_unlock_irqrestore(&avp->state_lock, flags);
+               pr_warning("%s: got disconnect for unknown port 0x%x\n",
+                          __func__, disconn_msg->port_id);
+               goto ack;
+       }
+       rinfo_get(rinfo);
+       remote_remove(avp, rinfo);
+       spin_unlock_irqrestore(&avp->state_lock, flags);
+
+       remote_close(rinfo);
+       rinfo_put(rinfo);
+ack:
+       msg_ack_remote(avp, CMD_ACK, 0);
+}
+
+/* Handle a CMD_CONNECT from the AVP: create a local endpoint for the
+ * requested port name, register the pairing, and respond with the new
+ * local port id -- or with 0 to NACK on any failure.  Runs with the
+ * receive path serialized by the caller. */
+static void process_connect_locked(struct avp_info *avp,
+                                  struct msg_data *raw_msg)
+{
+       struct msg_connect *conn_msg = (struct msg_connect *)raw_msg;
+       struct trpc_endpoint *trpc_ep;
+       struct remote_info *rinfo;
+       char name[XPC_PORT_NAME_LEN + 1];
+       int ret;
+       u32 local_port_id = 0;
+       unsigned long flags;
+
+       DBG(AVP_DBG_TRACE_XPC_CONN, "%s: got connect (%x)\n", __func__,
+           conn_msg->port_id);
+       if (avp_debug_mask & AVP_DBG_TRACE_XPC_MSG)
+               print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+                                    conn_msg, sizeof(struct msg_connect));
+
+       rinfo = rinfo_alloc(avp);
+       if (!rinfo) {
+               pr_err("%s: cannot alloc mem for rinfo\n", __func__);
+               ret = -ENOMEM;
+               goto ack;
+       }
+       /* local id is the rinfo pointer value; remote id comes from the msg */
+       rinfo->loc_id = (u32)rinfo;
+       rinfo->rem_id = conn_msg->port_id;
+
+       /* the wire-format name is not guaranteed to be NUL-terminated */
+       memcpy(name, conn_msg->name, XPC_PORT_NAME_LEN);
+       name[XPC_PORT_NAME_LEN] = '\0';
+       trpc_ep = trpc_create_connect(avp->rpc_node, name, &remote_ep_ops,
+                                     rinfo, 0);
+       if (IS_ERR(trpc_ep)) {
+               pr_err("%s: remote requested unknown port '%s' (%d)\n",
+                      __func__, name, (int)PTR_ERR(trpc_ep));
+               goto nack;
+       }
+       rinfo->trpc_ep = trpc_ep;
+
+       spin_lock_irqsave(&avp->state_lock, flags);
+       ret = remote_insert(avp, rinfo);
+       spin_unlock_irqrestore(&avp->state_lock, flags);
+       if (ret)
+               goto err_ep_insert;
+
+       local_port_id = rinfo->loc_id;
+       goto ack;
+
+err_ep_insert:
+       trpc_close(trpc_ep);
+nack:
+       rinfo_put(rinfo);
+       local_port_id = 0;
+ack:
+       /* port id 0 in the response tells the AVP the connect failed */
+       msg_ack_remote(avp, CMD_RESPONSE, local_port_id);
+}
+
+/* Handle a CMD_MESSAGE from the AVP: look up the destination port by its
+ * local id and queue the payload on it.  Acks the remote in every case
+ * except -ENOMEM, where the caller is expected to retry from process
+ * context (the message area is left un-acked so it is not overwritten).
+ * 'gfp_flags' selects GFP_ATOMIC (irq path) or GFP_KERNEL (work path). */
+static int process_message(struct avp_info *avp, struct msg_data *raw_msg,
+                           gfp_t gfp_flags)
+{
+       struct msg_port_data *port_msg = (struct msg_port_data *)raw_msg;
+       struct remote_info *rinfo;
+       unsigned long flags;
+       int len;
+       int ret;
+
+       /* clamp to the maximum payload; oversized senders are reported below */
+       len = min(port_msg->msg_len, (u32)TEGRA_RPC_MAX_MSG_LEN);
+
+       if (avp_debug_mask & AVP_DBG_TRACE_XPC_MSG) {
+               pr_info("%s: got message cmd=%x port=%x len=%d\n", __func__,
+                       port_msg->cmd, port_msg->port_id, port_msg->msg_len);
+               print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, port_msg,
+                                    sizeof(struct msg_port_data) + len);
+       }
+
+       if (len != port_msg->msg_len)
+               pr_err("%s: message sent is too long (%d bytes)\n", __func__,
+                      port_msg->msg_len);
+
+       spin_lock_irqsave(&avp->state_lock, flags);
+       rinfo = remote_find(avp, port_msg->port_id);
+       if (rinfo) {
+               rinfo_get(rinfo);
+               trpc_get(rinfo->trpc_ep);
+       } else {
+               pr_err("%s: port %x not found\n", __func__, port_msg->port_id);
+               spin_unlock_irqrestore(&avp->state_lock, flags);
+               ret = -ENOENT;
+               goto ack;
+       }
+       spin_unlock_irqrestore(&avp->state_lock, flags);
+
+       ret = trpc_send_msg(avp->rpc_node, rinfo->trpc_ep, port_msg->data,
+                                len, gfp_flags);
+       if (ret == -ENOMEM) {
+               trpc_put(rinfo->trpc_ep);
+               rinfo_put(rinfo);
+               goto no_ack;
+       } else if (ret) {
+               pr_err("%s: cannot queue message for port %s/%x (%d)\n",
+                      __func__, trpc_name(rinfo->trpc_ep), rinfo->loc_id,
+                      ret);
+       } else {
+               DBG(AVP_DBG_TRACE_XPC_MSG, "%s: msg queued\n", __func__);
+       }
+
+       trpc_put(rinfo->trpc_ep);
+       rinfo_put(rinfo);
+ack:
+       msg_ack_remote(avp, CMD_ACK, 0);
+no_ack:
+       return ret;
+}
+
+/* Workqueue handler for messages the ISR deferred (defer_remote set, or
+ * an atomic allocation failed): dispatch the command still sitting in
+ * the from-AVP message area, serialized by from_avp_lock. */
+static void process_avp_message(struct work_struct *work)
+{
+       struct avp_info *avp = container_of(work, struct avp_info, recv_work);
+       struct msg_data *msg = avp->msg_from_avp;
+
+       mutex_lock(&avp->from_avp_lock);
+       rmb();
+       switch (msg->cmd) {
+       case CMD_CONNECT:
+               process_connect_locked(avp, msg);
+               break;
+       case CMD_DISCONNECT:
+               process_disconnect_locked(avp, msg);
+               break;
+       case CMD_MESSAGE:
+               process_message(avp, msg, GFP_KERNEL);
+               break;
+       default:
+               pr_err("%s: unknown cmd (%x) received\n", __func__, msg->cmd);
+               break;
+       }
+       mutex_unlock(&avp->from_avp_lock);
+}
+
+/* Mailbox "message pending" interrupt handler.  Reads and clears the
+ * inbound mailbox, filters out the watchdog magic, and either processes
+ * a data message inline (GFP_ATOMIC) or defers to the workqueue when
+ * defer_remote is set, the command needs process context, or the atomic
+ * allocation fails. */
+static irqreturn_t avp_mbox_pending_isr(int irq, void *data)
+{
+       struct avp_info *avp = data;
+       struct msg_data *msg = avp->msg_from_avp;
+       u32 mbox_msg;
+       unsigned long flags;
+       int ret;
+
+       mbox_msg = mbox_readl(MBOX_FROM_AVP);
+       mbox_writel(0, MBOX_FROM_AVP);
+
+       DBG(AVP_DBG_TRACE_XPC_IRQ, "%s: got msg %x\n", __func__, mbox_msg);
+
+       /* XXX: re-use previous message? */
+       if (!(mbox_msg & MBOX_MSG_VALID)) {
+               WARN_ON(1);
+               goto done;
+       }
+
+       /* shifting left by 4 drops the status bits (31:29) off the top and
+        * restores the 28-bit payload to its original value */
+       mbox_msg <<= 4;
+       if (mbox_msg == 0x2f00bad0UL) {
+               pr_info("%s: petting watchdog\n", __func__);
+               goto done;
+       }
+
+       spin_lock_irqsave(&avp->state_lock, flags);
+       if (avp->shutdown) {
+               spin_unlock_irqrestore(&avp->state_lock, flags);
+               goto done;
+       } else if (avp->defer_remote) {
+               spin_unlock_irqrestore(&avp->state_lock, flags);
+               goto defer;
+       }
+       spin_unlock_irqrestore(&avp->state_lock, flags);
+
+       rmb();
+       /* only CMD_MESSAGE is safe to handle in irq context; connect and
+        * disconnect take mutexes, so they always go through the work item */
+       if (msg->cmd == CMD_MESSAGE) {
+               ret = process_message(avp, msg, GFP_ATOMIC);
+               if (ret != -ENOMEM)
+                       goto done;
+               pr_info("%s: deferring message (%d)\n", __func__, ret);
+       }
+defer:
+       queue_work(avp->recv_wq, &avp->recv_work);
+done:
+       return IRQ_HANDLED;
+}
+
+/* Pulse the COP (AVP) out of reset through the boot stub, which maps the
+ * kernel carveout and jumps to 'reset_addr'.  Detects a dead AVP kernel
+ * by checking whether it reprogrammed the reset vector within 2s.
+ * Returns 0 on success, -EINVAL if the AVP never came up.
+ * NOTE(review): dma_map_single() is called with a NULL device -- worth
+ * confirming this is acceptable on this platform's DMA implementation. */
+static int avp_reset(struct avp_info *avp, unsigned long reset_addr)
+{
+       unsigned long stub_code_phys = virt_to_phys(_tegra_avp_boot_stub);
+       dma_addr_t stub_data_phys;
+       unsigned long timeout;
+       int ret = 0;
+
+       writel(FLOW_MODE_STOP, FLOW_CTRL_HALT_COP_EVENTS);
+
+       _tegra_avp_boot_stub_data.map_phys_addr = avp->kernel_phys;
+       _tegra_avp_boot_stub_data.jump_addr = reset_addr;
+       wmb();
+       stub_data_phys = dma_map_single(NULL, &_tegra_avp_boot_stub_data,
+                                       sizeof(_tegra_avp_boot_stub_data),
+                                       DMA_TO_DEVICE);
+
+       writel(stub_code_phys, TEGRA_AVP_RESET_VECTOR_ADDR);
+
+       tegra_periph_reset_assert(avp->cop_clk);
+       udelay(10);
+       tegra_periph_reset_deassert(avp->cop_clk);
+
+       writel(FLOW_MODE_NONE, FLOW_CTRL_HALT_COP_EVENTS);
+
+       /* the AVP firmware will reprogram its reset vector as the kernel
+        * starts, so a dead kernel can be detected by polling this value */
+       timeout = jiffies + msecs_to_jiffies(2000);
+       while (time_before(jiffies, timeout)) {
+               if (readl(TEGRA_AVP_RESET_VECTOR_ADDR) != stub_code_phys)
+                       break;
+               cpu_relax();
+       }
+       if (readl(TEGRA_AVP_RESET_VECTOR_ADDR) == stub_code_phys)
+               ret = -EINVAL;
+       WARN_ON(ret);
+       dma_unmap_single(NULL, stub_data_phys,
+                        sizeof(_tegra_avp_boot_stub_data),
+                        DMA_TO_DEVICE);
+       return ret;
+}
+
+/* Stop the AVP and reset the mailbox/message-area state. */
+static void avp_halt(struct avp_info *avp)
+{
+       /* ensure the AVP is halted */
+       writel(FLOW_MODE_STOP, FLOW_CTRL_HALT_COP_EVENTS);
+       tegra_periph_reset_assert(avp->cop_clk);
+
+       /* set up the initial memory areas and mailbox contents */
+       /* the non-zero sentinel in the outbound area keeps msg_write() from
+        * succeeding until a (re)booted AVP clears it -- TODO confirm the
+        * AVP side interprets 0xfeedf00d specially */
+       *((u32 *)avp->msg_from_avp) = 0;
+       *((u32 *)avp->msg_to_avp) = 0xfeedf00d;
+       mbox_writel(0, MBOX_FROM_AVP);
+       mbox_writel(0, MBOX_TO_AVP);
+}
+
+/* Note: CPU_PORT server and AVP_PORT client are registered with the avp
+ * node, but are actually meant to be processed on our side (either
+ * by the svc thread for processing remote calls or by the client
+ * of the char dev for receiving replies for managing remote
+ * libraries/modules. */
+
+/* Boot the AVP: load the kernel firmware image into the shared SZ_1M
+ * carveout, reset the COP into it, start the RPC service port, and
+ * connect to the remote RPC_AVP_PORT.  Returns 0 on success or a
+ * -errno with all partial initialization rolled back. */
+static int avp_init(struct avp_info *avp, const char *fw_file)
+{
+       const struct firmware *avp_fw;
+       int ret;
+       struct trpc_endpoint *ep;
+
+       avp->nvmap_libs = nvmap_create_client(nvmap_dev, "avp_libs");
+       if (IS_ERR(avp->nvmap_libs)) {
+               pr_err("%s: cannot create libs nvmap client\n", __func__);
+               ret = PTR_ERR(avp->nvmap_libs);
+               goto err_nvmap_create_libs_client;
+       }
+
+       /* put the address of the shared mem area into the mailbox for AVP
+        * to read out when its kernel boots. */
+       mbox_writel(avp->msg, MBOX_TO_AVP);
+
+       ret = request_firmware(&avp_fw, fw_file, avp->misc_dev.this_device);
+       if (ret) {
+               pr_err("%s: Cannot read firmware '%s'\n", __func__, fw_file);
+               goto err_req_fw;
+       }
+       /* the image must fit the SZ_1M carveout: reject oversized firmware
+        * instead of overflowing the buffer (and underflowing the memset
+        * length) below */
+       if (avp_fw->size > SZ_1M) {
+               pr_err("%s: firmware '%s' too large (%zu bytes)\n", __func__,
+                      fw_file, avp_fw->size);
+               release_firmware(avp_fw);
+               ret = -EINVAL;
+               goto err_req_fw;
+       }
+       pr_info("%s: read firmware from '%s' (%zu bytes)\n", __func__,
+               fw_file, avp_fw->size);
+       memcpy(avp->kernel_data, avp_fw->data, avp_fw->size);
+       memset(avp->kernel_data + avp_fw->size, 0, SZ_1M - avp_fw->size);
+       wmb();
+       release_firmware(avp_fw);
+
+       ret = avp_reset(avp, AVP_KERNEL_VIRT_BASE);
+       if (ret) {
+               pr_err("%s: cannot reset the AVP.. aborting..\n", __func__);
+               goto err_reset;
+       }
+
+       enable_irq(avp->mbox_from_avp_pend_irq);
+       /* Initialize the avp_svc *first*. This creates RPC_CPU_PORT to be
+        * ready for remote commands. Then, connect to the
+        * remote RPC_AVP_PORT to be able to send library load/unload and
+        * suspend commands to it */
+       ret = avp_svc_start(avp->avp_svc);
+       if (ret)
+               goto err_avp_svc_start;
+
+       ep = trpc_create_connect(avp->rpc_node, "RPC_AVP_PORT", NULL,
+                                     NULL, -1);
+       if (IS_ERR(ep)) {
+               pr_err("%s: can't connect to RPC_AVP_PORT server\n", __func__);
+               ret = PTR_ERR(ep);
+               goto err_rpc_avp_port;
+       }
+       avp->avp_ep = ep;
+
+       avp->initialized = true;
+       smp_wmb();
+       pr_info("%s: avp init done\n", __func__);
+       return 0;
+
+err_rpc_avp_port:
+       avp_svc_stop(avp->avp_svc);
+err_avp_svc_start:
+       disable_irq(avp->mbox_from_avp_pend_irq);
+err_reset:
+       avp_halt(avp);
+err_req_fw:
+       nvmap_client_put(avp->nvmap_libs);
+err_nvmap_create_libs_client:
+       avp->nvmap_libs = NULL;
+       return ret;
+}
+
+/* Tear down a running AVP: mark it shutting down, quiesce the mailbox irq
+ * and receive work, halt the coprocessor, then close every remote endpoint
+ * and unwind RPC and library state. */
+static void avp_uninit(struct avp_info *avp)
+{
+       unsigned long flags;
+       struct rb_node *n;
+       struct remote_info *rinfo;
+
+       spin_lock_irqsave(&avp->state_lock, flags);
+       avp->initialized = false;
+       avp->shutdown = true;
+       spin_unlock_irqrestore(&avp->state_lock, flags);
+
+       disable_irq(avp->mbox_from_avp_pend_irq);
+       cancel_work_sync(&avp->recv_work);
+
+       avp_halt(avp);
+
+       /* Pop endpoints from the tree one at a time, dropping the spinlock
+        * around remote_close()/rinfo_put() — presumably those can sleep;
+        * the extra rinfo_get keeps the entry alive across the unlock. */
+       spin_lock_irqsave(&avp->state_lock, flags);
+       while ((n = rb_first(&avp->endpoints)) != NULL) {
+               rinfo = rb_entry(n, struct remote_info, rb_node);
+               rinfo_get(rinfo);
+               remote_remove(avp, rinfo);
+               spin_unlock_irqrestore(&avp->state_lock, flags);
+
+               remote_close(rinfo);
+               rinfo_put(rinfo);
+
+               spin_lock_irqsave(&avp->state_lock, flags);
+       }
+       spin_unlock_irqrestore(&avp->state_lock, flags);
+
+       avp_svc_stop(avp->avp_svc);
+
+       if (avp->avp_ep) {
+               trpc_close(avp->avp_ep);
+               avp->avp_ep = NULL;
+       }
+
+       /* drops the libs tracking list and the libs nvmap client */
+       libs_cleanup(avp);
+
+       avp->shutdown = false;
+       smp_wmb();
+}
+
+/* returns the remote lib handle in lib->handle */
+/* Load a library onto the AVP: copy the firmware image into an nvmap
+ * buffer the AVP can see, send SVC_LIBRARY_ATTACH with the user-supplied
+ * args, and wait for the attach reply.  On success the remote lib handle
+ * is returned in lib->handle.  The staging buffer is always freed — the
+ * AVP copies/relocates the image itself during attach.
+ * Caller must hold avp->libs_lock.  Returns 0 or a negative errno. */
+static int _load_lib(struct avp_info *avp, struct tegra_avp_lib *lib)
+{
+       struct svc_lib_attach svc;
+       struct svc_lib_attach_resp resp;
+       const struct firmware *fw;
+       void *args;
+       struct nvmap_handle_ref *lib_handle;
+       void *lib_data;
+       unsigned long lib_phys;
+       int ret;
+
+       pr_info("avp_lib: loading library %s\n", lib->name);
+
+       args = kmalloc(lib->args_len, GFP_KERNEL);
+       if (!args) {
+               pr_err("avp_lib: can't alloc mem for args (%d)\n",
+                       lib->args_len);
+               return -ENOMEM;
+       }
+       if (copy_from_user(args, lib->args, lib->args_len)) {
+               pr_err("avp_lib: can't copy lib args\n");
+               ret = -EFAULT;
+               goto err_cp_args;
+       }
+
+       ret = request_firmware(&fw, lib->name, avp->misc_dev.this_device);
+       if (ret) {
+               pr_err("avp_lib: Cannot read firmware '%s'\n", lib->name);
+               goto err_req_fw;
+       }
+
+       lib_handle = nvmap_alloc(avp->nvmap_libs, fw->size, L1_CACHE_BYTES,
+                                NVMAP_HANDLE_WRITE_COMBINE);
+       if (IS_ERR(lib_handle)) {
+               pr_err("avp_lib: can't nvmap alloc for lib '%s'\n", lib->name);
+               ret = PTR_ERR(lib_handle);
+               goto err_nvmap_alloc;
+       }
+
+       lib_data = nvmap_mmap(lib_handle);
+       if (!lib_data) {
+               pr_err("avp_lib: can't nvmap map for lib '%s'\n", lib->name);
+               ret = -ENOMEM;
+               goto err_nvmap_mmap;
+       }
+
+       lib_phys = nvmap_pin(avp->nvmap_libs, lib_handle);
+       if (IS_ERR((void *)lib_phys)) {
+               pr_err("avp_lib: can't nvmap pin for lib '%s'\n", lib->name);
+               /* was PTR_ERR(lib_handle): lib_handle is valid here, the
+                * failed value is lib_phys */
+               ret = PTR_ERR((void *)lib_phys);
+               goto err_nvmap_pin;
+       }
+
+       memcpy(lib_data, fw->data, fw->size);
+
+       svc.svc_id = SVC_LIBRARY_ATTACH;
+       svc.address = lib_phys;
+       svc.args_len = lib->args_len;
+       svc.lib_size = fw->size;
+       svc.reason = lib->greedy ? AVP_LIB_REASON_ATTACH_GREEDY :
+               AVP_LIB_REASON_ATTACH;
+       memcpy(svc.args, args, lib->args_len);
+       wmb();
+
+       /* send message, wait for reply */
+       ret = trpc_send_msg(avp->rpc_node, avp->avp_ep, &svc, sizeof(svc),
+                                GFP_KERNEL);
+       if (ret)
+               goto err_send_msg;
+
+       ret = trpc_recv_msg(avp->rpc_node, avp->avp_ep, &resp,
+                                sizeof(resp), -1);
+       if (ret != sizeof(resp)) {
+               pr_err("avp_lib: Couldn't get lib load reply (%d)\n", ret);
+               /* don't leak a positive partial-read count to the caller */
+               if (ret >= 0)
+                       ret = -EIO;
+               goto err_recv_msg;
+       } else if (resp.err) {
+               pr_err("avp_lib: got remote error (%d) while loading lib %s\n",
+                      resp.err, lib->name);
+               ret = -EPROTO;
+               goto err_recv_msg;
+       }
+       lib->handle = resp.lib_id;
+       ret = 0;
+       pr_info("avp_lib: Successfully loaded library %s (lib_id=%x)\n",
+               lib->name, resp.lib_id);
+
+       /* We free the memory here because by this point the AVP has already
+        * requested memory for the library for all the sections since it does
+        * it's own relocation and memory management. So, our allocations were
+        * temporary to hand the library code over to the AVP.
+        */
+
+err_recv_msg:
+err_send_msg:
+       nvmap_unpin(avp->nvmap_libs, lib_handle);
+err_nvmap_pin:
+       nvmap_munmap(lib_handle, lib_data);
+err_nvmap_mmap:
+       nvmap_free(avp->nvmap_libs, lib_handle);
+err_nvmap_alloc:
+       release_firmware(fw);
+err_req_fw:
+err_cp_args:
+       kfree(args);
+       return ret;
+}
+
+/* Ask the AVP to detach a previously loaded library ('handle') and wait
+ * for its reply.  'name' is used only for log messages.
+ * Returns 0 on success or a negative errno. */
+static int send_unload_lib_msg(struct avp_info *avp, u32 handle,
+                              const char *name)
+{
+       struct svc_lib_detach svc;
+       struct svc_lib_detach_resp resp;
+       int ret;
+
+       svc.svc_id = SVC_LIBRARY_DETACH;
+       svc.reason = AVP_LIB_REASON_DETACH;
+       svc.lib_id = handle;
+
+       ret = trpc_send_msg(avp->rpc_node, avp->avp_ep, &svc, sizeof(svc),
+                                GFP_KERNEL);
+       if (ret) {
+               pr_err("avp_lib: can't send unload message to avp for '%s'\n",
+                      name);
+               goto err;
+       }
+
+       ret = trpc_recv_msg(avp->rpc_node, avp->avp_ep, &resp,
+                                sizeof(resp), -1);
+       if (ret != sizeof(resp)) {
+               pr_err("avp_lib: Couldn't get unload reply for '%s' (%d)\n",
+                      name, ret);
+               /* map a short positive read to an error instead of
+                * returning the byte count to the caller */
+               if (ret >= 0)
+                       ret = -EIO;
+       } else if (resp.err) {
+               pr_err("avp_lib: remote error (%d) while unloading lib %s\n",
+                      resp.err, name);
+               ret = -EPROTO;
+       } else
+               ret = 0;
+err:
+       return ret;
+}
+
+/* Look up a loaded library by its remote handle.  Caller must hold
+ * avp->libs_lock.  Returns NULL when the handle is unknown. */
+static struct lib_item *_find_lib_locked(struct avp_info *avp, u32 handle)
+{
+       struct lib_item *cur;
+
+       list_for_each_entry(cur, &avp->libs, list)
+               if (cur->handle == handle)
+                       return cur;
+       return NULL;
+}
+
+/* Record a successfully loaded library so it can be found at unload time.
+ * Caller must hold avp->libs_lock.  Returns 0 or -ENOMEM. */
+static int _insert_lib_locked(struct avp_info *avp, u32 handle, char *name)
+{
+       struct lib_item *entry;
+
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               return -ENOMEM;
+       entry->handle = handle;
+       strlcpy(entry->name, name, TEGRA_AVP_LIB_MAX_NAME);
+       list_add_tail(&entry->list, &avp->libs);
+       return 0;
+}
+
+/* Unlink a tracking entry from the libs list and free it.
+ * Caller must hold avp->libs_lock. */
+static void _delete_lib_locked(struct avp_info *avp, struct lib_item *entry)
+{
+       list_del(&entry->list);
+       kfree(entry);
+}
+
+/* TEGRA_AVP_IOCTL_LOAD_LIB handler: copy in the request, load the library
+ * on the AVP, hand the resulting handle back to userspace, and record it
+ * locally.  If either the copy-out or the local bookkeeping fails, the
+ * remote library is unloaded again so it isn't leaked on the AVP. */
+static int handle_load_lib_ioctl(struct avp_info *avp, unsigned long arg)
+{
+       struct tegra_avp_lib lib;
+       int ret;
+
+       if (copy_from_user(&lib, (void __user *)arg, sizeof(lib)))
+               return -EFAULT;
+       /* the name doubles as a firmware path; force NUL termination */
+       lib.name[TEGRA_AVP_LIB_MAX_NAME - 1] = '\0';
+
+       if (lib.args_len > TEGRA_AVP_LIB_MAX_ARGS) {
+               pr_err("%s: library args too long (%d)\n", __func__,
+                       lib.args_len);
+               return -E2BIG;
+       }
+
+       mutex_lock(&avp->libs_lock);
+       ret = _load_lib(avp, &lib);
+       if (ret)
+               goto err_load_lib;
+
+       if (copy_to_user((void __user *)arg, &lib, sizeof(lib))) {
+               ret = -EFAULT;
+               goto err_copy_to_user;
+       }
+       ret = _insert_lib_locked(avp, lib.handle, lib.name);
+       if (ret) {
+               pr_err("%s: can't insert lib (%d)\n", __func__, ret);
+               goto err_insert_lib;
+       }
+
+       mutex_unlock(&avp->libs_lock);
+       return 0;
+
+err_insert_lib:
+err_copy_to_user:
+       /* unwind the remote side; best effort, original error is returned */
+       send_unload_lib_msg(avp, lib.handle, lib.name);
+       mutex_unlock(&avp->libs_lock);
+err_load_lib:
+       return ret;
+}
+
+/* TEGRA_AVP_IOCTL_UNLOAD_LIB handler: 'arg' is the remote library handle
+ * returned by a prior load.  The local tracking entry is removed even when
+ * the remote detach fails — NOTE(review): this makes a failed unload
+ * unretryable; confirm that is intentional. */
+static int handle_unload_lib_ioctl(struct avp_info *avp, unsigned long arg)
+{
+       struct lib_item *item;
+       int ret;
+
+       mutex_lock(&avp->libs_lock);
+       item = _find_lib_locked(avp, (u32)arg);
+       if (!item) {
+               pr_err("avp_lib: avp lib with handle 0x%x not found\n",
+                      (u32)arg);
+               ret = -ENOENT;
+               goto err_find;
+       }
+       ret = send_unload_lib_msg(avp, item->handle, item->name);
+       if (!ret)
+               DBG(AVP_DBG_TRACE_LIB, "avp_lib: unloaded '%s'\n", item->name);
+       else
+               pr_err("avp_lib: can't unload lib '%s'/0x%x (%d)\n", item->name,
+                      item->handle, ret);
+       /* drop local bookkeeping regardless of the remote result */
+       _delete_lib_locked(avp, item);
+
+err_find:
+       mutex_unlock(&avp->libs_lock);
+       return ret;
+}
+
+/* Drop every entry from the loaded-libs tracking list and release the
+ * libs nvmap client.  Used while tearing the AVP down, so no per-library
+ * detach messages are sent. */
+static void libs_cleanup(struct avp_info *avp)
+{
+       struct lib_item *cur, *tmp;
+
+       mutex_lock(&avp->libs_lock);
+       list_for_each_entry_safe(cur, tmp, &avp->libs, list)
+               _delete_lib_locked(avp, cur);
+       mutex_unlock(&avp->libs_lock);
+
+       nvmap_client_put(avp->nvmap_libs);
+       avp->nvmap_libs = NULL;
+}
+
+/* Char-dev ioctl entry point: validates the command number range and
+ * dispatches to the library load/unload handlers. */
+static long tegra_avp_ioctl(struct file *file, unsigned int cmd,
+                           unsigned long arg)
+{
+       struct avp_info *avp = tegra_avp;
+
+       if (_IOC_TYPE(cmd) != TEGRA_AVP_IOCTL_MAGIC ||
+           _IOC_NR(cmd) < TEGRA_AVP_IOCTL_MIN_NR ||
+           _IOC_NR(cmd) > TEGRA_AVP_IOCTL_MAX_NR)
+               return -ENOTTY;
+
+       switch (cmd) {
+       case TEGRA_AVP_IOCTL_LOAD_LIB:
+               return handle_load_lib_ioctl(avp, arg);
+       case TEGRA_AVP_IOCTL_UNLOAD_LIB:
+               return handle_unload_lib_ioctl(avp, arg);
+       default:
+               pr_err("avp_lib: Unknown tegra_avp ioctl 0x%x\n", _IOC_NR(cmd));
+               return -ENOTTY;
+       }
+}
+
+/* Char-dev open: allows exactly one userspace client at a time and boots
+ * the AVP with the default kernel firmware on a successful open. */
+static int tegra_avp_open(struct inode *inode, struct file *file)
+{
+       struct avp_info *avp = tegra_avp;
+       int ret = 0;
+
+       nonseekable_open(inode, file);
+
+       mutex_lock(&avp->open_lock);
+       /* only one userspace client at a time */
+       if (avp->opened) {
+               pr_err("%s: already have client, aborting\n", __func__);
+               ret = -EBUSY;
+               goto out;
+       }
+
+       /* opened only when the AVP actually came up */
+       ret = avp_init(avp, TEGRA_AVP_KERNEL_FW);
+       avp->opened = !ret;
+out:
+       mutex_unlock(&avp->open_lock);
+       return ret;
+}
+
+/* Char-dev release: tears the AVP down again when the single client
+ * closes its fd.  Release without a matching open is rejected. */
+static int tegra_avp_release(struct inode *inode, struct file *file)
+{
+       struct avp_info *avp = tegra_avp;
+       int ret = 0;
+
+       pr_info("%s: release\n", __func__);
+       mutex_lock(&avp->open_lock);
+       if (avp->opened) {
+               avp_uninit(avp);
+               avp->opened = false;
+       } else {
+               pr_err("%s: releasing while in invalid state\n", __func__);
+               ret = -EINVAL;
+       }
+       mutex_unlock(&avp->open_lock);
+       return ret;
+}
+
+/* Tell the AVP to enter LP0: it saves IRAM into the backup buffer we hand
+ * it and then raises the completion flag (the extra u32 appended to the
+ * backup area).  Polls that flag for up to one second.
+ * Returns 0 on success, -ETIMEDOUT if the AVP never signals, or the
+ * trpc_send_msg error. */
+static int avp_enter_lp0(struct avp_info *avp)
+{
+       volatile u32 *suspend_done =
+               avp->iram_backup_data + TEGRA_IRAM_SIZE;
+       struct svc_enter_lp0 svc;
+       unsigned long deadline;
+       int ret;
+
+       svc.svc_id = SVC_ENTER_LP0;
+       svc.src_addr = (u32)TEGRA_IRAM_BASE;
+       svc.buf_addr = (u32)avp->iram_backup_phys;
+       svc.buf_size = TEGRA_IRAM_SIZE;
+
+       *suspend_done = 0;
+       wmb();
+
+       ret = trpc_send_msg(avp->rpc_node, avp->avp_ep, &svc, sizeof(svc),
+                                GFP_KERNEL);
+       if (ret) {
+               pr_err("%s: cannot send AVP suspend message\n", __func__);
+               return ret;
+       }
+
+       deadline = jiffies + msecs_to_jiffies(1000);
+       rmb();
+       while (!*suspend_done && time_before(jiffies, deadline)) {
+               udelay(10);
+               rmb();
+       }
+
+       rmb();
+       if (!*suspend_done) {
+               pr_err("%s: AVP failed to suspend\n", __func__);
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+/* Platform suspend hook: asks a running AVP to enter LP0, records the
+ * resume address it reports, and masks the mailbox irq.  A halted AVP is
+ * a no-op.  NOTE(review): on the error path avp->suspending stays true
+ * and the AVP is left in an unknown state — see TODO below. */
+static int tegra_avp_suspend(struct platform_device *pdev, pm_message_t state)
+{
+       struct avp_info *avp = tegra_avp;
+       unsigned long flags;
+       int ret;
+
+       pr_info("%s()+\n", __func__);
+       spin_lock_irqsave(&avp->state_lock, flags);
+       if (!avp->initialized) {
+               spin_unlock_irqrestore(&avp->state_lock, flags);
+               return 0;
+       }
+       avp->suspending = true;
+       spin_unlock_irqrestore(&avp->state_lock, flags);
+
+       ret = avp_enter_lp0(avp);
+       if (ret)
+               goto err;
+
+       /* the AVP publishes where it wants to be restarted after LP0 */
+       avp->resume_addr = readl(TEGRA_AVP_RESUME_ADDR);
+       if (!avp->resume_addr) {
+               pr_err("%s: AVP failed to set it's resume address\n", __func__);
+               ret = -EINVAL;
+               goto err;
+       }
+
+       disable_irq(avp->mbox_from_avp_pend_irq);
+
+       pr_info("avp_suspend: resume_addr=%lx\n", avp->resume_addr);
+       /* clear bit 0 — presumably a Thumb-mode bit; confirm against fw */
+       avp->resume_addr &= 0xfffffffeUL;
+       pr_info("%s()-\n", __func__);
+
+       return 0;
+
+err:
+       /* TODO: we need to kill the AVP so that when we come back
+        * it could be reinitialized.. We'd probably need to kill
+        * the users of it so they don't have the wrong state.
+        */
+       return ret;
+}
+
+/* Platform resume hook: restarts the AVP at the resume address captured
+ * during suspend and re-enables the mailbox irq.  A reset failure is now
+ * reported instead of being silently discarded. */
+static int tegra_avp_resume(struct platform_device *pdev)
+{
+       struct avp_info *avp = tegra_avp;
+       int ret = 0;
+
+       pr_info("%s()+\n", __func__);
+       smp_rmb();
+       if (!avp->initialized)
+               goto out;
+
+       BUG_ON(!avp->resume_addr);
+
+       ret = avp_reset(avp, avp->resume_addr);
+       if (ret) {
+               pr_err("%s: cannot restart the AVP\n", __func__);
+               goto out;
+       }
+       avp->resume_addr = 0;
+       avp->suspending = false;
+       smp_wmb();
+       enable_irq(avp->mbox_from_avp_pend_irq);
+
+       pr_info("%s()-\n", __func__);
+
+out:
+       return ret;
+}
+
+/* /dev/tegra_avp file operations: open boots the AVP, release halts it,
+ * ioctl handles library load/unload. */
+static const struct file_operations tegra_avp_fops = {
+       .owner          = THIS_MODULE,
+       .open           = tegra_avp_open,
+       .release        = tegra_avp_release,
+       .unlocked_ioctl = tegra_avp_ioctl,
+};
+
+/* Remote trpc node representing the AVP side of the RPC link. */
+static struct trpc_node avp_trpc_node = {
+       .name           = "avp-remote",
+       .type           = TRPC_NODE_REMOTE,
+       .try_connect    = avp_node_try_connect,
+};
+
+/* Driver probe: allocates and pins the AVP kernel carveout and the IRAM
+ * backup buffer, sets up locks/workqueue/clock/message area, registers
+ * the trpc node, the service layer, the misc device, and the mailbox irq.
+ * The AVP itself is left halted until the char dev is opened. */
+static int tegra_avp_probe(struct platform_device *pdev)
+{
+       void *msg_area;
+       struct avp_info *avp;
+       int ret = 0;
+       int irq;
+
+       irq = platform_get_irq_byname(pdev, "mbox_from_avp_pending");
+       if (irq < 0) {
+               pr_err("%s: invalid platform data\n", __func__);
+               return -EINVAL;
+       }
+
+       avp = kzalloc(sizeof(struct avp_info), GFP_KERNEL);
+       if (!avp) {
+               pr_err("%s: cannot allocate avp_info\n", __func__);
+               return -ENOMEM;
+       }
+
+       avp->nvmap_drv = nvmap_create_client(nvmap_dev, "avp_core");
+       if (IS_ERR(avp->nvmap_drv)) {
+               pr_err("%s: cannot create drv nvmap client\n", __func__);
+               ret = PTR_ERR(avp->nvmap_drv);
+               goto err_nvmap_create_drv_client;
+       }
+
+       /* 1MB buffer, 1MB aligned, holding the AVP kernel image */
+       avp->kernel_handle = nvmap_alloc(avp->nvmap_drv, SZ_1M, SZ_1M,
+                                        NVMAP_HANDLE_WRITE_COMBINE);
+       if (IS_ERR(avp->kernel_handle)) {
+               pr_err("%s: cannot create handle\n", __func__);
+               ret = PTR_ERR(avp->kernel_handle);
+               goto err_nvmap_alloc;
+       }
+
+       avp->kernel_data = nvmap_mmap(avp->kernel_handle);
+       if (!avp->kernel_data) {
+               pr_err("%s: cannot map kernel handle\n", __func__);
+               ret = -ENOMEM;
+               goto err_nvmap_mmap;
+       }
+
+       avp->kernel_phys = nvmap_pin(avp->nvmap_drv, avp->kernel_handle);
+       if (IS_ERR((void *)avp->kernel_phys)) {
+               pr_err("%s: cannot pin kernel handle\n", __func__);
+               ret = PTR_ERR((void *)avp->kernel_phys);
+               goto err_nvmap_pin;
+       }
+
+       /* allocate an extra 4 bytes at the end which AVP uses to signal to
+        * us that it is done suspending.
+        */
+       avp->iram_backup_handle =
+               nvmap_alloc(avp->nvmap_drv, TEGRA_IRAM_SIZE + 4,
+                           L1_CACHE_BYTES, NVMAP_HANDLE_WRITE_COMBINE);
+       if (IS_ERR(avp->iram_backup_handle)) {
+               pr_err("%s: cannot create handle for iram backup\n", __func__);
+               ret = PTR_ERR(avp->iram_backup_handle);
+               goto err_iram_nvmap_alloc;
+       }
+       avp->iram_backup_data = nvmap_mmap(avp->iram_backup_handle);
+       if (!avp->iram_backup_data) {
+               pr_err("%s: cannot map iram backup handle\n", __func__);
+               ret = -ENOMEM;
+               goto err_iram_nvmap_mmap;
+       }
+       avp->iram_backup_phys = nvmap_pin(avp->nvmap_drv,
+                                         avp->iram_backup_handle);
+       if (IS_ERR((void *)avp->iram_backup_phys)) {
+               pr_err("%s: cannot pin iram backup handle\n", __func__);
+               ret = PTR_ERR((void *)avp->iram_backup_phys);
+               goto err_iram_nvmap_pin;
+       }
+
+       avp->mbox_from_avp_pend_irq = irq;
+       avp->endpoints = RB_ROOT;
+       spin_lock_init(&avp->state_lock);
+       mutex_init(&avp->open_lock);
+       mutex_init(&avp->to_avp_lock);
+       mutex_init(&avp->from_avp_lock);
+       INIT_WORK(&avp->recv_work, process_avp_message);
+
+       mutex_init(&avp->libs_lock);
+       INIT_LIST_HEAD(&avp->libs);
+
+       /* single-threaded, high-priority queue for inbound AVP messages */
+       avp->recv_wq = alloc_workqueue("avp-msg-recv",
+                                      WQ_NON_REENTRANT | WQ_HIGHPRI, 1);
+       if (!avp->recv_wq) {
+               pr_err("%s: can't create recve workqueue\n", __func__);
+               ret = -ENOMEM;
+               goto err_create_wq;
+       }
+
+       avp->cop_clk = clk_get(&pdev->dev, "cop");
+       if (IS_ERR(avp->cop_clk)) {
+               pr_err("%s: Couldn't get cop clock\n", TEGRA_AVP_NAME);
+               ret = -ENOENT;
+               goto err_get_cop_clk;
+       }
+
+       /* two message areas: to-AVP followed by from-AVP */
+       msg_area = dma_alloc_coherent(&pdev->dev, AVP_MSG_AREA_SIZE * 2,
+                                     &avp->msg_area_addr, GFP_KERNEL);
+       if (!msg_area) {
+               pr_err("%s: cannot allocate msg_area\n", __func__);
+               ret = -ENOMEM;
+               goto err_alloc_msg_area;
+       }
+       memset(msg_area, 0, AVP_MSG_AREA_SIZE * 2);
+       /* mailbox word: area address >> 4 plus valid/interrupt-enable bits */
+       avp->msg = ((avp->msg_area_addr >> 4) |
+                       MBOX_MSG_VALID | MBOX_MSG_PENDING_INT_EN);
+       avp->msg_to_avp = msg_area;
+       avp->msg_from_avp = msg_area + AVP_MSG_AREA_SIZE;
+
+       avp_halt(avp);
+
+       avp_trpc_node.priv = avp;
+       ret = trpc_node_register(&avp_trpc_node);
+       if (ret) {
+               pr_err("%s: Can't register avp rpc node\n", __func__);
+               goto err_node_reg;
+       }
+       avp->rpc_node = &avp_trpc_node;
+
+       avp->avp_svc = avp_svc_init(pdev, avp->rpc_node);
+       if (IS_ERR(avp->avp_svc)) {
+               pr_err("%s: Cannot initialize avp_svc\n", __func__);
+               ret = PTR_ERR(avp->avp_svc);
+               goto err_avp_svc_init;
+       }
+
+       avp->misc_dev.minor = MISC_DYNAMIC_MINOR;
+       avp->misc_dev.name = "tegra_avp";
+       avp->misc_dev.fops = &tegra_avp_fops;
+
+       ret = misc_register(&avp->misc_dev);
+       if (ret) {
+               pr_err("%s: Unable to register misc device!\n", TEGRA_AVP_NAME);
+               goto err_misc_reg;
+       }
+
+       /* irq stays masked until avp_init actually boots the AVP */
+       ret = request_irq(irq, avp_mbox_pending_isr, 0, TEGRA_AVP_NAME, avp);
+       if (ret) {
+               pr_err("%s: cannot register irq handler\n", __func__);
+               goto err_req_irq_pend;
+       }
+       disable_irq(avp->mbox_from_avp_pend_irq);
+
+       tegra_avp = avp;
+
+       pr_info("%s: driver registered, kernel %lx(%p), msg area %lx/%lx\n",
+               __func__, avp->kernel_phys, avp->kernel_data,
+               (unsigned long)avp->msg_area_addr,
+               (unsigned long)avp->msg_area_addr + AVP_MSG_AREA_SIZE);
+
+       return 0;
+
+err_req_irq_pend:
+       misc_deregister(&avp->misc_dev);
+err_misc_reg:
+       avp_svc_destroy(avp->avp_svc);
+err_avp_svc_init:
+       trpc_node_unregister(avp->rpc_node);
+err_node_reg:
+       dma_free_coherent(&pdev->dev, AVP_MSG_AREA_SIZE * 2, msg_area,
+                         avp->msg_area_addr);
+err_alloc_msg_area:
+       clk_put(avp->cop_clk);
+err_get_cop_clk:
+       destroy_workqueue(avp->recv_wq);
+err_create_wq:
+       nvmap_unpin(avp->nvmap_drv, avp->iram_backup_handle);
+err_iram_nvmap_pin:
+       nvmap_munmap(avp->iram_backup_handle, avp->iram_backup_data);
+err_iram_nvmap_mmap:
+       nvmap_free(avp->nvmap_drv, avp->iram_backup_handle);
+err_iram_nvmap_alloc:
+       nvmap_unpin(avp->nvmap_drv, avp->kernel_handle);
+err_nvmap_pin:
+       nvmap_munmap(avp->kernel_handle, avp->kernel_data);
+err_nvmap_mmap:
+       nvmap_free(avp->nvmap_drv, avp->kernel_handle);
+err_nvmap_alloc:
+       nvmap_client_put(avp->nvmap_drv);
+err_nvmap_create_drv_client:
+       kfree(avp);
+       tegra_avp = NULL;
+       return ret;
+}
+
+/* Driver remove: refuses while a client has the char dev open, then
+ * unwinds everything probe acquired (in reverse order).  Now also frees
+ * the mailbox irq requested in probe, which was previously leaked. */
+static int tegra_avp_remove(struct platform_device *pdev)
+{
+       struct avp_info *avp = tegra_avp;
+
+       if (!avp)
+               return 0;
+
+       mutex_lock(&avp->open_lock);
+       if (avp->opened) {
+               mutex_unlock(&avp->open_lock);
+               return -EBUSY;
+       }
+       /* ensure that noone can open while we tear down */
+       avp->opened = true;
+       mutex_unlock(&avp->open_lock);
+
+       /* release the irq handler registered in probe */
+       free_irq(avp->mbox_from_avp_pend_irq, avp);
+       misc_deregister(&avp->misc_dev);
+
+       avp_halt(avp);
+
+       avp_svc_destroy(avp->avp_svc);
+       trpc_node_unregister(avp->rpc_node);
+       dma_free_coherent(&pdev->dev, AVP_MSG_AREA_SIZE * 2, avp->msg_to_avp,
+                         avp->msg_area_addr);
+       clk_put(avp->cop_clk);
+       destroy_workqueue(avp->recv_wq);
+       nvmap_unpin(avp->nvmap_drv, avp->iram_backup_handle);
+       nvmap_munmap(avp->iram_backup_handle, avp->iram_backup_data);
+       nvmap_free(avp->nvmap_drv, avp->iram_backup_handle);
+       nvmap_unpin(avp->nvmap_drv, avp->kernel_handle);
+       nvmap_munmap(avp->kernel_handle, avp->kernel_data);
+       nvmap_free(avp->nvmap_drv, avp->kernel_handle);
+       nvmap_client_put(avp->nvmap_drv);
+       kfree(avp);
+       tegra_avp = NULL;
+       return 0;
+}
+
+/* Platform driver glue; suspend/resume participate in LP0 entry/exit. */
+static struct platform_driver tegra_avp_driver = {
+       .probe          = tegra_avp_probe,
+       .remove         = tegra_avp_remove,
+       .suspend        = tegra_avp_suspend,
+       .resume         = tegra_avp_resume,
+       .driver = {
+               .name   = TEGRA_AVP_NAME,
+               .owner  = THIS_MODULE,
+       },
+};
+
+/* Module entry point: register the platform driver. */
+static int __init tegra_avp_init(void)
+{
+       return platform_driver_register(&tegra_avp_driver);
+}
+
+/* Module exit point: unregister the platform driver. */
+static void __exit tegra_avp_exit(void)
+{
+       platform_driver_unregister(&tegra_avp_driver);
+}
+
+module_init(tegra_avp_init);
+module_exit(tegra_avp_exit);
diff --git a/drivers/media/video/tegra/avp/avp.h b/drivers/media/video/tegra/avp/avp.h
new file mode 100644 (file)
index 0000000..f5e4e91
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MEDIA_VIDEO_TEGRA_AVP_H
+#define __MEDIA_VIDEO_TEGRA_AVP_H
+
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include "trpc.h"
+
+/* Per-subsystem trace flags; OR them into avp_debug_mask to enable. */
+enum {
+       AVP_DBG_TRACE_XPC       = 1U << 0,
+       AVP_DBG_TRACE_XPC_IRQ   = 1U << 1,
+       AVP_DBG_TRACE_XPC_MSG   = 1U << 2,
+       AVP_DBG_TRACE_XPC_CONN  = 1U << 3,
+       AVP_DBG_TRACE_SVC       = 1U << 4,
+       AVP_DBG_TRACE_TRPC_MSG  = 1U << 5,
+       AVP_DBG_TRACE_TRPC_CONN = 1U << 6,
+       AVP_DBG_TRACE_LIB       = 1U << 7,
+};
+
+/* Runtime-tunable mask of AVP_DBG_TRACE_* bits (defined in avp.c). */
+extern u32 avp_debug_mask;
+/* Conditional trace: logs via pr_info only when 'flag' is enabled. */
+#define DBG(flag, args...) \
+       do { if (unlikely(avp_debug_mask & (flag))) pr_info(args); } while (0)
+
+struct avp_svc_info;
+
+/* avp_svc lifecycle: init/destroy pair and start/stop pair (see avp_svc.c) */
+struct avp_svc_info *avp_svc_init(struct platform_device *pdev,
+                                 struct trpc_node *rpc_node);
+void avp_svc_destroy(struct avp_svc_info *avp_svc);
+int avp_svc_start(struct avp_svc_info *svc);
+void avp_svc_stop(struct avp_svc_info *svc);
+
+#endif
diff --git a/drivers/media/video/tegra/avp/avp_msg.h b/drivers/media/video/tegra/avp/avp_msg.h
new file mode 100644 (file)
index 0000000..54d3a63
--- /dev/null
@@ -0,0 +1,342 @@
+/* drivers/media/video/tegra/avp/avp_msg.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MEDIA_VIDEO_TEGRA_AVP_MSG_H
+#define __MEDIA_VIDEO_TEGRA_AVP_MSG_H
+
+#include <linux/tegra_avp.h>
+#include <linux/types.h>
+
+/* Note: the port name string is not NUL terminated, so make sure to
+ * allocate appropriate space locally when operating on the string */
+#define XPC_PORT_NAME_LEN              16
+
+#define SVC_ARGS_MAX_LEN               220
+#define SVC_MAX_STRING_LEN             200
+
+/* error codes carried in response messages.  NOTE(review): these
+ * values are part of the wire ABI with the AVP firmware, not errno
+ * values — do not change them. */
+#define AVP_ERR_ENOTSUP                        0x2
+#define AVP_ERR_EINVAL                 0x4
+#define AVP_ERR_ENOMEM                 0x6
+#define AVP_ERR_EACCES                 0x00030010
+
+/* svc_id values for messages exchanged with the AVP over the SVC port.
+ * NOTE(review): wire ABI with the AVP firmware image — must not be
+ * renumbered. */
+enum {
+       SVC_NVMAP_CREATE                = 0,
+       SVC_NVMAP_CREATE_RESPONSE       = 1,
+       SVC_NVMAP_FREE                  = 3,
+       SVC_NVMAP_ALLOC                 = 4,
+       SVC_NVMAP_ALLOC_RESPONSE        = 5,
+       SVC_NVMAP_PIN                   = 6,
+       SVC_NVMAP_PIN_RESPONSE          = 7,
+       SVC_NVMAP_UNPIN                 = 8,
+       SVC_NVMAP_UNPIN_RESPONSE        = 9,
+       SVC_NVMAP_GET_ADDRESS           = 10,
+       SVC_NVMAP_GET_ADDRESS_RESPONSE  = 11,
+       SVC_NVMAP_FROM_ID               = 12,
+       SVC_NVMAP_FROM_ID_RESPONSE      = 13,
+       SVC_MODULE_CLOCK                = 14,
+       SVC_MODULE_CLOCK_RESPONSE       = 15,
+       SVC_MODULE_RESET                = 16,
+       SVC_MODULE_RESET_RESPONSE       = 17,
+       SVC_POWER_REGISTER              = 18,
+       SVC_POWER_UNREGISTER            = 19,
+       SVC_POWER_STARVATION            = 20,
+       SVC_POWER_BUSY_HINT             = 21,
+       SVC_POWER_BUSY_HINT_MULTI       = 22,
+       SVC_DFS_GETSTATE                = 23,
+       SVC_DFS_GETSTATE_RESPONSE       = 24,
+       SVC_POWER_RESPONSE              = 25,
+       SVC_POWER_MAXFREQ               = 26,
+       SVC_ENTER_LP0                   = 27,
+       SVC_ENTER_LP0_RESPONSE          = 28,
+       SVC_PRINTF                      = 29,
+       SVC_LIBRARY_ATTACH              = 30,
+       SVC_LIBRARY_ATTACH_RESPONSE     = 31,
+       SVC_LIBRARY_DETACH              = 32,
+       SVC_LIBRARY_DETACH_RESPONSE     = 33,
+       SVC_AVP_WDT_RESET               = 34,
+       SVC_DFS_GET_CLK_UTIL            = 35,
+       SVC_DFS_GET_CLK_UTIL_RESPONSE   = 36,
+};
+
+/* every svc message starts with a 32-bit svc_id selecting the request */
+struct svc_msg {
+       u32     svc_id;
+       u8      data[0];
+};
+
+/* generic status-only response */
+struct svc_common_resp {
+       u32     svc_id;
+       u32     err;
+};
+
+struct svc_printf {
+       u32             svc_id;
+       /* not guaranteed NUL terminated; receiver must bound the copy */
+       const char      str[SVC_MAX_STRING_LEN];
+};
+
+struct svc_enter_lp0 {
+       u32     svc_id;
+       u32     src_addr;
+       u32     buf_addr;
+       u32     buf_size;
+};
+
+/* nvmap messages */
+struct svc_nvmap_create {
+       u32     svc_id;
+       u32     size;
+};
+
+struct svc_nvmap_create_resp {
+       u32     svc_id;
+       u32     handle_id;
+       u32     err;
+};
+
+/* heap ids the AVP uses in svc_nvmap_alloc.heaps[]; translated to
+ * NVMAP_HEAP_* masks by the service */
+enum {
+       AVP_NVMAP_HEAP_EXTERNAL                 = 1,
+       AVP_NVMAP_HEAP_GART                     = 2,
+       AVP_NVMAP_HEAP_EXTERNAL_CARVEOUT        = 3,
+       AVP_NVMAP_HEAP_IRAM                     = 4,
+};
+
+struct svc_nvmap_alloc {
+       u32     svc_id;
+       u32     handle_id;
+       u32     heaps[4];
+       u32     num_heaps;
+       u32     align;
+       u32     mapping_type;
+};
+
+struct svc_nvmap_free {
+       u32     svc_id;
+       u32     handle_id;
+};
+
+struct svc_nvmap_pin {
+       u32     svc_id;
+       u32     handle_id;
+};
+
+struct svc_nvmap_pin_resp {
+       u32     svc_id;
+       u32     addr;
+};
+
+struct svc_nvmap_unpin {
+       u32     svc_id;
+       u32     handle_id;
+};
+
+struct svc_nvmap_from_id {
+       u32     svc_id;
+       u32     handle_id;
+};
+
+struct svc_nvmap_get_addr {
+       u32     svc_id;
+       u32     handle_id;
+       u32     offs;
+};
+
+struct svc_nvmap_get_addr_resp {
+       u32     svc_id;
+       u32     addr;
+};
+
+/* library management messages */
+enum {
+       AVP_LIB_REASON_ATTACH           = 0,
+       AVP_LIB_REASON_DETACH           = 1,
+       AVP_LIB_REASON_ATTACH_GREEDY    = 2,
+};
+
+struct svc_lib_attach {
+       u32     svc_id;
+       u32     address;
+       u32     args_len;
+       u32     lib_size;
+       u8      args[SVC_ARGS_MAX_LEN];
+       u32     reason;
+};
+
+struct svc_lib_attach_resp {
+       u32     svc_id;
+       u32     err;
+       u32     lib_id;
+};
+
+struct svc_lib_detach {
+       u32     svc_id;
+       u32     reason;
+       u32     lib_id;
+};
+
+struct svc_lib_detach_resp {
+       u32     svc_id;
+       u32     err;
+};
+
+/* hw module management from the AVP side */
+/* NOTE(review): ids are as known to the AVP firmware; only VCP, BSEA
+ * and VDE are clock-managed by the host-side service */
+enum {
+       AVP_MODULE_ID_AVP       = 2,
+       AVP_MODULE_ID_VCP       = 3,
+       AVP_MODULE_ID_BSEA      = 27,
+       AVP_MODULE_ID_VDE       = 28,
+       AVP_MODULE_ID_MPE       = 29,
+};
+
+struct svc_module_ctrl {
+       u32     svc_id;
+       u32     module_id;
+       u32     client_id;
+       u8      enable;
+};
+
+/* power messages */
+struct svc_pwr_register {
+       u32     svc_id;
+       u32     client_id;
+       u32     unused;
+};
+
+struct svc_pwr_register_resp {
+       u32     svc_id;
+       u32     err;
+       u32     client_id;
+};
+
+struct svc_pwr_starve_hint {
+       u32     svc_id;
+       u32     dfs_clk_id;
+       u32     client_id;
+       u8      starving;
+};
+
+struct svc_pwr_busy_hint {
+       u32     svc_id;
+       u32     dfs_clk_id;
+       u32     client_id;
+       u32     boost_ms; /* duration */
+       u32     boost_freq; /* in khz */
+};
+
+struct svc_pwr_max_freq {
+       u32     svc_id;
+       u32     module_id;
+};
+
+struct svc_pwr_max_freq_resp {
+       u32     svc_id;
+       u32     freq;
+};
+
+/* dfs related messages */
+enum {
+       AVP_DFS_STATE_INVALID           = 0,
+       AVP_DFS_STATE_DISABLED          = 1,
+       AVP_DFS_STATE_STOPPED           = 2,
+       AVP_DFS_STATE_CLOSED_LOOP       = 3,
+       AVP_DFS_STATE_PROFILED_LOOP     = 4,
+};
+
+struct svc_dfs_get_state_resp {
+       u32     svc_id;
+       u32     state;
+};
+
+enum {
+       AVP_DFS_CLK_CPU         = 1,
+       AVP_DFS_CLK_AVP         = 2,
+       AVP_DFS_CLK_SYSTEM      = 3,
+       AVP_DFS_CLK_AHB         = 4,
+       AVP_DFS_CLK_APB         = 5,
+       AVP_DFS_CLK_VDE         = 6,
+       /* external memory controller */
+       AVP_DFS_CLK_EMC         = 7,
+};
+
+struct avp_clk_usage {
+       u32     min;
+       u32     max;
+       u32     curr_min;
+       u32     curr_max;
+       u32     curr;
+       u32     avg; /* average activity.. whatever that means */
+};
+
+struct svc_dfs_get_clk_util {
+       u32     svc_id;
+       u32     dfs_clk_id;
+};
+
+/* all units are in kHz */
+struct svc_dfs_get_clk_util_resp {
+       u32                     svc_id;
+       u32                     err;
+       struct avp_clk_usage    usage;
+};
+
+/************************/
+
+/* low-level xpc port-management commands exchanged over the mailbox */
+enum {
+       CMD_ACK         = 0,
+       CMD_CONNECT     = 2,
+       CMD_DISCONNECT  = 3,
+       CMD_MESSAGE     = 4,
+       CMD_RESPONSE    = 5,
+};
+
+struct msg_data {
+       u32             cmd;
+       u8              data[0];
+};
+
+struct msg_ack {
+       u32             cmd;
+       u32             arg;
+};
+
+struct msg_connect {
+       u32             cmd;
+       u32             port_id;
+       /* not NUL terminated, just 0 padded */
+       char            name[XPC_PORT_NAME_LEN];
+};
+
+struct msg_connect_reply {
+       u32             cmd;
+       u32             port_id;
+};
+
+struct msg_disconnect {
+       u32             cmd;
+       u32             port_id;
+};
+
+struct msg_disconnect_reply {
+       u32             cmd;
+       u32             ack;
+};
+
+struct msg_port_data {
+       u32             cmd;
+       u32             port_id;
+       u32             msg_len;
+       u8              data[0];
+};
diff --git a/drivers/media/video/tegra/avp/avp_svc.c b/drivers/media/video/tegra/avp/avp_svc.c
new file mode 100644 (file)
index 0000000..983d602
--- /dev/null
@@ -0,0 +1,690 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/tegra_rpc.h>
+#include <linux/types.h>
+
+#include <mach/clk.h>
+#include <mach/nvmap.h>
+
+#include "../../../../video/tegra/nvmap/nvmap.h"
+
+#include "avp_msg.h"
+#include "trpc.h"
+#include "avp.h"
+
+/* host-side clock slots; one bit each in avp_svc_info.clk_reqs */
+enum {
+       CLK_REQUEST_VCP         = 0,
+       CLK_REQUEST_BSEA        = 1,
+       CLK_REQUEST_VDE         = 2,
+       NUM_CLK_REQUESTS,
+};
+
+/* maps an AVP module id to the clk name and clock slot we manage */
+struct avp_module {
+       const char              *name;
+       u32                     clk_req;
+};
+
+/* indexed by AVP_MODULE_ID_*; entries with a NULL .name are module ids
+ * the host does not clock-manage */
+static struct avp_module avp_modules[] = {
+       [AVP_MODULE_ID_VCP] = {
+               .name           = "vcp",
+               .clk_req        = CLK_REQUEST_VCP,
+       },
+       [AVP_MODULE_ID_BSEA]    = {
+               .name           = "bsea",
+               .clk_req        = CLK_REQUEST_BSEA,
+       },
+       [AVP_MODULE_ID_VDE]     = {
+               .name           = "vde",
+               .clk_req        = CLK_REQUEST_VDE,
+       },
+};
+#define NUM_AVP_MODULES                ARRAY_SIZE(avp_modules)
+
+/* per-device context for the CPU-side svc handler */
+struct avp_svc_info {
+       struct clk                      *clks[NUM_CLK_REQUESTS];
+       /* used for dvfs */
+       struct clk                      *sclk;
+
+       /* XXX: if # of clocks > BITS_PER_LONG, fix this */
+       unsigned long                   clk_reqs;
+       struct mutex                    clk_lock;
+
+       struct trpc_endpoint            *cpu_ep;
+       struct task_struct              *svc_thread;
+
+       /* client for remote allocations, for easy tear down */
+       struct nvmap_client             *nvmap_remote;
+       struct trpc_node                *rpc_node;
+};
+
+/* Create an nvmap handle of msg->size bytes on behalf of the AVP and
+ * reply with its id (0 and AVP_ERR_ENOMEM on failure).  The handle is
+ * owned by the nvmap_remote client so it is cleaned up on teardown. */
+static void do_svc_nvmap_create(struct avp_svc_info *avp_svc,
+                               struct svc_msg *_msg,
+                               size_t len)
+{
+       struct svc_nvmap_create *msg = (struct svc_nvmap_create *)_msg;
+       struct svc_nvmap_create_resp resp;
+       struct nvmap_handle_ref *handle;
+       u32 handle_id = 0;
+       u32 err = 0;
+
+       handle = nvmap_create_handle(avp_svc->nvmap_remote, msg->size);
+       if (unlikely(IS_ERR(handle))) {
+               pr_err("avp_svc: error creating handle (%d bytes) for remote\n",
+                      msg->size);
+               err = AVP_ERR_ENOMEM;
+       } else
+               handle_id = (u32)nvmap_ref_to_id(handle);
+
+       resp.svc_id = SVC_NVMAP_CREATE_RESPONSE;
+       resp.err = err;
+       resp.handle_id = handle_id;
+       trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+                     sizeof(resp), GFP_KERNEL);
+       /* TODO: do we need to put the handle if send_msg failed? */
+}
+
+/* Allocate backing memory for a previously created remote handle.
+ * Translates the AVP heap ids in the request into NVMAP_HEAP_* masks;
+ * an empty (or invalid) heap list falls back to generic carveout or
+ * sysmem.  Replies with 0 or an AVP_ERR_* code. */
+static void do_svc_nvmap_alloc(struct avp_svc_info *avp_svc,
+                              struct svc_msg *_msg,
+                              size_t len)
+{
+       struct svc_nvmap_alloc *msg = (struct svc_nvmap_alloc *)_msg;
+       struct svc_common_resp resp;
+       struct nvmap_handle *handle;
+       u32 err = 0;
+       u32 heap_mask = 0;
+       int i;
+       size_t align;
+
+       /* validate that the id really belongs to the remote client */
+       handle = nvmap_get_handle_id(avp_svc->nvmap_remote, msg->handle_id);
+       if (IS_ERR(handle)) {
+               pr_err("avp_svc: unknown remote handle 0x%x\n", msg->handle_id);
+               err = AVP_ERR_EACCES;
+               goto out;
+       }
+
+       if (msg->num_heaps > 4) {
+               pr_err("avp_svc: invalid remote alloc request (%d heaps?!)\n",
+                      msg->num_heaps);
+               /* TODO: should we error out instead ? */
+               msg->num_heaps = 0;
+       }
+       if (msg->num_heaps == 0)
+               heap_mask = NVMAP_HEAP_CARVEOUT_GENERIC | NVMAP_HEAP_SYSMEM;
+
+       for (i = 0; i < msg->num_heaps; i++) {
+               switch (msg->heaps[i]) {
+               case AVP_NVMAP_HEAP_EXTERNAL:
+                       heap_mask |= NVMAP_HEAP_SYSMEM;
+                       break;
+               case AVP_NVMAP_HEAP_GART:
+                       heap_mask |= NVMAP_HEAP_IOVMM;
+                       break;
+               case AVP_NVMAP_HEAP_EXTERNAL_CARVEOUT:
+                       heap_mask |= NVMAP_HEAP_CARVEOUT_GENERIC;
+                       break;
+               case AVP_NVMAP_HEAP_IRAM:
+                       heap_mask |= NVMAP_HEAP_CARVEOUT_IRAM;
+                       break;
+               default:
+                       /* silently ignore heap ids we don't know */
+                       break;
+               }
+       }
+
+       /* never align below a cache line */
+       align = max_t(size_t, L1_CACHE_BYTES, msg->align);
+       err = nvmap_alloc_handle_id(avp_svc->nvmap_remote, msg->handle_id,
+                                   heap_mask, align, 0);
+       nvmap_handle_put(handle);
+       if (err) {
+               pr_err("avp_svc: can't allocate for handle 0x%x (%d)\n",
+                      msg->handle_id, err);
+               err = AVP_ERR_ENOMEM;
+       }
+
+out:
+       resp.svc_id = SVC_NVMAP_ALLOC_RESPONSE;
+       resp.err = err;
+       trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+                     sizeof(resp), GFP_KERNEL);
+}
+
+/* Free a remote-owned nvmap handle.  No response is sent: there is no
+ * SVC_NVMAP_FREE_RESPONSE id in the protocol. */
+static void do_svc_nvmap_free(struct avp_svc_info *avp_svc,
+                             struct svc_msg *_msg,
+                             size_t len)
+{
+       struct svc_nvmap_free *msg = (struct svc_nvmap_free *)_msg;
+
+       nvmap_free_handle_id(avp_svc->nvmap_remote, msg->handle_id);
+}
+
+/* Pin a remote-owned nvmap handle so the AVP can use its physical
+ * address.  The handle is duplicated first so the pin holds its own
+ * reference (the matching free happens in do_svc_nvmap_unpin).  On any
+ * failure ~0 is reported in resp.addr. */
+static void do_svc_nvmap_pin(struct avp_svc_info *avp_svc,
+                            struct svc_msg *_msg,
+                            size_t len)
+{
+       struct svc_nvmap_pin *msg = (struct svc_nvmap_pin *)_msg;
+       struct svc_nvmap_pin_resp resp;
+       struct nvmap_handle_ref *handle;
+       unsigned long addr = ~0UL;
+       unsigned long id = msg->handle_id;
+       int err;
+
+       handle = nvmap_duplicate_handle_id(avp_svc->nvmap_remote, id);
+       if (IS_ERR(handle)) {
+               pr_err("avp_svc: can't dup handle %lx\n", id);
+               goto out;
+       }
+       err = nvmap_pin_ids(avp_svc->nvmap_remote, 1, &id);
+       if (err) {
+               pr_err("avp_svc: can't pin for handle %lx (%d)\n", id, err);
+               /* drop the reference taken by the duplicate above so it
+                * isn't leaked on the pin-failure path */
+               nvmap_free_handle_id(avp_svc->nvmap_remote, id);
+               goto out;
+       }
+       addr = nvmap_handle_address(avp_svc->nvmap_remote, id);
+
+out:
+       resp.svc_id = SVC_NVMAP_PIN_RESPONSE;
+       resp.addr = addr;
+       trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+                     sizeof(resp), GFP_KERNEL);
+}
+
+/* Unpin a handle and drop the duplicate reference that was taken when
+ * it was pinned, then acknowledge with a success response. */
+static void do_svc_nvmap_unpin(struct avp_svc_info *avp_svc,
+                              struct svc_msg *_msg,
+                              size_t len)
+{
+       struct svc_nvmap_unpin *msg = (struct svc_nvmap_unpin *)_msg;
+       struct svc_common_resp resp;
+       unsigned long id = msg->handle_id;
+
+       nvmap_unpin_ids(avp_svc->nvmap_remote, 1, &id);
+       nvmap_free_handle_id(avp_svc->nvmap_remote, id);
+
+       resp.svc_id = SVC_NVMAP_UNPIN_RESPONSE;
+       resp.err = 0;
+       trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+                     sizeof(resp), GFP_KERNEL);
+}
+
+/* Take a reference on an existing handle (by global id) for the remote
+ * client, so the AVP can use a buffer created by another client. */
+static void do_svc_nvmap_from_id(struct avp_svc_info *avp_svc,
+                                struct svc_msg *_msg,
+                                size_t len)
+{
+       struct svc_nvmap_from_id *msg = (struct svc_nvmap_from_id *)_msg;
+       struct svc_common_resp resp;
+       struct nvmap_handle_ref *handle;
+       int err = 0;
+
+       handle = nvmap_duplicate_handle_id(avp_svc->nvmap_remote,
+                                          msg->handle_id);
+       if (IS_ERR(handle)) {
+               pr_err("avp_svc: can't duplicate handle for id 0x%x (%d)\n",
+                      msg->handle_id, (int)PTR_ERR(handle));
+               err = AVP_ERR_ENOMEM;
+       }
+
+       resp.svc_id = SVC_NVMAP_FROM_ID_RESPONSE;
+       resp.err = err;
+       trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+                     sizeof(resp), GFP_KERNEL);
+}
+
+/* Report the (pinned) physical address of a handle plus the requested
+ * offset.  NOTE(review): no error is reported for unknown ids — the
+ * address from nvmap_handle_address is returned as-is. */
+static void do_svc_nvmap_get_addr(struct avp_svc_info *avp_svc,
+                                 struct svc_msg *_msg,
+                                 size_t len)
+{
+       struct svc_nvmap_get_addr *msg = (struct svc_nvmap_get_addr *)_msg;
+       struct svc_nvmap_get_addr_resp resp;
+
+       resp.svc_id = SVC_NVMAP_GET_ADDRESS_RESPONSE;
+       resp.addr = nvmap_handle_address(avp_svc->nvmap_remote, msg->handle_id);
+       resp.addr += msg->offs;
+       trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+                     sizeof(resp), GFP_KERNEL);
+}
+
+/* Power-management client registration.  No host-side bookkeeping is
+ * kept; simply acknowledge success and echo the client id back. */
+static void do_svc_pwr_register(struct avp_svc_info *avp_svc,
+                               struct svc_msg *_msg,
+                               size_t len)
+{
+       struct svc_pwr_register *msg = (struct svc_pwr_register *)_msg;
+       struct svc_pwr_register_resp resp = {
+               .svc_id         = SVC_POWER_RESPONSE,
+               .err            = 0,
+               .client_id      = msg->client_id,
+       };
+
+       trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+                     sizeof(resp), GFP_KERNEL);
+}
+
+/* Look up the host-side clock bookkeeping entry for an AVP module id;
+ * returns NULL for ids we do not manage. */
+static struct avp_module *find_avp_module(struct avp_svc_info *avp_svc, u32 id)
+{
+       struct avp_module *mod = NULL;
+
+       if (id < NUM_AVP_MODULES && avp_modules[id].name)
+               mod = &avp_modules[id];
+       return mod;
+}
+
+/* Pulse the reset line of the requested hw module.  Unknown module ids
+ * (including an attempt by the AVP to reset itself) are logged but
+ * still acknowledged with success, since the remote side does not
+ * handle reset errors. */
+static void do_svc_module_reset(struct avp_svc_info *avp_svc,
+                               struct svc_msg *_msg,
+                               size_t len)
+{
+       struct svc_module_ctrl *msg = (struct svc_module_ctrl *)_msg;
+       struct svc_common_resp resp;
+       struct avp_module *mod;
+
+       pr_info("avp_svc: module reset: %d\n", msg->module_id);
+
+       mod = find_avp_module(avp_svc, msg->module_id);
+       if (!mod) {
+               if (msg->module_id == AVP_MODULE_ID_AVP)
+                       pr_err("avp_svc: AVP suicidal?!?!\n");
+               else
+                       pr_err("avp_svc: Unknown module reset requested: %d\n",
+                              msg->module_id);
+               /* other side doesn't handle errors for reset */
+               resp.err = 0;
+               goto send_response;
+       }
+
+       tegra_periph_reset_assert(avp_svc->clks[mod->clk_req]);
+       udelay(10);
+       tegra_periph_reset_deassert(avp_svc->clks[mod->clk_req]);
+       resp.err = 0;
+
+send_response:
+       resp.svc_id = SVC_MODULE_RESET_RESPONSE;
+       trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+                     sizeof(resp), GFP_KERNEL);
+}
+
+/* Enable or disable the clock of a hw module on behalf of the AVP.
+ * sclk is enabled alongside each module clock (for dvfs) and the
+ * request is tracked in clk_reqs so leftover clocks can be dropped at
+ * avp_svc_stop().
+ * NOTE(review): the BUG_ONs below can be triggered by a misbehaving
+ * remote sending unbalanced enable/disable requests — consider
+ * downgrading them to an error response. */
+static void do_svc_module_clock(struct avp_svc_info *avp_svc,
+                               struct svc_msg *_msg,
+                               size_t len)
+{
+       struct svc_module_ctrl *msg = (struct svc_module_ctrl *)_msg;
+       struct svc_common_resp resp;
+       struct avp_module *mod;
+       unsigned long clk_bit;
+
+       pr_info("avp_svc: module clock: %d %s\n", msg->module_id,
+               msg->enable ? "on" : "off");
+       mod = find_avp_module(avp_svc, msg->module_id);
+       if (!mod) {
+               pr_err("avp_svc: unknown module clock requested: %d\n",
+                      msg->module_id);
+               resp.err = AVP_ERR_EINVAL;
+               goto send_response;
+       }
+
+       clk_bit = 1 << mod->clk_req;
+       mutex_lock(&avp_svc->clk_lock);
+       if (msg->enable) {
+               /* don't allow duplicate clock requests */
+               BUG_ON(avp_svc->clk_reqs & clk_bit);
+
+               clk_enable(avp_svc->sclk);
+               clk_enable(avp_svc->clks[mod->clk_req]);
+               avp_svc->clk_reqs |= clk_bit;
+       } else {
+               BUG_ON(!(avp_svc->clk_reqs & clk_bit));
+
+               avp_svc->clk_reqs &= ~clk_bit;
+               clk_disable(avp_svc->clks[mod->clk_req]);
+               clk_disable(avp_svc->sclk);
+       }
+       mutex_unlock(&avp_svc->clk_lock);
+       resp.err = 0;
+
+send_response:
+       resp.svc_id = SVC_MODULE_CLOCK_RESPONSE;
+       trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+                     sizeof(resp), GFP_KERNEL);
+}
+
+/* Acknowledge a request with a generic success response carrying the
+ * given response svc_id. */
+static void do_svc_null_response(struct avp_svc_info *avp_svc,
+                                struct svc_msg *_msg,
+                                size_t len, u32 resp_svc_id)
+{
+       struct svc_common_resp resp = {
+               .svc_id = resp_svc_id,
+               .err    = 0,
+       };
+
+       trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+                     sizeof(resp), GFP_KERNEL);
+}
+
+/* DFS state query: dvfs is not driven from this side, so always report
+ * the DFS as stopped. */
+static void do_svc_dfs_get_state(struct avp_svc_info *avp_svc,
+                                struct svc_msg *_msg,
+                                size_t len)
+{
+       struct svc_dfs_get_state_resp resp = {
+               .svc_id = SVC_DFS_GETSTATE_RESPONSE,
+               .state  = AVP_DFS_STATE_STOPPED,
+       };
+
+       trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+                     sizeof(resp), GFP_KERNEL);
+}
+
+/* Clock-utilization query: utilization isn't tracked host-side, so
+ * answer with success and zeroed usage statistics (the designated
+ * initializer zero-fills the embedded avp_clk_usage). */
+static void do_svc_dfs_get_clk_util(struct avp_svc_info *avp_svc,
+                                   struct svc_msg *_msg,
+                                   size_t len)
+{
+       struct svc_dfs_get_clk_util_resp resp = {
+               .svc_id = SVC_DFS_GET_CLK_UTIL_RESPONSE,
+               .err    = 0,
+       };
+
+       trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+                     sizeof(resp), GFP_KERNEL);
+}
+
+/* Max-frequency query: no frequency bookkeeping on this side, so the
+ * reported frequency is always 0. */
+static void do_svc_pwr_max_freq(struct avp_svc_info *avp_svc,
+                               struct svc_msg *_msg,
+                               size_t len)
+{
+       struct svc_pwr_max_freq_resp resp = {
+               .svc_id = SVC_POWER_MAXFREQ,
+               .freq   = 0,
+       };
+
+       trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+                     sizeof(resp), GFP_KERNEL);
+}
+
+/* Remote printf: copy the (possibly unterminated) string into a local
+ * buffer to guarantee NUL termination before logging it. */
+static void do_svc_printf(struct avp_svc_info *avp_svc, struct svc_msg *_msg,
+                         size_t len)
+{
+       struct svc_printf *msg = (struct svc_printf *)_msg;
+       char tmp_str[SVC_MAX_STRING_LEN];
+
+       /* ensure we null terminate the source */
+       strlcpy(tmp_str, msg->str, SVC_MAX_STRING_LEN);
+       pr_info("[AVP]: %s", tmp_str);
+}
+
+/* Dispatch a received svc message to its handler based on svc_id.
+ * Returns 0 when the message was recognized (handlers send their own
+ * responses) or -ENOMSG for unknown svc ids. */
+static int dispatch_svc_message(struct avp_svc_info *avp_svc,
+                               struct svc_msg *msg,
+                               size_t len)
+{
+       int ret = 0;
+
+       switch (msg->svc_id) {
+       case SVC_NVMAP_CREATE:
+               DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_create\n", __func__);
+               do_svc_nvmap_create(avp_svc, msg, len);
+               break;
+       case SVC_NVMAP_ALLOC:
+               DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_alloc\n", __func__);
+               do_svc_nvmap_alloc(avp_svc, msg, len);
+               break;
+       case SVC_NVMAP_FREE:
+               DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_free\n", __func__);
+               do_svc_nvmap_free(avp_svc, msg, len);
+               break;
+       case SVC_NVMAP_PIN:
+               DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_pin\n", __func__);
+               do_svc_nvmap_pin(avp_svc, msg, len);
+               break;
+       case SVC_NVMAP_UNPIN:
+               DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_unpin\n", __func__);
+               do_svc_nvmap_unpin(avp_svc, msg, len);
+               break;
+       case SVC_NVMAP_FROM_ID:
+               DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_from_id\n", __func__);
+               do_svc_nvmap_from_id(avp_svc, msg, len);
+               break;
+       case SVC_NVMAP_GET_ADDRESS:
+               DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_get_addr\n", __func__);
+               do_svc_nvmap_get_addr(avp_svc, msg, len);
+               break;
+       case SVC_POWER_REGISTER:
+               DBG(AVP_DBG_TRACE_SVC, "%s: got power_register\n", __func__);
+               do_svc_pwr_register(avp_svc, msg, len);
+               break;
+       case SVC_POWER_UNREGISTER:
+               DBG(AVP_DBG_TRACE_SVC, "%s: got power_unregister\n", __func__);
+               /* nothing to do */
+               break;
+       case SVC_POWER_BUSY_HINT_MULTI:
+               DBG(AVP_DBG_TRACE_SVC, "%s: got power_busy_hint_multi\n",
+                   __func__);
+               /* nothing to do */
+               break;
+       case SVC_POWER_BUSY_HINT:
+       case SVC_POWER_STARVATION:
+               DBG(AVP_DBG_TRACE_SVC, "%s: got power busy/starve hint\n",
+                   __func__);
+               do_svc_null_response(avp_svc, msg, len, SVC_POWER_RESPONSE);
+               break;
+       case SVC_POWER_MAXFREQ:
+               DBG(AVP_DBG_TRACE_SVC, "%s: got power get_max_freq\n",
+                   __func__);
+               do_svc_pwr_max_freq(avp_svc, msg, len);
+               break;
+       case SVC_DFS_GETSTATE:
+               DBG(AVP_DBG_TRACE_SVC, "%s: got dfs_get_state\n", __func__);
+               do_svc_dfs_get_state(avp_svc, msg, len);
+               break;
+       case SVC_MODULE_RESET:
+               DBG(AVP_DBG_TRACE_SVC, "%s: got module_reset\n", __func__);
+               do_svc_module_reset(avp_svc, msg, len);
+               break;
+       case SVC_MODULE_CLOCK:
+               DBG(AVP_DBG_TRACE_SVC, "%s: got module_clock\n", __func__);
+               do_svc_module_clock(avp_svc, msg, len);
+               break;
+       case SVC_DFS_GET_CLK_UTIL:
+               DBG(AVP_DBG_TRACE_SVC, "%s: got get_clk_util\n", __func__);
+               do_svc_dfs_get_clk_util(avp_svc, msg, len);
+               break;
+       case SVC_PRINTF:
+               DBG(AVP_DBG_TRACE_SVC, "%s: got remote printf\n", __func__);
+               do_svc_printf(avp_svc, msg, len);
+               break;
+       case SVC_AVP_WDT_RESET:
+               pr_err("avp_svc: AVP has been reset by watchdog\n");
+               break;
+       default:
+               pr_err("avp_svc: invalid SVC call 0x%x\n", msg->svc_id);
+               ret = -ENOMSG;
+               break;
+       }
+
+       return ret;
+}
+
+/* Service thread: waits for the AVP to connect to the CPU port, then
+ * receives and dispatches svc messages until the port is closed or
+ * kthread_stop() is called.  Drops the extra endpoint reference taken
+ * for it by avp_svc_start() on exit. */
+static int avp_svc_thread(void *data)
+{
+       struct avp_svc_info *avp_svc = data;
+       u8 buf[TEGRA_RPC_MAX_MSG_LEN];
+       struct svc_msg *msg = (struct svc_msg *)buf;
+       int ret;
+
+       BUG_ON(!avp_svc->cpu_ep);
+
+       /* block forever (-1 timeout) until the remote side connects */
+       ret = trpc_wait_peer(avp_svc->cpu_ep, -1);
+       if (ret) {
+               /* XXX: teardown?! */
+               pr_err("%s: no connection from AVP (%d)\n", __func__, ret);
+               goto err;
+       }
+
+       pr_info("%s: got remote peer\n", __func__);
+
+       while (!kthread_should_stop()) {
+               DBG(AVP_DBG_TRACE_SVC, "%s: waiting for message\n", __func__);
+               ret = trpc_recv_msg(avp_svc->rpc_node, avp_svc->cpu_ep, buf,
+                                   TEGRA_RPC_MAX_MSG_LEN, -1);
+               DBG(AVP_DBG_TRACE_SVC, "%s: got message\n", __func__);
+               if (ret < 0) {
+                       pr_err("%s: couldn't receive msg\n", __func__);
+                       /* XXX: port got closed? we should exit? */
+                       goto err;
+               } else if (!ret) {
+                       pr_err("%s: received msg of len 0?!\n", __func__);
+                       continue;
+               }
+               dispatch_svc_message(avp_svc, msg, ret);
+       }
+
+err:
+       trpc_put(avp_svc->cpu_ep);
+       pr_info("%s: done\n", __func__);
+       return ret;
+}
+
+/* Bring the service up: create the nvmap client that owns all remote
+ * allocations, create the RPC_CPU_PORT endpoint, and start the service
+ * thread.  Returns 0 on success or a negative errno, with everything
+ * rolled back on failure. */
+int avp_svc_start(struct avp_svc_info *avp_svc)
+{
+       struct trpc_endpoint *ep;
+       int ret;
+
+       avp_svc->nvmap_remote = nvmap_create_client(nvmap_dev, "avp_remote");
+       if (IS_ERR(avp_svc->nvmap_remote)) {
+               pr_err("%s: cannot create remote nvmap client\n", __func__);
+               ret = PTR_ERR(avp_svc->nvmap_remote);
+               goto err_nvmap_create_remote_client;
+       }
+
+       ep = trpc_create(avp_svc->rpc_node, "RPC_CPU_PORT", NULL, NULL);
+       if (IS_ERR(ep)) {
+               pr_err("%s: can't create RPC_CPU_PORT\n", __func__);
+               ret = PTR_ERR(ep);
+               goto err_cpu_port_create;
+       }
+
+       /* TODO: protect this */
+       avp_svc->cpu_ep = ep;
+
+       /* the service thread should get an extra reference for the port */
+       trpc_get(avp_svc->cpu_ep);
+       avp_svc->svc_thread = kthread_run(avp_svc_thread, avp_svc,
+                                         "avp_svc_thread");
+       if (IS_ERR_OR_NULL(avp_svc->svc_thread)) {
+               avp_svc->svc_thread = NULL;
+               pr_err("%s: can't create svc thread\n", __func__);
+               ret = -ENOMEM;
+               goto err_kthread;
+       }
+       return 0;
+
+err_kthread:
+       /* drop both the endpoint's own reference and the thread's */
+       trpc_close(avp_svc->cpu_ep);
+       trpc_put(avp_svc->cpu_ep);
+       avp_svc->cpu_ep = NULL;
+err_cpu_port_create:
+       nvmap_client_put(avp_svc->nvmap_remote);
+err_nvmap_create_remote_client:
+       avp_svc->nvmap_remote = NULL;
+       return ret;
+}
+
+/* Tear the service down: close the CPU port, stop the service thread,
+ * release the remote nvmap client (freeing all remote allocations) and
+ * drop any module clocks the AVP left enabled. */
+void avp_svc_stop(struct avp_svc_info *avp_svc)
+{
+       int ret;
+       int i;
+
+       trpc_close(avp_svc->cpu_ep);
+       ret = kthread_stop(avp_svc->svc_thread);
+       if (ret == -EINTR) {
+               /* the thread never started, drop its extra reference */
+               trpc_put(avp_svc->cpu_ep);
+       }
+       avp_svc->cpu_ep = NULL;
+
+       nvmap_client_put(avp_svc->nvmap_remote);
+       avp_svc->nvmap_remote = NULL;
+
+       mutex_lock(&avp_svc->clk_lock);
+       for (i = 0; i < NUM_CLK_REQUESTS; i++)
+               if (avp_svc->clk_reqs & (1 << i)) {
+                       pr_info("%s: remote left clock %d on\n", __func__, i);
+                       clk_disable(avp_svc->clks[i]);
+               }
+       avp_svc->clk_reqs = 0;
+       mutex_unlock(&avp_svc->clk_lock);
+}
+
+/* Allocate the service context and look up the module and sclk clocks.
+ * Returns the context or an ERR_PTR; on failure everything acquired so
+ * far (clocks and the context itself) is released. */
+struct avp_svc_info *avp_svc_init(struct platform_device *pdev,
+                                 struct trpc_node *rpc_node)
+{
+       struct avp_svc_info *avp_svc;
+       int ret;
+       int i;
+       int cnt = 0;
+
+       BUG_ON(!rpc_node);
+
+       avp_svc = kzalloc(sizeof(struct avp_svc_info), GFP_KERNEL);
+       if (!avp_svc) {
+               ret = -ENOMEM;
+               goto err_alloc;
+       }
+
+       BUILD_BUG_ON(NUM_CLK_REQUESTS > BITS_PER_LONG);
+
+       for (i = 0; i < NUM_AVP_MODULES; i++) {
+               struct avp_module *mod = &avp_modules[i];
+               struct clk *clk;
+               if (!mod->name)
+                       continue;
+               BUG_ON(mod->clk_req >= NUM_CLK_REQUESTS ||
+                      cnt++ >= NUM_CLK_REQUESTS);
+
+               clk = clk_get(&pdev->dev, mod->name);
+               if (IS_ERR(clk)) {
+                       ret = PTR_ERR(clk);
+                       pr_err("avp_svc: Couldn't get required clocks\n");
+                       goto err_get_clks;
+               }
+               avp_svc->clks[mod->clk_req] = clk;
+       }
+
+       avp_svc->sclk = clk_get(&pdev->dev, "sclk");
+       if (IS_ERR(avp_svc->sclk)) {
+               pr_err("avp_svc: Couldn't get sclk for dvfs\n");
+               ret = -ENOENT;
+               goto err_get_clks;
+       }
+       avp_svc->rpc_node = rpc_node;
+
+       mutex_init(&avp_svc->clk_lock);
+
+       return avp_svc;
+
+err_get_clks:
+       for (i = 0; i < NUM_CLK_REQUESTS; i++)
+               if (avp_svc->clks[i])
+                       clk_put(avp_svc->clks[i]);
+       if (!IS_ERR_OR_NULL(avp_svc->sclk))
+               clk_put(avp_svc->sclk);
+       /* the context itself must be freed too, or it leaks on the
+        * clock error path */
+       kfree(avp_svc);
+err_alloc:
+       return ERR_PTR(ret);
+}
+
+/* Release the clocks acquired by avp_svc_init() and free the context.
+ * All clks[] slots are populated on the successful init path, so an
+ * unconditional clk_put is safe here. */
+void avp_svc_destroy(struct avp_svc_info *avp_svc)
+{
+       int i;
+
+       for (i = 0; i < NUM_CLK_REQUESTS; i++)
+               clk_put(avp_svc->clks[i]);
+       clk_put(avp_svc->sclk);
+
+       kfree(avp_svc);
+}
diff --git a/drivers/media/video/tegra/avp/headavp.S b/drivers/media/video/tegra/avp/headavp.S
new file mode 100644 (file)
index 0000000..5304067
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * drivers/media/video/tegra/avp/headavp.S
+ *
+ * AVP kernel launcher stub; programs the AVP MMU and jumps to the
+ * kernel code. Must use ONLY ARMv4 instructions, and must be compiled
+ * in ARM mode.
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include "headavp.h"
+
+#define PTE0_COMPARE   0
+/* the default translation will translate any VA within
+ * 0x0010:0000..0x001f:ffff to the (megabyte-aligned) value written to
+ * _tegra_avp_boot_stub_data.map_phys_addr
+ */
+#define PTE0_DEFAULT   (AVP_KERNEL_VIRT_BASE | 0x3ff0)
+
+#define PTE0_TRANSLATE 4
+
+#define TRANSLATE_DATA (1 << 11)
+#define TRANSLATE_CODE (1 << 10)
+#define TRANSLATE_WR   (1 << 9)
+#define TRANSLATE_RD   (1 << 8)
+#define TRANSLATE_HIT  (1 << 7)
+#define TRANSLATE_EN   (1 << 2)
+
+#define TRANSLATE_OPT (TRANSLATE_DATA | TRANSLATE_CODE | TRANSLATE_WR | \
+                      TRANSLATE_RD | TRANSLATE_HIT)
+
+/* AVP boot stub: loads the four data words below into r0-r3, programs a
+ * single MMU translation entry, then branches to the AVP kernel.
+ * Word layout matches struct tegra_avp_boot_stub_data in headavp.h:
+ *   r0 = mmu_tlb_base, r1 = jump_addr, r2 = map_virt_addr,
+ *   r3 = map_phys_addr.
+ */
+ENTRY(_tegra_avp_boot_stub)
+       adr     r4, _tegra_avp_boot_stub_data
+       ldmia   r4, {r0-r3}             @ fetch the patched data words
+       str     r2, [r0, #PTE0_COMPARE] @ VA to match for this entry
+       bic     r3, r3, #0xff0          @ clear low 12 bits of phys addr
+       bic     r3, r3, #0x00f          @ (megabyte alignment + ctrl bits)
+       orr     r3, r3, #TRANSLATE_OPT  @ data/code/rd/wr/hit permissions
+       orr     r3, r3, #TRANSLATE_EN   @ enable the translation
+       str     r3, [r0, #PTE0_TRANSLATE]
+       bx      r1                      @ jump to the AVP kernel entry
+       b       .                       @ should never return
+ENDPROC(_tegra_avp_boot_stub)
+       .type   _tegra_avp_boot_stub_data, %object
+ENTRY(_tegra_avp_boot_stub_data)
+       .long   AVP_MMU_TLB_BASE        @ mmu_tlb_base
+       .long   0xdeadbeef              @ jump_addr: placeholder, presumably
+                                       @ patched before the AVP starts
+       .long   PTE0_DEFAULT            @ map_virt_addr
+       .long   0xdeadd00d              @ map_phys_addr: placeholder, presumably
+                                       @ patched before the AVP starts
+       .size   _tegra_avp_boot_stub_data, . - _tegra_avp_boot_stub_data
diff --git a/drivers/media/video/tegra/avp/headavp.h b/drivers/media/video/tegra/avp/headavp.h
new file mode 100644 (file)
index 0000000..2bcc329
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * drivers/media/video/tegra/avp/headavp.h
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef _MACH_TEGRA_HEADAVP_H
+#define _MACH_TEGRA_HEADAVP_H
+
+/* Constants shared with headavp.S: MMU translation register base and the
+ * default AVP kernel virtual base used to build PTE0_DEFAULT. */
+#define AVP_MMU_TLB_BASE               0xF000F000
+#define AVP_KERNEL_VIRT_BASE           0x00100000
+
+#ifndef __ASSEMBLY__
+
+/* C view of the data words emitted at _tegra_avp_boot_stub_data in
+ * headavp.S; field order must match the .long entries there. */
+struct tegra_avp_boot_stub_data {
+       unsigned long   mmu_tlb_base;   /* base of MMU translation regs (r0) */
+       unsigned long   jump_addr;      /* address the stub branches to (r1) */
+       unsigned long   map_virt_addr;  /* VA written to PTE0_COMPARE (r2) */
+       unsigned long   map_phys_addr;  /* PA written to PTE0_TRANSLATE (r3) */
+};
+
+extern void _tegra_avp_boot_stub(void);
+extern struct tegra_avp_boot_stub_data _tegra_avp_boot_stub_data;
+
+#endif
+
+#endif
diff --git a/include/linux/tegra_avp.h b/include/linux/tegra_avp.h
new file mode 100644 (file)
index 0000000..2650b55
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_TEGRA_AVP_H
+#define __LINUX_TEGRA_AVP_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/* Userspace ABI for the Tegra AVP driver: load/unload AVP libraries. */
+#define TEGRA_AVP_LIB_MAX_NAME         32
+#define TEGRA_AVP_LIB_MAX_ARGS         220 /* DO NOT CHANGE THIS! */
+
+/* Argument block for TEGRA_AVP_IOCTL_LOAD_LIB. */
+struct tegra_avp_lib {
+       char            name[TEGRA_AVP_LIB_MAX_NAME];   /* library name (in) */
+       void __user     *args;          /* userspace argument blob (in);
+                                        * NOTE(review): length presumably
+                                        * capped at TEGRA_AVP_LIB_MAX_ARGS —
+                                        * confirm in the driver */
+       size_t          args_len;       /* size of *args in bytes (in) */
+       int             greedy;         /* loader flag — semantics defined by
+                                        * the driver, not visible here */
+       unsigned long   handle;         /* library handle (out) */
+};
+
+#define TEGRA_AVP_IOCTL_MAGIC          'r'
+
+#define TEGRA_AVP_IOCTL_LOAD_LIB       _IOWR(TEGRA_AVP_IOCTL_MAGIC, 0x40, struct tegra_avp_lib)
+#define TEGRA_AVP_IOCTL_UNLOAD_LIB     _IOW(TEGRA_AVP_IOCTL_MAGIC, 0x41, unsigned long)
+
+/* Inclusive range of ioctl numbers owned by this interface. */
+#define TEGRA_AVP_IOCTL_MIN_NR         _IOC_NR(TEGRA_AVP_IOCTL_LOAD_LIB)
+#define TEGRA_AVP_IOCTL_MAX_NR         _IOC_NR(TEGRA_AVP_IOCTL_UNLOAD_LIB)
+
+#endif