staging: nvshm: create new communication channel for RPC
Hervé Fache [Tue, 26 Mar 2013 13:13:28 +0000 (14:13 +0100)]
This patch introduces a new NVSHM channel and support code to easily
invoke and manage function calls through this medium.

Possible call types are:
* one-way: no response is sent back
* synchronous: a response is necessary to unblock the caller
* asynchronous: a callback must be specified to get the response
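
As an illustration, a minimal caller-side sketch (the function names,
payload size and contents below are hypothetical, not part of this
patch; with a NULL callback, any response is dropped automatically):

  #include <linux/errno.h>
  #include <linux/string.h>
  #include "nvshm_rpc.h"

  static void my_response_cb(struct nvshm_rpc_message *resp, void *context)
  {
          /* Consume resp->payload (resp->length bytes), then free it */
          nvshm_rpc_free(resp);
  }

  static int my_rpc_example(void)
  {
          struct nvshm_rpc_message *req;

          /* Allocate a request with room for a 16-byte payload */
          req = nvshm_rpc_allocrequest(16, my_response_cb, NULL);
          if (!req)
                  return -ENOMEM;
          memset(req->payload, 0xab, req->length);
          /* Sent messages are freed automatically */
          return nvshm_rpc_send(req);
  }

On the receiving side, a dispatcher registered through
nvshm_rpc_setdispatcher() gets the requests and can answer using
nvshm_rpc_allocresponse() followed by nvshm_rpc_send().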

Bug 1248547

Change-Id: Icdc0fca081257c78ae3a000b8bcdcd7e87691637
Signed-off-by: Hervé Fache <hfache@nvidia.com>
Reviewed-on: http://git-master/r/213916
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
GVS: Gerrit_Virtual_Submit

drivers/staging/nvshm/Makefile
drivers/staging/nvshm/nvshm_iobuf.h
drivers/staging/nvshm/nvshm_ipc.c
drivers/staging/nvshm/nvshm_priv.h
drivers/staging/nvshm/nvshm_rpc.c [new file with mode: 0644]
drivers/staging/nvshm/nvshm_rpc.h [new file with mode: 0644]
drivers/staging/nvshm/nvshm_types.h

index a0117bb..cc02af4 100644 (file)
@@ -7,6 +7,7 @@ nvshm-objs += nvshm_iobuf.o
 nvshm-objs += nvshm_tty.o
 nvshm-objs += nvshm_net.o
 nvshm-objs += nvshm_if.o
+nvshm-objs += nvshm_rpc.o
 
 obj-$(CONFIG_NVSHM)            += nvshm.o
 
index 011ad8d..9e4aaff 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012 NVIDIA Corporation.
+ * Copyright (C) 2012-2013 NVIDIA Corporation.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
                         + NVSHM_IPC_BB_BASE)
 
 /**
+ * Payload start address in AP virtual memory space
+ *
+ * @param h : struct nvshm_handle pointer
+ * @param b : pointer to the iobuf
+ * @return : pointer to payload in cached kernel space
+ */
+#define NVSHM_IOBUF_PAYLOAD(h, b) \
+       NVSHM_B2A((h), (b)->npduData + (b)->dataOffset)
+
+/**
  * Alloc a nvshm_iobuf descriptor to be used for write operation
  * Failure of allocation is considered as an Xoff situation and
  * will be followed by a call to (*start_tx)() operation when flow
index 6a9af0e..e24f6b1 100644 (file)
@@ -109,11 +109,13 @@ static int ipc_readconfig(struct nvshm_handle *handle)
 
 static int init_interfaces(struct nvshm_handle *handle)
 {
-       int nlog = 0, ntty = 0, nnet = 0;
+       int nlog = 0, ntty = 0, nnet = 0, nrpc = 0;
        int chan;
 
        for (chan = 0; chan < NVSHM_MAX_CHANNELS; chan++) {
                switch (handle->chan[chan].map.type) {
+               case NVSHM_CHAN_UNMAP:
+                       break;
                case NVSHM_CHAN_TTY:
                case NVSHM_CHAN_LOG:
                        ntty++;
@@ -123,6 +125,10 @@ static int init_interfaces(struct nvshm_handle *handle)
                        handle->chan[chan].rate_counter = NVSHM_RATE_LIMIT_NET;
                        nnet++;
                        break;
+               case NVSHM_CHAN_RPC:
+                       handle->chan[chan].rate_counter = NVSHM_RATE_LIMIT_RPC;
+                       nrpc++;
+                       break;
                default:
                        break;
                }
@@ -141,12 +147,17 @@ static int init_interfaces(struct nvshm_handle *handle)
                nvshm_net_init(handle);
        }
 
+       if (nrpc) {
+               pr_debug("%s init %d rpc channels\n", __func__, nrpc);
+               nvshm_rpc_init(handle);
+       }
+
        return 0;
 }
 
 static int cleanup_interfaces(struct nvshm_handle *handle)
 {
-       int nlog = 0, ntty = 0, nnet = 0;
+       int nlog = 0, ntty = 0, nnet = 0, nrpc = 0;
        int chan;
 
        /* No need to protect this as configuration will arrive after cleanup
@@ -163,6 +174,9 @@ static int cleanup_interfaces(struct nvshm_handle *handle)
                case NVSHM_CHAN_NET:
                        nnet++;
                        break;
+               case NVSHM_CHAN_RPC:
+                       nrpc++;
+                       break;
                default:
                        break;
                }
@@ -181,7 +195,12 @@ static int cleanup_interfaces(struct nvshm_handle *handle)
                nvshm_net_cleanup();
        }
 
-       /* Remove serial sysfs entry */
+       if (nrpc) {
+               pr_debug("%s cleanup %d rpc channels\n", __func__, nrpc);
+               nvshm_rpc_cleanup();
+       }
+
+       /* Remove serial sysfs entry */
        tegra_bb_set_ipc_serial(handle->tegra_bb, NULL);
 
        return 0;
index 7d830b9..8618535 100644 (file)
@@ -98,4 +98,7 @@ extern void nvshm_tty_cleanup(void);
 extern int nvshm_net_init(struct nvshm_handle *handle);
 extern void nvshm_net_cleanup(void);
 
+extern int nvshm_rpc_init(struct nvshm_handle *handle);
+extern void nvshm_rpc_cleanup(void);
+
 #endif /* _NVSHM_PRIV_H */
diff --git a/drivers/staging/nvshm/nvshm_rpc.c b/drivers/staging/nvshm/nvshm_rpc.c
new file mode 100644 (file)
index 0000000..bbfb4fe
--- /dev/null
@@ -0,0 +1,393 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/sunrpc/msg_prot.h>
+#include "nvshm_types.h"
+#include "nvshm_if.h"
+#include "nvshm_priv.h"
+#include "nvshm_iobuf.h"
+#include "nvshm_rpc.h"
+
+enum {
+       CONCURRENT_REQUESTS_MAX = 16,   /* MUST be a power of two */
+};
+
+struct nvshm_rpc_header {
+       u32 xid;                        /* Endianness does not matter */
+       enum rpc_msg_type msg_type;
+};
+
+typedef void (*nvshm_rpc_callback_t)(
+       struct nvshm_rpc_message *message,
+       void *context);
+
+struct nvshm_rpc_request {
+       u32 requestid;
+       nvshm_rpc_callback_t callback;
+       void *context;
+};
+
+struct nvshm_rpc {
+       int chanid;
+       struct nvshm_channel *pchan;
+       struct nvshm_handle *handle;
+       nvshm_rpc_callback_t dispatcher_callback;
+       void *dispatcher_context;
+       struct mutex requestid_mutex;
+       u32 requestid;
+       struct nvshm_rpc_request requests[CONCURRENT_REQUESTS_MAX];
+       u32 free_requests_number;
+};
+
+static struct nvshm_rpc rpc_private;
+
+/*
+ * We want the request ID to be unique, even if a rollover happens, so we have
+ * the array index as LSBs and a counter as MSBs.  Hence the requirement for
+ * CONCURRENT_REQUESTS_MAX to be a power of 2.
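+ * For example, with CONCURRENT_REQUESTS_MAX = 16, slot 3 only ever yields
+ * IDs of the form 16 * k + 3 (19, 35, 51, ...), so a recycled slot cannot
+ * reuse an ID until the counter wraps all the way around.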
+ */
+static u32 request_create(nvshm_rpc_callback_t callback, void *context)
+{
+       u32 requestid = 0;
+       int i;
+
+       mutex_lock(&rpc_private.requestid_mutex);
+       if (rpc_private.free_requests_number == 0)
+               goto end;
+
+       for (i = 0; i < CONCURRENT_REQUESTS_MAX; ++i) {
+               struct nvshm_rpc_request *request;
+
+               if (rpc_private.requests[i].requestid)
+                       continue;
+
+               rpc_private.requestid += CONCURRENT_REQUESTS_MAX;
+               /* Make sure we never give out request ID 0 */
+               if (rpc_private.requestid + i == 0)
+                       rpc_private.requestid += CONCURRENT_REQUESTS_MAX;
+
+               request = &rpc_private.requests[i];
+               request->requestid = rpc_private.requestid + i;
+               request->callback = callback;
+               request->context = context;
+               --rpc_private.free_requests_number;
+               requestid = request->requestid;
+               break;
+       }
+end:
+       mutex_unlock(&rpc_private.requestid_mutex);
+       return requestid;
+}
+
+static struct nvshm_rpc_request *request_get(u32 requestid)
+{
+       struct nvshm_rpc_request *request = NULL;
+       int i;
+
+       /*
+        * We only have two threads here: one that creates the message and sends
+        * it, and one that receives the answer to it and reads it, then deletes
+        * it. Creation implies a free slot, so will not interfere. Hence we do
+        * it. Creation implies a free slot, so it will not interfere. Hence we
+        * do not need to lock.
+       for (i = 0; i < CONCURRENT_REQUESTS_MAX; ++i)
+               if (rpc_private.requests[i].requestid == requestid) {
+                       request = &rpc_private.requests[i];
+                       break;
+               }
+       return request;
+}
+
+static void request_delete(u32 requestid)
+{
+       int i;
+
+       mutex_lock(&rpc_private.requestid_mutex);
+       for (i = 0; i < CONCURRENT_REQUESTS_MAX; ++i)
+               if (rpc_private.requests[i].requestid == requestid) {
+                       rpc_private.requests[i].requestid = 0;
+                       ++rpc_private.free_requests_number;
+                       break;
+               }
+       mutex_unlock(&rpc_private.requestid_mutex);
+}
+
+static void nvshm_rpc_rx_event(struct nvshm_channel *chan,
+                              struct nvshm_iobuf *iobuf)
+{
+       u8 *data = NVSHM_IOBUF_PAYLOAD(rpc_private.handle, iobuf);
+       struct nvshm_rpc_header *header;
+       struct nvshm_rpc_message *message;
+
+       header = (struct nvshm_rpc_header *) data;
+       data += sizeof(*header);
+       /* Create message structure */
+       message = kmalloc(sizeof(*message), GFP_KERNEL);
+       if (unlikely(!message)) {
+               pr_err("failed to allocate message\n");
+               goto failed;
+       }
+
+       message->private = iobuf;
+       message->payload = data;
+       message->length = iobuf->length - sizeof(*header);
+       if (header->msg_type == htonl(RPC_REPLY)) {
+               struct nvshm_rpc_request *request = request_get(header->xid);
+               nvshm_rpc_callback_t callback;
+               void *context;
+
+               if (!request) {
+                       pr_err("invalid request ID %u\n", header->xid);
+                       goto failed;
+               }
+               /* Free the request in case the callback wants to send */
+               callback = request->callback;
+               context = request->context;
+               request_delete(header->xid);
+               /* Call back */
+               if (callback)
+                       callback(message, context);
+               else
+                       nvshm_rpc_free(message);
+       } else {
+               /* Check payload length */
+               if (message->length == 0) {
+                       /* Empty payload: for latency measurement */
+                       struct nvshm_rpc_message *response;
+
+                       response = nvshm_rpc_allocresponse(0, message);
+                       nvshm_rpc_send(response);
+                       nvshm_rpc_free(message);
+               } else if (rpc_private.dispatcher_callback != NULL) {
+                       /* Dispatch */
+                       rpc_private.dispatcher_callback(message,
+                                               rpc_private.dispatcher_context);
+               } else {
+                       nvshm_rpc_free(message);
+               }
+       }
+
+       return;
+failed:
+       kfree(message);
+       nvshm_iobuf_free(iobuf);
+}
+
+static void nvshm_rpc_error_event(struct nvshm_channel *chan,
+                                 enum nvshm_error_id error)
+{
+}
+
+static void nvshm_rpc_start_tx(struct nvshm_channel *chan)
+{
+}
+
+static struct nvshm_if_operations nvshm_rpc_ops = {
+       .rx_event = nvshm_rpc_rx_event,
+       .error_event = nvshm_rpc_error_event,
+       .start_tx = nvshm_rpc_start_tx
+};
+
+int nvshm_rpc_init(struct nvshm_handle *handle)
+{
+       int chan;
+       int i;
+
+       for (chan = 0; chan < NVSHM_MAX_CHANNELS; chan++)
+               if (handle->chan[chan].map.type == NVSHM_CHAN_RPC) {
+                       rpc_private.chanid = chan;
+                       rpc_private.handle = handle;
+                       rpc_private.pchan = nvshm_open_channel(chan,
+                                                              &nvshm_rpc_ops,
+                                                              &rpc_private);
+                       if (!rpc_private.pchan) {
+                               pr_err("failed to open channel\n");
+                               goto fail;
+                       }
+                       /* Only one RPC channel */
+                       break;
+               }
+
+       /* Initialize request ID bookkeeping (never destroyed) */
+       mutex_init(&rpc_private.requestid_mutex);
+       rpc_private.requestid = 0;
+       for (i = 0; i < CONCURRENT_REQUESTS_MAX; ++i)
+               rpc_private.requests[i].requestid = 0;
+       rpc_private.free_requests_number = CONCURRENT_REQUESTS_MAX;
+       return 0;
+fail:
+       return -1;
+}
+
+void nvshm_rpc_cleanup(void)
+{
+       /* FIXME Check module ref count if we ever make this a module */
+       if (!rpc_private.pchan) {
+               pr_err("not initialized\n");
+               return;
+       }
+
+       nvshm_close_channel(rpc_private.pchan);
+       mutex_destroy(&rpc_private.requestid_mutex);
+}
+
+void nvshm_rpc_setdispatcher(nvshm_rpc_callback_t callback, void *context)
+{
+       /*
+        * The dispatcher callback is set at init and unset at cleanup, when no
+        * message can be received. This therefore does not need locking.
+        */
+       rpc_private.dispatcher_callback = callback;
+       rpc_private.dispatcher_context = context;
+}
+
+struct nvshm_rpc_message*
+nvshm_rpc_allocrequest(u32 size,
+                      nvshm_rpc_callback_t callback,
+                      void *context)
+{
+       u32 requestid;
+       struct nvshm_iobuf *iobuf;
+       struct nvshm_rpc_message *message;
+       u8 *data;
+       struct nvshm_rpc_header *header;
+
+       /* Check that the channel is initialized */
+       if (!rpc_private.pchan) {
+               pr_err("not initialized\n");
+               return NULL;
+       }
+
+       /* Get request ID */
+       do {
+               requestid = request_create(callback, context);
+               /* All slots busy: unlikely given low RPC traffic; retry */
+               if (requestid == 0)
+                       udelay(50);
+       } while (requestid == 0);
+
+       /* Initialize iobuf */
+       iobuf = nvshm_iobuf_alloc(rpc_private.pchan, sizeof(*header) + size);
+       if (!iobuf) {
+               request_delete(requestid);
+               pr_err("failed to allocate iobuf\n");
+               return NULL;
+       }
+
+       iobuf->length = sizeof(*header) + size;
+       data = NVSHM_IOBUF_PAYLOAD(rpc_private.handle, iobuf);
+
+       /* Initialize header */
+       header = (struct nvshm_rpc_header *) data;
+       header->xid = requestid;
+       header->msg_type = htonl(RPC_CALL);
+       data += sizeof(*header);
+
+       /* Initialize message */
+       message = kmalloc(sizeof(*message), GFP_KERNEL);
+       if (!message) {
+               request_delete(requestid);
+               nvshm_iobuf_free(iobuf);
+               pr_err("failed to allocate message\n");
+               return NULL;
+       }
+
+       message->private = iobuf;
+       message->payload = data;
+       message->length = size;
+       return message;
+}
+
+struct nvshm_rpc_message *nvshm_rpc_allocresponse(u32 size,
+                               const struct nvshm_rpc_message *request)
+{
+       struct nvshm_iobuf *req_iobuf = request->private;
+       u8 *req_data;
+       struct nvshm_iobuf *iobuf;
+       struct nvshm_rpc_message *message;
+       u8 *data;
+       struct nvshm_rpc_header *req_header;
+       struct nvshm_rpc_header *header;
+
+       /* Read the request header */
+       if (!req_iobuf) {
+               pr_err("null request iobuf\n");
+               return NULL;
+       }
+
+       req_data = NVSHM_IOBUF_PAYLOAD(rpc_private.handle, req_iobuf);
+
+       /* Initialize iobuf */
+       if (!rpc_private.pchan) {
+               pr_err("not initialized\n");
+               return NULL;
+       }
+       iobuf = nvshm_iobuf_alloc(rpc_private.pchan, sizeof(*header) + size);
+       if (!iobuf) {
+               pr_err("failed to allocate iobuf\n");
+               return NULL;
+       }
+
+       iobuf->length = sizeof(*header) + size;
+       data = NVSHM_IOBUF_PAYLOAD(rpc_private.handle, iobuf);
+
+       /* Copy header and opaque data from request */
+       header = (struct nvshm_rpc_header *) data;
+       req_header = (struct nvshm_rpc_header *) req_data;
+       header->xid = req_header->xid;
+       header->msg_type = htonl(RPC_REPLY);
+       data += sizeof(*header);
+
+       /* Initialize message */
+       message = kmalloc(sizeof(*message), GFP_KERNEL);
+       if (!message) {
+               pr_err("failed to allocate message\n");
+               nvshm_iobuf_free(iobuf);
+               return NULL;
+       }
+
+       message->private = iobuf;
+       message->payload = data;
+       message->length = size;
+       return message;
+}
+EXPORT_SYMBOL_GPL(nvshm_rpc_allocresponse);
+
+void nvshm_rpc_free(struct nvshm_rpc_message *message)
+{
+       struct nvshm_iobuf *iobuf = message->private;
+
+       nvshm_iobuf_free(iobuf);
+       kfree(message);
+}
+
+int nvshm_rpc_send(struct nvshm_rpc_message *message)
+{
+       /* Send */
+       struct nvshm_iobuf *iobuf = message->private;
+       int rc;
+
+       /* Note: as RPC traffic is very low, we don't care about flow control */
+       rc = nvshm_write(rpc_private.pchan, iobuf);
+       /* Do not free iobuf here (see SHM specification for details) */
+       kfree(message);
+       if (rc < 0)
+               nvshm_iobuf_free(iobuf);
+
+       return rc;
+}
diff --git a/drivers/staging/nvshm/nvshm_rpc.h b/drivers/staging/nvshm/nvshm_rpc.h
new file mode 100644 (file)
index 0000000..476b580
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_STAGING_NVSHM_NVSHM_RPC_H
+#define __DRIVERS_STAGING_NVSHM_NVSHM_RPC_H
+
+#include <linux/types.h>
+
+/**
+ * Type for an RPC message (request or response).
+ *
+ * @param payload Payload to send across
+ * @param length Payload length - DO NOT MODIFY
+ * @param private An internal context - DO NOT MODIFY
+ */
+struct nvshm_rpc_message {
+       void *payload;
+       /* The fields below are set at allocation time and are private */
+       u32 length;
+       void *private;
+};
+
+/**
+ * Set a default dispatcher.
+ *
+ * The default dispatcher is the callback that receives requests from clients
+ * on the remote processor, while responses are sent back to the originator's
+ * callback automatically.
+ *
+ * Reminder: the callback (or one of its callees) MUST free the message.
+ *
+ * @param callback Callback to use to receive incoming messages
+ * @param context Context to remind at callback time (may be NULL)
+ */
+void nvshm_rpc_setdispatcher(
+       void (*callback)(struct nvshm_rpc_message *message, void *context),
+       void *context);
+
+/**
+ * Allocate a message buffer for request.
+ *
+ * The point here is for the client to fill in this buffer and not make a copy.
+ * NOTE: SENT MESSAGES ARE FREED AUTOMATICALLY.
+ *
+ * Reminder: the callback (or one of its callees) MUST free the message.
+ *
+ * @param size Size of the buffer to allocate
+ * @param callback Callback to use to receive ASYNCHRONOUS responses
+ * @param context A user context to pass to the callback, if relevant
+ * @return a buffer, or NULL on error
+ */
+struct nvshm_rpc_message *nvshm_rpc_allocrequest(
+       u32 size,
+       void (*callback)(struct nvshm_rpc_message *message, void *context),
+       void *context);
+
+/**
+ * Allocate a message buffer for response.
+ *
+ * The point here is for the client to fill in this buffer and avoid making a
+ * copy.
+ * NOTE: SENT MESSAGES ARE FREED AUTOMATICALLY.
+ *
+ * @param size Size of the buffer to allocate
+ * @param request Request message as received
+ * @return a buffer, or NULL on error
+ */
+struct nvshm_rpc_message *nvshm_rpc_allocresponse(
+       u32 size,
+       const struct nvshm_rpc_message *request);
+
+/**
+ * Free a message buffer.
+ *
+ * Use of this function should never be needed if the message is sent, as
+ * destruction is then automatic.  It is needed, however, to destroy the
+ * response to synchronous calls, and the message passed to both the
+ * dispatcher and message callbacks.
+ *
+ * @param message Message to free
+ */
+void nvshm_rpc_free(
+       struct nvshm_rpc_message *message);
+
+/**
+ * Send a request or response message.
+ *
+ * Responses go through the callback (if any).
+ *
+ * @param message Request or response to send, automatically freed once sent
+ * @return 0, or negative on error
+ */
+int nvshm_rpc_send(
+       struct nvshm_rpc_message *message);
+
+#endif /* #ifndef __DRIVERS_STAGING_NVSHM_NVSHM_RPC_H */
index a648a1b..c742ae4 100644 (file)
@@ -44,6 +44,7 @@
 #define NVSHM_RATE_LIMIT_TTY (256)
 #define NVSHM_RATE_LIMIT_LOG (512)
 #define NVSHM_RATE_LIMIT_NET (2048)
+#define NVSHM_RATE_LIMIT_RPC (256)
 #define NVSHM_RATE_LIMIT_TRESHOLD (8)
 
 /* NVSHM_IPC mailbox messages ids */
@@ -76,6 +77,7 @@ enum nvshm_chan_type {
        NVSHM_CHAN_TTY,
        NVSHM_CHAN_LOG,
        NVSHM_CHAN_NET,
+       NVSHM_CHAN_RPC,
 };
 
 /* Channel mapping structure */