drivers: staging: nvshm: change cache invalidation macro
Martin Chabot [Tue, 12 Feb 2013 03:59:19 +0000 (19:59 -0800)]
Reverse the order of L1/L2 cache invalidation to avoid
corruption seen with the low-latency change on the BBC

Bug 1234867

Change-Id: Id85fae3b9048952ca49658caa06d48ccad521d8f
Signed-off-by: Martin Chabot <mchabot@nvidia.com>
Reviewed-on: http://git-master/r/199845
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Vinayak Pane <vpane@nvidia.com>
Reviewed-by: Seshendra Gadagottu <sgadagottu@nvidia.com>

drivers/staging/nvshm/nvshm_priv.h
drivers/staging/nvshm/nvshm_queue.c

index 5bedcde..f6a0c82 100644 (file)
@@ -53,9 +53,8 @@
        do {    \
                unsigned long _pa_ = page_to_phys(vmalloc_to_page((va))) \
                        + ((unsigned long)va & ~PAGE_MASK);              \
-               __cpuc_flush_dcache_area((void *)(va), (size_t)(size)); \
                outer_inv_range(_pa_, _pa_+(size_t)(size));             \
-               dsb(); \
+               __cpuc_flush_dcache_area((void *)(va), (size_t)(size)); \
        } while (0)
 
 struct nvshm_handle {
index 7ba5499..353f4b0 100644 (file)
@@ -92,20 +92,16 @@ struct nvshm_iobuf *nvshm_queue_get(struct nvshm_handle *handle)
                return NULL;
        }
 
+       dummy = handle->shared_queue_head;
        /* Invalidate lower part of iobuf - upper part can be written by AP */
-       INV_CPU_DCACHE(&handle->shared_queue_head->qnext,
+       INV_CPU_DCACHE(&dummy->qnext,
                       sizeof(struct nvshm_iobuf) / 2);
-       dummy = handle->shared_queue_head;
-       ret = NVSHM_B2A(handle, handle->shared_queue_head->qnext);
+       ret = NVSHM_B2A(handle, dummy->qnext);
 
        if (dummy->qnext == NULL)
                return NULL;
 
-       pr_debug("%s (%x)->%x\n", __func__,
-                (unsigned int)dummy, (unsigned int)dummy->qnext);
-
        inv_iob_list(handle, ret);
-       dummy->qnext = NULL;
        handle->shared_queue_head = ret;
 
        /* Update queue_bb_offset for debug purpose */
@@ -119,8 +115,11 @@ struct nvshm_iobuf *nvshm_queue_get(struct nvshm_handle *handle)
                       handle->conf->queue_bb_offset,
                       ret,
                       NVSHM_A2B(handle, ret));
-       nvshm_iobuf_free_cluster(dummy);
 
+       pr_debug("%s (%p)->%p->(%p)\n", __func__,
+                dummy, ret, ret->qnext);
+
+       nvshm_iobuf_free_cluster(dummy);
        return ret;
 }
 
@@ -146,10 +145,10 @@ int nvshm_queue_put(struct nvshm_handle *handle, struct nvshm_iobuf *iob)
                return -EINVAL;
        }
 
-       pr_debug("%s (%x)->%x/%d/%d->0x%x\n", __func__,
-                (unsigned int)handle->shared_queue_tail,
-                (unsigned int)iob, iob->chan, iob->length,
-                (unsigned int)iob->next);
+       pr_debug("%s (%p)->%p/%d/%d->%p\n", __func__,
+               handle->shared_queue_tail,
+               iob, iob->chan, iob->length,
+               iob->next);
 
        /* Take a reference on queued iobufs (all of them!) */
        nvshm_iobuf_ref_cluster(iob);
@@ -194,9 +193,8 @@ void nvshm_process_queue(struct nvshm_handle *handle)
        pm_stay_awake(handle->dev);
        iob = nvshm_queue_get(handle);
        while (iob) {
-               pr_debug("%s %x/%d/%d/%d->0x%x\n", __func__,
-                       (unsigned int)iob, iob->chan, iob->length, iob->ref,
-                       (unsigned int)iob->next);
+               pr_debug("%s %p/%d/%d/%d->%p\n", __func__,
+                       iob, iob->chan, iob->length, iob->ref, iob->next);
                tegra_bb_clear_ipc(handle->tegra_bb);
                chan = iob->chan;
                if (iob->pool_id < NVSHM_AP_POOL_ID) {