gk20a: Moved bind fecs to init_gr_support
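
For review convenience, a standalone sketch of the boot flow this patch moves onto the pg_init work item follows: the PMU message handlers record the next state and re-queue the work, and pmu_setup_hw() dispatches on pmu_state. The state names are taken from the hunks below; the enum ordering, the helper next_state() and the plain-C harness are assumptions for illustration, not driver code.

/* Sketch of the workqueue-driven PMU boot state machine added by this patch. */
#include <stdio.h>

enum pmu_state {
	PMU_STATE_OFF,			/* set in gk20a_pmu_destroy() */
	PMU_STATE_STARTING,		/* set in gk20a_init_pmu_support() */
	PMU_STATE_INIT_RECEIVED,	/* set in pmu_process_init_msg() */
	PMU_STATE_ELPG_BOOTING,		/* set in pmu_init_powergating() */
	PMU_STATE_ELPG_BOOTED,		/* set on PMU_PG_ELPG_MSG_DISALLOW_ACK */
	PMU_STATE_LOADING_PG_BUF,	/* set in gk20a_init_pmu_load_fecs() */
	PMU_STATE_LOADING_ZBC,		/* set in pmu_setup_hw_load_zbc() */
	PMU_STATE_STARTED,		/* set in pmu_setup_hw_enable_elpg() */
};

static const char *state_name[] = {
	"OFF", "STARTING", "INIT_RECEIVED", "ELPG_BOOTING",
	"ELPG_BOOTED", "LOADING_PG_BUF", "LOADING_ZBC", "STARTED",
};

/* Next state in the boot sequence; the comments note where the driver makes
 * each transition (pg_init work item vs. a PMU message handler). */
static enum pmu_state next_state(enum pmu_state s)
{
	switch (s) {
	case PMU_STATE_STARTING:
		return PMU_STATE_INIT_RECEIVED;	 /* pmu_process_init_msg() */
	case PMU_STATE_INIT_RECEIVED:
		return PMU_STATE_ELPG_BOOTING;	 /* work: pmu_init_powergating() */
	case PMU_STATE_ELPG_BOOTING:
		return PMU_STATE_ELPG_BOOTED;	 /* DISALLOW ack in pmu_handle_pg_elpg_msg() */
	case PMU_STATE_ELPG_BOOTED:
		return PMU_STATE_LOADING_PG_BUF; /* work: gk20a_init_pmu_load_fecs() */
	case PMU_STATE_LOADING_PG_BUF:
		return PMU_STATE_LOADING_ZBC;	 /* buf-loaded ack, then work: pmu_setup_hw_load_zbc() */
	case PMU_STATE_LOADING_ZBC:
		return PMU_STATE_STARTED;	 /* buf-loaded ack, then work: pmu_setup_hw_enable_elpg() */
	default:
		return s;
	}
}

int main(void)
{
	enum pmu_state s = PMU_STATE_STARTING;

	/* In the driver each step is driven by a PMU ack re-queuing pg_init;
	 * here we simply walk the states to show the intended progression. */
	while (s != PMU_STATE_STARTED) {
		printf("%s -> ", state_name[s]);
		s = next_state(s);
	}
	printf("%s\n", state_name[s]);
	return 0;
}
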
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 28ba20e..f3d6e00 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -3,7 +3,7 @@
  *
  * GK20A PMU (aka. gPMU outside gk20a context)
  *
- * Copyright (c) 2011-2014, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (c) 2011-2015, NVIDIA CORPORATION.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -27,6 +27,7 @@
 #include <linux/dma-mapping.h>
 
 #include "gk20a.h"
+#include "gr_gk20a.h"
 #include "hw_mc_gk20a.h"
 #include "hw_pwr_gk20a.h"
 #include "hw_top_gk20a.h"
 static void pmu_dump_falcon_stats(struct pmu_gk20a *pmu);
 static int gk20a_pmu_get_elpg_residency_gating(struct gk20a *g,
                u32 *ingating_time, u32 *ungating_time, u32 *gating_cnt);
-static void gk20a_init_pmu_setup_hw2_workqueue(struct work_struct *work);
-static void pmu_save_zbc(struct gk20a *g, u32 entries);
+static void pmu_setup_hw(struct work_struct *work);
 static void ap_callback_init_and_enable_ctrl(
                struct gk20a *g, struct pmu_msg *msg,
                void *param, u32 seq_desc, u32 status);
 static int gk20a_pmu_ap_send_command(struct gk20a *g,
                        union pmu_ap_cmd *p_ap_cmd, bool b_block);
 
+static int pmu_init_powergating(struct gk20a *g);
+
 static u32 pmu_cmdline_size_v0(struct pmu_gk20a *pmu)
 {
        return sizeof(struct pmu_cmdline_args_v0);
@@ -1155,7 +1157,7 @@ int pmu_mutex_acquire(struct pmu_gk20a *pmu, u32 id, u32 *token)
        u32 data, owner, max_retry;
 
        if (!pmu->initialized)
-               return 0;
+               return -EINVAL;
 
        BUG_ON(!token);
        BUG_ON(!PMU_MUTEX_ID_IS_VALID(id));
@@ -1224,7 +1226,7 @@ int pmu_mutex_release(struct pmu_gk20a *pmu, u32 id, u32 *token)
        u32 owner, data;
 
        if (!pmu->initialized)
-               return 0;
+               return -EINVAL;
 
        BUG_ON(!token);
        BUG_ON(!PMU_MUTEX_ID_IS_VALID(id));
@@ -1507,8 +1509,6 @@ int gk20a_init_pmu_reset_enable_hw(struct gk20a *g)
        return 0;
 }
 
-static void pmu_elpg_enable_allow(struct work_struct *work);
-
 int gk20a_init_pmu_setup_sw(struct gk20a *g)
 {
        struct pmu_gk20a *pmu = &g->pmu;
@@ -1525,6 +1525,10 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
 
        gk20a_dbg_fn("");
 
+       /* start with elpg disabled until first enable call */
+       mutex_init(&pmu->elpg_mutex);
+       pmu->elpg_refcnt = 0;
+
        if (pmu->sw_ready) {
                for (i = 0; i < pmu->mutex_cnt; i++) {
                        pmu->mutex[i].id    = i;
@@ -1577,9 +1581,7 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
        pmu->ucode_image = (u32 *)((u8 *)pmu->desc +
                        pmu->desc->descriptor_size);
 
-
-       INIT_DELAYED_WORK(&pmu->elpg_enable, pmu_elpg_enable_allow);
-       INIT_WORK(&pmu->pg_init, gk20a_init_pmu_setup_hw2_workqueue);
+       INIT_WORK(&pmu->pg_init, pmu_setup_hw);
 
        gk20a_init_pmu_vm(mm);
 
@@ -1605,7 +1607,6 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
        }
 
        pmu->seq_buf.iova = iova;
-       init_waitqueue_head(&pmu->pg_wq);
 
        err = gk20a_get_sgtable(d, &sgt_pmu_ucode,
                                pmu->ucode.cpuva,
@@ -1666,9 +1667,11 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
        gk20a_free_sgtable(&sgt_pmu_ucode);
        gk20a_free_sgtable(&sgt_seq_buf);
 
+       pmu->sw_ready = true;
+
 skip_init:
-       mutex_init(&pmu->elpg_mutex);
        mutex_init(&pmu->isr_mutex);
+       mutex_init(&pmu->isr_enable_lock);
        mutex_init(&pmu->pmu_copy_lock);
        mutex_init(&pmu->pmu_seq_lock);
 
@@ -1734,12 +1737,13 @@ static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg,
                return;
        }
 
-       if (eng_buf_stat->status == PMU_PG_MSG_ENG_BUF_FAILED) {
-               gk20a_err(dev_from_gk20a(g), "failed to load PGENG buffer");
-       }
-
        pmu->buf_loaded = (eng_buf_stat->status == PMU_PG_MSG_ENG_BUF_LOADED);
-       wake_up(&pmu->pg_wq);
+       if ((!pmu->buf_loaded) &&
+               (pmu->pmu_state == PMU_STATE_LOADING_PG_BUF))
+                       gk20a_err(dev_from_gk20a(g), "failed to load PGENG buffer");
+       else {
+               schedule_work(&pmu->pg_init);
+       }
 }
 
 int gk20a_init_pmu_setup_hw1(struct gk20a *g)
@@ -1749,7 +1753,10 @@ int gk20a_init_pmu_setup_hw1(struct gk20a *g)
 
        gk20a_dbg_fn("");
 
+       mutex_lock(&pmu->isr_enable_lock);
        pmu_reset(pmu);
+       pmu->isr_enabled = true;
+       mutex_unlock(&pmu->isr_enable_lock);
 
        /* setup apertures - virtual */
        gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_UCODE),
@@ -1779,116 +1786,48 @@ int gk20a_init_pmu_setup_hw1(struct gk20a *g)
 static int gk20a_aelpg_init(struct gk20a *g);
 static int gk20a_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id);
 
+static void pmu_setup_hw_load_zbc(struct gk20a *g);
+static void pmu_setup_hw_enable_elpg(struct gk20a *g);
 
-static void gk20a_init_pmu_setup_hw2_workqueue(struct work_struct *work)
+static void pmu_setup_hw(struct work_struct *work)
 {
        struct pmu_gk20a *pmu = container_of(work, struct pmu_gk20a, pg_init);
        struct gk20a *g = pmu->g;
-       gk20a_init_pmu_setup_hw2(g);
+
+       switch (pmu->pmu_state) {
+       case PMU_STATE_INIT_RECEIVED:
+               gk20a_dbg_pmu("pmu starting");
+               pmu_init_powergating(g);
+               break;
+       case PMU_STATE_ELPG_BOOTED:
+               gk20a_dbg_pmu("elpg booted");
+               gk20a_init_pmu_load_fecs(g);
+               break;
+       case PMU_STATE_LOADING_PG_BUF:
+               gk20a_dbg_pmu("loaded pg buf");
+               pmu_setup_hw_load_zbc(g);
+               break;
+       case PMU_STATE_LOADING_ZBC:
+               gk20a_dbg_pmu("loaded zbc");
+               pmu_setup_hw_enable_elpg(g);
+               break;
+       case PMU_STATE_STARTED:
+               gk20a_dbg_pmu("PMU booted");
+               break;
+       default:
+               gk20a_dbg_pmu("invalid state");
+               break;
+       }
 }
 
-int gk20a_init_pmu_setup_hw2(struct gk20a *g)
+int gk20a_init_pmu_load_fecs(struct gk20a *g)
 {
        struct pmu_gk20a *pmu = &g->pmu;
-       struct mm_gk20a *mm = &g->mm;
-       struct vm_gk20a *vm = &mm->pmu.vm;
-       struct device *d = dev_from_gk20a(g);
        struct pmu_cmd cmd;
        u32 desc;
-       long remain;
-       int err;
-       bool status;
-       u32 size;
-       struct sg_table *sgt_pg_buf;
-       dma_addr_t iova;
-
+       int err = 0;
        gk20a_dbg_fn("");
 
-       if (!support_gk20a_pmu())
-               return 0;
-
-       size = 0;
-       err = gr_gk20a_fecs_get_reglist_img_size(g, &size);
-       if (err) {
-               gk20a_err(dev_from_gk20a(g),
-                       "fail to query fecs pg buffer size");
-               return err;
-       }
-
-       if (!pmu->sw_ready) {
-               pmu->pg_buf.cpuva = dma_alloc_coherent(d, size,
-                                               &iova,
-                                               GFP_KERNEL);
-               if (!pmu->pg_buf.cpuva) {
-                       gk20a_err(d, "failed to allocate memory\n");
-                       err = -ENOMEM;
-                       goto err;
-               }
-
-               pmu->pg_buf.iova = iova;
-               pmu->pg_buf.size = size;
-
-               err = gk20a_get_sgtable(d, &sgt_pg_buf,
-                                       pmu->pg_buf.cpuva,
-                                       pmu->pg_buf.iova,
-                                       size);
-               if (err) {
-                       gk20a_err(d, "failed to create sg table\n");
-                       goto err_free_pg_buf;
-               }
-
-               pmu->pg_buf.pmu_va = gk20a_gmmu_map(vm,
-                                       &sgt_pg_buf,
-                                       size,
-                                       0, /* flags */
-                                       gk20a_mem_flag_none);
-               if (!pmu->pg_buf.pmu_va) {
-                       gk20a_err(d, "failed to map fecs pg buffer");
-                       err = -ENOMEM;
-                       goto err_free_sgtable;
-               }
-
-               gk20a_free_sgtable(&sgt_pg_buf);
-       }
-
-       /*
-        * This is the actual point at which sw setup is complete, so set the
-        * sw_ready flag here.
-        */
-       pmu->sw_ready = true;
-
-       /* TBD: acquire pmu hw mutex */
-
-       /* TBD: post reset again? */
-
-       /* PMU_INIT message handler will send PG_INIT */
-       remain = wait_event_timeout(
-                       pmu->pg_wq,
-                       (status = (pmu->elpg_ready &&
-                               pmu->stat_dmem_offset != 0 &&
-                               pmu->elpg_stat == PMU_ELPG_STAT_OFF)),
-                       msecs_to_jiffies(gk20a_get_gr_idle_timeout(g)));
-       if (status == 0) {
-               gk20a_err(dev_from_gk20a(g),
-                       "PG_INIT_ACK failed, remaining timeout : 0x%lx", remain);
-               pmu_dump_falcon_stats(pmu);
-               return -EBUSY;
-       }
-
-       err = gr_gk20a_fecs_set_reglist_bind_inst(g, mm->pmu.inst_block.cpu_pa);
-       if (err) {
-               gk20a_err(dev_from_gk20a(g),
-                       "fail to bind pmu inst to gr");
-               return err;
-       }
-
-       err = gr_gk20a_fecs_set_reglist_virual_addr(g, pmu->pg_buf.pmu_va);
-       if (err) {
-               gk20a_err(dev_from_gk20a(g),
-                       "fail to set pg buffer pmu va");
-               return err;
-       }
-
        memset(&cmd, 0, sizeof(struct pmu_cmd));
        cmd.hdr.unit_id = PMU_UNIT_PG;
        cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_eng_buf_load);
@@ -1904,17 +1843,15 @@ int gk20a_init_pmu_setup_hw2(struct gk20a *g)
        gk20a_dbg_pmu("cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS");
        gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
                        pmu_handle_pg_buf_config_msg, pmu, &desc, ~0);
+       pmu->pmu_state = PMU_STATE_LOADING_PG_BUF;
+       return err;
+}
 
-       remain = wait_event_timeout(
-                       pmu->pg_wq,
-                       pmu->buf_loaded,
-                       msecs_to_jiffies(gk20a_get_gr_idle_timeout(g)));
-       if (!pmu->buf_loaded) {
-               gk20a_err(dev_from_gk20a(g),
-                       "PGENG FECS buffer load failed, remaining timeout : 0x%lx",
-                       remain);
-               return -EBUSY;
-       }
+static void pmu_setup_hw_load_zbc(struct gk20a *g)
+{
+       struct pmu_gk20a *pmu = &g->pmu;
+       struct pmu_cmd cmd;
+       u32 desc;
 
        memset(&cmd, 0, sizeof(struct pmu_cmd));
        cmd.hdr.unit_id = PMU_UNIT_PG;
@@ -1931,17 +1868,12 @@ int gk20a_init_pmu_setup_hw2(struct gk20a *g)
        gk20a_dbg_pmu("cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_ZBC");
        gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
                        pmu_handle_pg_buf_config_msg, pmu, &desc, ~0);
+       pmu->pmu_state = PMU_STATE_LOADING_ZBC;
+}
 
-       remain = wait_event_timeout(
-                       pmu->pg_wq,
-                       pmu->buf_loaded,
-                       msecs_to_jiffies(gk20a_get_gr_idle_timeout(g)));
-       if (!pmu->buf_loaded) {
-               gk20a_err(dev_from_gk20a(g),
-                       "PGENG ZBC buffer load failed, remaining timeout 0x%lx",
-                       remain);
-               return -EBUSY;
-       }
+static void pmu_setup_hw_enable_elpg(struct gk20a *g)
+{
+       struct pmu_gk20a *pmu = &g->pmu;
 
        /*
         * FIXME: To enable ELPG, we increase the PMU ext2priv timeout unit to
@@ -1952,17 +1884,11 @@ int gk20a_init_pmu_setup_hw2(struct gk20a *g)
        gk20a_writel(g, 0x10a164, 0x109ff);
 
        pmu->initialized = true;
-
-       /*
-        * We can't guarantee that gr code to enable ELPG will be
-        * invoked, so we explicitly call disable-enable here
-        * to enable elpg.
-        */
-       gk20a_pmu_disable_elpg(g);
+       pmu->pmu_state = PMU_STATE_STARTED;
 
        pmu->zbc_ready = true;
        /* Save zbc table after PMU is initialized. */
-       pmu_save_zbc(g, 0xf);
+       gr_gk20a_pmu_save_zbc(g, 0xf);
 
        if (g->elpg_enabled)
                gk20a_pmu_enable_elpg(g);
@@ -1975,17 +1901,7 @@ int gk20a_init_pmu_setup_hw2(struct gk20a *g)
                gk20a_aelpg_init_and_enable(g, PMU_AP_CTRL_ID_GRAPHICS);
        }
 
-       return 0;
-
- err_free_sgtable:
-       gk20a_free_sgtable(&sgt_pg_buf);
- err_free_pg_buf:
-       dma_free_coherent(d, size,
-               pmu->pg_buf.cpuva, pmu->pg_buf.iova);
-       pmu->pg_buf.cpuva = NULL;
-       pmu->pg_buf.iova = 0;
- err:
-       return err;
+       wake_up(&g->pmu.boot_wq);
 }
 
 int gk20a_init_pmu_support(struct gk20a *g)
@@ -2012,6 +1928,8 @@ int gk20a_init_pmu_support(struct gk20a *g)
                err = gk20a_init_pmu_setup_hw1(g);
                if (err)
                        return err;
+
+               pmu->pmu_state = PMU_STATE_STARTING;
        }
 
        return err;
@@ -2034,18 +1952,18 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
        switch (elpg_msg->msg) {
        case PMU_PG_ELPG_MSG_INIT_ACK:
                gk20a_dbg_pmu("INIT_PG is acknowledged from PMU");
-               pmu->elpg_ready = true;
-               wake_up(&pmu->pg_wq);
                break;
        case PMU_PG_ELPG_MSG_ALLOW_ACK:
                gk20a_dbg_pmu("ALLOW is acknowledged from PMU");
                pmu->elpg_stat = PMU_ELPG_STAT_ON;
-               wake_up(&pmu->pg_wq);
                break;
        case PMU_PG_ELPG_MSG_DISALLOW_ACK:
                gk20a_dbg_pmu("DISALLOW is acknowledged from PMU");
                pmu->elpg_stat = PMU_ELPG_STAT_OFF;
-               wake_up(&pmu->pg_wq);
+               if (pmu->pmu_state == PMU_STATE_ELPG_BOOTING) {
+                       pmu->pmu_state = PMU_STATE_ELPG_BOOTED;
+                       schedule_work(&pmu->pg_init);
+               }
                break;
        default:
                gk20a_err(dev_from_gk20a(g),
@@ -2072,21 +1990,22 @@ static void pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg,
        case PMU_PG_STAT_MSG_RESP_DMEM_OFFSET:
                gk20a_dbg_pmu("ALLOC_DMEM_OFFSET is acknowledged from PMU");
                pmu->stat_dmem_offset = msg->msg.pg.stat.data;
-               wake_up(&pmu->pg_wq);
                break;
        default:
                break;
        }
 }
 
-static int pmu_init_powergating(struct pmu_gk20a *pmu)
+static int pmu_init_powergating(struct gk20a *g)
 {
-       struct gk20a *g = pmu->g;
+       struct pmu_gk20a *pmu = &g->pmu;
        struct pmu_cmd cmd;
        u32 seq;
 
        gk20a_dbg_fn("");
 
+       mutex_lock(&pmu->isr_mutex);
+
        if (tegra_cpu_is_asim()) {
                /* TBD: calculate threshold for silicon */
                gk20a_writel(g, pwr_pmu_pg_idlefilth_r(ENGINE_GR_GK20A),
@@ -2101,6 +2020,8 @@ static int pmu_init_powergating(struct pmu_gk20a *pmu)
                                PMU_PG_POST_POWERUP_IDLE_THRESHOLD);
        }
 
+       gk20a_gr_wait_initialized(g);
+
        /* init ELPG */
        memset(&cmd, 0, sizeof(struct pmu_cmd));
        cmd.hdr.unit_id = PMU_UNIT_PG;
@@ -2129,7 +2050,7 @@ static int pmu_init_powergating(struct pmu_gk20a *pmu)
 
        /* disallow ELPG initially
           PMU ucode requires a disallow cmd before allow cmd */
-       pmu->elpg_stat = PMU_ELPG_STAT_ON; /* set for wait_event PMU_ELPG_STAT_OFF */
+       pmu->elpg_stat = PMU_ELPG_STAT_OFF; /* set for wait_event PMU_ELPG_STAT_OFF */
        memset(&cmd, 0, sizeof(struct pmu_cmd));
        cmd.hdr.unit_id = PMU_UNIT_PG;
        cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_elpg_cmd);
@@ -2142,7 +2063,12 @@ static int pmu_init_powergating(struct pmu_gk20a *pmu)
                        pmu_handle_pg_elpg_msg, pmu, &seq, ~0);
 
        /* start with elpg disabled until first enable call */
-       pmu->elpg_refcnt = 1;
+       pmu->elpg_refcnt = 0;
+
+       if (pmu->pmu_state == PMU_STATE_INIT_RECEIVED)
+               pmu->pmu_state = PMU_STATE_ELPG_BOOTING;
+
+       mutex_unlock(&pmu->isr_mutex);
 
        return 0;
 }
@@ -2257,6 +2183,7 @@ static int pmu_process_init_msg(struct pmu_gk20a *pmu,
        union pmu_init_msg_pmu *init;
        struct pmu_sha1_gid_data gid_data;
        u32 i, tail = 0;
+       gk20a_dbg_pmu("init received\n");
 
        tail = pwr_pmu_msgq_tail_val_v(
                gk20a_readl(g, pwr_pmu_msgq_tail_r()));
@@ -2314,6 +2241,9 @@ static int pmu_process_init_msg(struct pmu_gk20a *pmu,
                                PMU_DMEM_ALLOC_ALIGNMENT);
 
        pmu->pmu_ready = true;
+       pmu->pmu_state = PMU_STATE_INIT_RECEIVED;
+       schedule_work(&pmu->pg_init);
+       gk20a_dbg_pmu("init received end\n");
 
        return 0;
 }
@@ -2480,7 +2410,7 @@ static void pmu_handle_zbc_msg(struct gk20a *g, struct pmu_msg *msg,
        pmu->zbc_save_done = 1;
 }
 
-static void pmu_save_zbc(struct gk20a *g, u32 entries)
+void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
 {
        struct pmu_gk20a *pmu = &g->pmu;
        struct pmu_cmd cmd;
@@ -2506,12 +2436,6 @@ static void pmu_save_zbc(struct gk20a *g, u32 entries)
                gk20a_err(dev_from_gk20a(g), "ZBC save timeout");
 }
 
-void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
-{
-       if (g->pmu.zbc_ready)
-               pmu_save_zbc(g, entries);
-}
-
 static int pmu_perfmon_start_sampling(struct pmu_gk20a *pmu)
 {
        struct gk20a *g = pmu->g;
@@ -2650,7 +2574,6 @@ static int pmu_process_message(struct pmu_gk20a *pmu)
 
        if (unlikely(!pmu->pmu_ready)) {
                pmu_process_init_msg(pmu, &msg);
-               pmu_init_powergating(pmu);
                pmu_init_perfmon(pmu);
                return 0;
        }
@@ -2902,6 +2825,12 @@ void gk20a_pmu_isr(struct gk20a *g)
 
        gk20a_dbg_fn("");
 
+       mutex_lock(&pmu->isr_enable_lock);
+       if (!pmu->isr_enabled) {
+               mutex_unlock(&pmu->isr_enable_lock);
+               return;
+       }
+
        mutex_lock(&pmu->isr_mutex);
 
        mask = gk20a_readl(g, pwr_falcon_irqmask_r()) &
@@ -2911,8 +2840,10 @@ void gk20a_pmu_isr(struct gk20a *g)
 
        gk20a_dbg_pmu("received falcon interrupt: 0x%08x", intr);
 
-       if (!intr) {
+       if (!intr || pmu->pmu_state == PMU_STATE_OFF) {
+               gk20a_writel(g, pwr_falcon_irqsclr_r(), intr);
                mutex_unlock(&pmu->isr_mutex);
+               mutex_unlock(&pmu->isr_enable_lock);
                return;
        }
 
@@ -2945,6 +2876,7 @@ void gk20a_pmu_isr(struct gk20a *g)
        }
 
        mutex_unlock(&pmu->isr_mutex);
+       mutex_unlock(&pmu->isr_enable_lock);
 }
 
 static bool pmu_validate_cmd(struct pmu_gk20a *pmu, struct pmu_cmd *cmd,
@@ -3212,9 +3144,6 @@ int gk20a_pmu_enable_elpg(struct gk20a *g)
 
        gk20a_dbg_fn("");
 
-       if (!pmu->elpg_ready || !pmu->initialized)
-               goto exit;
-
        mutex_lock(&pmu->elpg_mutex);
 
        pmu->elpg_refcnt++;
@@ -3238,46 +3167,15 @@ int gk20a_pmu_enable_elpg(struct gk20a *g)
        if (pmu->elpg_stat != PMU_ELPG_STAT_OFF)
                goto exit_unlock;
 
-       /* if ELPG is not allowed right now, mark that it should be enabled
-        * immediately after it is allowed */
-       if (!pmu->elpg_enable_allow) {
-               pmu->elpg_stat = PMU_ELPG_STAT_OFF_ON_PENDING;
-               goto exit_unlock;
-       }
-
        ret = gk20a_pmu_enable_elpg_locked(g);
 
 exit_unlock:
        mutex_unlock(&pmu->elpg_mutex);
-exit:
        gk20a_dbg_fn("done");
        return ret;
 }
 
-static void pmu_elpg_enable_allow(struct work_struct *work)
-{
-       struct pmu_gk20a *pmu = container_of(to_delayed_work(work),
-                                       struct pmu_gk20a, elpg_enable);
-
-       gk20a_dbg_fn("");
-
-       mutex_lock(&pmu->elpg_mutex);
-
-       /* It is ok to enabled powergating now */
-       pmu->elpg_enable_allow = true;
-
-       /* do we have pending requests? */
-       if (pmu->elpg_stat == PMU_ELPG_STAT_OFF_ON_PENDING) {
-               pmu->elpg_stat = PMU_ELPG_STAT_OFF;
-               gk20a_pmu_enable_elpg_locked(pmu->g);
-       }
-
-       mutex_unlock(&pmu->elpg_mutex);
-
-       gk20a_dbg_fn("done");
-}
-
-static int gk20a_pmu_disable_elpg_defer_enable(struct gk20a *g, bool enable)
+int gk20a_pmu_disable_elpg(struct gk20a *g)
 {
        struct pmu_gk20a *pmu = &g->pmu;
        struct pmu_cmd cmd;
@@ -3286,12 +3184,6 @@ static int gk20a_pmu_disable_elpg_defer_enable(struct gk20a *g, bool enable)
 
        gk20a_dbg_fn("");
 
-       if (!pmu->elpg_ready || !pmu->initialized)
-               return 0;
-
-       /* remove the work from queue */
-       cancel_delayed_work_sync(&pmu->elpg_enable);
-
        mutex_lock(&pmu->elpg_mutex);
 
        pmu->elpg_refcnt--;
@@ -3357,25 +3249,12 @@ static int gk20a_pmu_disable_elpg_defer_enable(struct gk20a *g, bool enable)
        }
 
 exit_reschedule:
-       if (enable) {
-               pmu->elpg_enable_allow = false;
-               schedule_delayed_work(&pmu->elpg_enable,
-                       msecs_to_jiffies(PMU_ELPG_ENABLE_ALLOW_DELAY_MSEC));
-       } else
-               pmu->elpg_enable_allow = true;
-
-
 exit_unlock:
        mutex_unlock(&pmu->elpg_mutex);
        gk20a_dbg_fn("done");
        return ret;
 }
 
-int gk20a_pmu_disable_elpg(struct gk20a *g)
-{
-       return gk20a_pmu_disable_elpg_defer_enable(g, true);
-}
-
 int gk20a_pmu_perfmon_enable(struct gk20a *g, bool enable)
 {
        struct pmu_gk20a *pmu = &g->pmu;
@@ -3402,13 +3281,12 @@ int gk20a_pmu_destroy(struct gk20a *g)
                return 0;
 
        /* make sure the pending operations are finished before we continue */
-       cancel_delayed_work_sync(&pmu->elpg_enable);
        cancel_work_sync(&pmu->pg_init);
 
        gk20a_pmu_get_elpg_residency_gating(g, &elpg_ingating_time,
                &elpg_ungating_time, &gating_cnt);
 
-       gk20a_pmu_disable_elpg_defer_enable(g, false);
+       gk20a_pmu_disable_elpg(g);
        pmu->initialized = false;
 
        /* update the s/w ELPG residency counters */
@@ -3416,11 +3294,15 @@ int gk20a_pmu_destroy(struct gk20a *g)
        g->pg_ungating_time_us += (u64)elpg_ungating_time;
        g->pg_gating_cnt += gating_cnt;
 
+       mutex_lock(&pmu->isr_enable_lock);
        pmu_enable(pmu, false);
+       pmu->isr_enabled = false;
+       mutex_unlock(&pmu->isr_enable_lock);
+
+       pmu->pmu_state = PMU_STATE_OFF;
        pmu->pmu_ready = false;
        pmu->perfmon_ready = false;
        pmu->zbc_ready = false;
-       pmu->elpg_ready = false;
 
        gk20a_dbg_fn("done");
        return 0;
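
A reviewer-oriented note on the new isr_enable_lock/isr_enabled pair: it is a plain enable gate so that a late interrupt cannot race with PMU reset or teardown. A minimal userspace analogue is sketched below; pthread mutexes stand in for the kernel mutexes, and all names here (pmu_toy, toy_isr, toy_disable) are illustrative, not driver code.

#include <pthread.h>
#include <stdbool.h>

/* Toy analogue of the gating added in gk20a_pmu_isr()/gk20a_pmu_destroy():
 * the handler checks the gate under isr_enable_lock and bails out when the
 * PMU is disabled, and only then takes the regular isr_mutex. */
struct pmu_toy {
	pthread_mutex_t isr_enable_lock;
	pthread_mutex_t isr_mutex;
	bool isr_enabled;
};

static void toy_isr(struct pmu_toy *p)
{
	pthread_mutex_lock(&p->isr_enable_lock);
	if (!p->isr_enabled) {			/* reset or teardown in progress */
		pthread_mutex_unlock(&p->isr_enable_lock);
		return;
	}
	pthread_mutex_lock(&p->isr_mutex);
	/* ... read, clear and dispatch the interrupt ... */
	pthread_mutex_unlock(&p->isr_mutex);
	pthread_mutex_unlock(&p->isr_enable_lock);
}

static void toy_disable(struct pmu_toy *p)
{
	pthread_mutex_lock(&p->isr_enable_lock);
	p->isr_enabled = false;			/* as in gk20a_pmu_destroy() */
	pthread_mutex_unlock(&p->isr_enable_lock);
}

int main(void)
{
	struct pmu_toy p = {
		.isr_enable_lock = PTHREAD_MUTEX_INITIALIZER,
		.isr_mutex = PTHREAD_MUTEX_INITIALIZER,
		.isr_enabled = true,
	};

	toy_isr(&p);		/* serviced: gate is open */
	toy_disable(&p);
	toy_isr(&p);		/* returns early: gate is closed */
	return 0;
}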