return ret;
}
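+/*
+ * Handler for NVHOST_IOCTL_CHANNEL_SET_ERROR_NOTIFIER: forwards the
+ * user arguments to the per-SoC channel op, which must be wired up
+ * for any channel that can reach this ioctl (enforced by BUG_ON).
+ */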
+static int nvhost_ioctl_channel_set_error_notifier(
+ struct nvhost_channel_userctx *ctx,
+ struct nvhost_set_error_notifier *args)
+{
+ int ret;
+ BUG_ON(!channel_op(ctx->ch).set_error_notifier);
+ ret = channel_op(ctx->ch).set_error_notifier(ctx->hwctx, args);
+ return ret;
+}
+
static int nvhost_ioctl_channel_submit_gpfifo(
struct nvhost_channel_userctx *ctx,
struct nvhost_submit_gpfifo_args *args)
case NVHOST_IOCTL_CHANNEL_ZCULL_BIND:
err = nvhost_ioctl_channel_zcull_bind(priv, (void *)buf);
break;
+ case NVHOST_IOCTL_CHANNEL_SET_ERROR_NOTIFIER:
+ err = nvhost_ioctl_channel_set_error_notifier(priv,
+ (void *)buf);
+ break;
#if defined(CONFIG_TEGRA_GPU_CYCLE_STATS)
case NVHOST_IOCTL_CHANNEL_CYCLE_STATS:
err = nvhost_ioctl_channel_cycle_stats(priv, (void *)buf);
}
#endif
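+/*
+ * Pin and kernel-map the user-supplied buffer and record where the
+ * notification record lives inside it; a previously registered
+ * notifier buffer is released before the new one is installed.
+ */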
+int gk20a_init_error_notifier(struct nvhost_hwctx *ctx,
+ u32 memhandle, u64 offset)
+{
+ struct channel_gk20a *ch = ctx->priv;
+ struct platform_device *dev = ch->ch->dev;
+ void *va;
+ struct mem_mgr *memmgr;
+ struct mem_handle *handle_ref;
+
+ if (!memhandle) {
+ pr_err("gk20a_init_error_notifier: invalid memory handle\n");
+ return -EINVAL;
+ }
+
+ memmgr = gk20a_channel_mem_mgr(ch);
+ handle_ref = nvhost_memmgr_get(memmgr, memhandle, dev);
+ if (IS_ERR(handle_ref)) {
+ pr_err("Invalid handle: %u\n", memhandle);
+ return -EINVAL;
+ }
+
+ /* release any previously registered notifier buffer */
+ if (ctx->error_notifier_ref)
+ gk20a_free_error_notifiers(ctx);
+
+ /* map handle */
+ va = nvhost_memmgr_mmap(handle_ref);
+ if (!va) {
+ nvhost_memmgr_put(memmgr, handle_ref);
+ pr_err("Cannot map notifier handle\n");
+ return -ENOMEM;
+ }
+
+ /* set hwctx notifiers pointer */
+ ctx->error_notifier_ref = handle_ref;
+ ctx->error_notifier = va + offset;
+ ctx->error_notifier_va = va;
+ return 0;
+}
+
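+/*
+ * Mark the channel as timed out and, if a notifier buffer is
+ * registered, publish the failure to userspace: a nanosecond
+ * timestamp, the FIFO idle-timeout error code in info32, and a
+ * non-zero status word.
+ */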
+void gk20a_set_timeout_error(struct nvhost_hwctx *ctx)
+{
+ ctx->has_timedout = true;
+ if (ctx->error_notifier_ref) {
+ struct timespec time_data;
+ u64 nsec;
+ getnstimeofday(&time_data);
+ nsec = timespec_to_ns(&time_data);
+ ctx->error_notifier->time_stamp.nanoseconds[0] =
+ (u32)nsec;
+ ctx->error_notifier->time_stamp.nanoseconds[1] =
+ (u32)(nsec >> 32);
+ ctx->error_notifier->info32 =
+ NVHOST_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT;
+ ctx->error_notifier->status = 0xffff;
+ pr_err("Timeout notifier is set\n");
+ }
+}
+
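+/* Unmap and unpin the notifier buffer, if one is registered. */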
+void gk20a_free_error_notifiers(struct nvhost_hwctx *ctx)
+{
+ if (ctx->error_notifier_ref) {
+ struct channel_gk20a *ch = ctx->priv;
+ struct mem_mgr *memmgr = gk20a_channel_mem_mgr(ch);
+ nvhost_memmgr_munmap(ctx->error_notifier_ref,
+ ctx->error_notifier_va);
+ nvhost_memmgr_put(memmgr, ctx->error_notifier_ref);
+ ctx->error_notifier_ref = NULL;
+ ctx->error_notifier = NULL;
+ ctx->error_notifier_va = NULL;
+ }
+}
+
void gk20a_free_channel(struct nvhost_hwctx *ctx, bool finish)
{
struct channel_gk20a *ch = ctx->priv;
gk20a_disable_channel(ch, finish, timeout);
+ gk20a_free_error_notifiers(ctx);
+
/* release channel ctx */
gk20a_free_channel_ctx(ch);
struct nvhost_gpfifo *gpfifo, u32 num_entries,
struct nvhost_fence *fence, u32 flags);
void gk20a_free_channel(struct nvhost_hwctx *ctx, bool finish);
+int gk20a_init_error_notifier(struct nvhost_hwctx *ctx, u32 memhandle,
+ u64 offset);
+void gk20a_free_error_notifiers(struct nvhost_hwctx *ctx);
void gk20a_disable_channel(struct channel_gk20a *ch,
bool wait_for_finish,
unsigned long finish_timeout);
nvhost_dbg_fn("");
nvhost_module_busy(ctx->channel->dev);
+
if (ctx->priv)
gk20a_free_channel(ctx, true);
+
nvhost_module_idle(ctx->channel->dev);
kfree(ctx);
u32 num_entries,
struct nvhost_fence *fence,
u32 flags);
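+ /* register an error notifier buffer for this channel */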
+ int (*set_error_notifier)(struct nvhost_hwctx *hwctx,
+ struct nvhost_set_error_notifier *args);
int (*wait)(struct nvhost_hwctx *,
struct nvhost_wait_args *args);
#if defined(CONFIG_TEGRA_GPU_CYCLE_STATS)
bool has_timedout;
struct mem_mgr *memmgr;
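+ /* error notifier state: pinned handle, record pointer, mapping base */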
+ struct mem_handle *error_notifier_ref;
+ struct nvhost_notification *error_notifier;
+ void *error_notifier_va;
+
u32 save_incrs;
u32 save_thresh;
u32 save_slots;
return gk20a_alloc_channel_gpfifo(hwctx->priv, args);
}
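+/* Thin wrapper: map the ioctl arguments onto the gk20a implementation. */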
+static int t124_channel_set_error_notifier(struct nvhost_hwctx *hwctx,
+ struct nvhost_set_error_notifier *args)
+{
+ return gk20a_init_error_notifier(hwctx, args->mem, args->offset);
+}
+
static int t124_channel_submit_gpfifo(struct nvhost_hwctx *hwctx,
struct nvhost_gpfifo *gpfifo, u32 num_entries,
struct nvhost_fence *fence, u32 flags)
ch->ops.alloc_gpfifo = t124_channel_alloc_gpfifo;
ch->ops.submit_gpfifo = t124_channel_submit_gpfifo;
ch->ops.wait = t124_channel_wait;
+ ch->ops.set_error_notifier = t124_channel_set_error_notifier;
#if defined(CONFIG_TEGRA_GPU_CYCLE_STATS)
ch->ops.cycle_stats = t124_channel_cycle_stats;
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
+#include <linux/time.h>
struct nvhost_master;
struct nvhost_hwctx;
NVHOST_POWER_SYSFS_ATTRIB_MAX
};
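+/*
+ * Error notification record the driver writes into the buffer
+ * registered via NVHOST_IOCTL_CHANNEL_SET_ERROR_NOTIFIER; the byte
+ * offsets noted below are part of the userspace-visible layout.
+ */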
+struct nvhost_notification {
+ struct { /* 0000- */
+ __u32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 */
+ } time_stamp; /* -0007 */
+ __u32 info32; /* info returned depends on method 0008-000b */
+#define NVHOST_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT 8
+ __u16 info16; /* info returned depends on method 000c-000d */
+ __u16 status; /* user sets bit 15, NV sets status 000e-000f */
+};
+
struct nvhost_clock {
char *name;
unsigned long default_rate;
__u32 padding;
};
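+/*
+ * NVHOST_IOCTL_CHANNEL_SET_ERROR_NOTIFIER argument: @mem names the
+ * notifier buffer by memory handle and @offset locates the
+ * notification record inside it.  @size describes the buffer but is
+ * not consumed by the current implementation.
+ */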
+struct nvhost_set_error_notifier {
+ __u64 offset;
+ __u64 size;
+ __u32 mem;
+ __u32 padding;
+};
+
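+/*
+ * Illustrative userspace usage (a sketch, not part of this change;
+ * channel_fd and notifier_mem are hypothetical names for a channel
+ * file descriptor and a memmgr buffer handle):
+ *
+ *	struct nvhost_set_error_notifier args = {
+ *		.offset = 0,
+ *		.size = sizeof(struct nvhost_notification),
+ *		.mem = notifier_mem,
+ *	};
+ *	ioctl(channel_fd, NVHOST_IOCTL_CHANNEL_SET_ERROR_NOTIFIER, &args);
+ */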
struct nvhost_ctrl_module_regrdwr_args {
__u32 id;
__u32 num_offsets;
_IOR(NVHOST_IOCTL_MAGIC, 109, struct nvhost_free_obj_ctx_args)
#define NVHOST_IOCTL_CHANNEL_ZCULL_BIND \
_IOWR(NVHOST_IOCTL_MAGIC, 110, struct nvhost_zcull_bind_args)
+#define NVHOST_IOCTL_CHANNEL_SET_ERROR_NOTIFIER \
+ _IOWR(NVHOST_IOCTL_MAGIC, 111, struct nvhost_set_error_notifier)
-#define NVHOST_IOCTL_CHANNEL_LAST \
- _IOC_NR(NVHOST_IOCTL_CHANNEL_ZCULL_BIND)
-
+#define NVHOST_IOCTL_CHANNEL_LAST \
+ _IOC_NR(NVHOST_IOCTL_CHANNEL_SET_ERROR_NOTIFIER)
+
#define NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE sizeof(struct nvhost_submit_args)
struct nvhost_ctrl_syncpt_read_args {