Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied...
Linus Torvalds [Fri, 11 Dec 2009 05:56:47 +0000 (21:56 -0800)]
* 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (189 commits)
  drm/radeon/kms: fix warning about cur_placement being uninitialised.
  drm/ttm: Print debug information on memory manager when eviction fails
  drm: Add memory manager debug function
  drm/radeon/kms: restore surface registers on resume.
  drm/radeon/kms/r600/r700: fallback gracefully on ucode failure
  drm/ttm: Initialize eviction placement in case the driver callback doesn't
  drm/radeon/kms: cleanup structure and module if initialization fails
  drm/radeon/kms: actualy set the eviction placements we choose
  drm/radeon/kms: Fix NULL ptr dereference
  drm/radeon/kms/avivo: add support for new pll selection algo
  drm/radeon/kms/avivo: fix some bugs in the display bandwidth setup
  drm/radeon/kms: fix return value from fence function.
  drm/radeon: Remove tests for -ERESTART from the TTM code.
  drm/ttm: Have the TTM code return -ERESTARTSYS instead of -ERESTART.
  drm/radeon/kms: Convert radeon to new TTM validation API (V2)
  drm/ttm: Rework validation & memory space allocation (V3)
  drm: Add search/get functions to get a block in a specific range
  drm/radeon/kms: fix avivo tiling regression since radeon object rework
  drm/i915: Remove a debugging printk from hangcheck
  drm/radeon/kms: make sure i2c id matches
  ...

1  2 
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/intel_fb.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/radeon/atombios.h
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon_fb.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/ttm/ttm_bo_util.c

@@@ -125,6 -125,15 +125,15 @@@ static struct drm_prop_enum_list drm_tv
  DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
                 drm_tv_subconnector_enum_list)
  
+ static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
+       { DRM_MODE_DIRTY_OFF,      "Off"      },
+       { DRM_MODE_DIRTY_ON,       "On"       },
+       { DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
+ };
+ DRM_ENUM_NAME_FN(drm_get_dirty_info_name,
+                drm_dirty_info_enum_list)
  struct drm_conn_prop_enum_list {
        int type;
        char *name;
@@@ -247,7 -256,8 +256,8 @@@ static void drm_mode_object_put(struct 
        mutex_unlock(&dev->mode_config.idr_mutex);
  }
  
- void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type)
+ struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+               uint32_t id, uint32_t type)
  {
        struct drm_mode_object *obj = NULL;
  
@@@ -272,7 -282,7 +282,7 @@@ EXPORT_SYMBOL(drm_mode_object_find)
   * functions & device file and adds it to the master fd list.
   *
   * RETURNS:
 - * Zero on success, error code on falure.
 + * Zero on success, error code on failure.
   */
  int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
                         const struct drm_framebuffer_funcs *funcs)
@@@ -802,6 -812,36 +812,36 @@@ int drm_mode_create_dithering_property(
  EXPORT_SYMBOL(drm_mode_create_dithering_property);
  
  /**
+  * drm_mode_create_dirty_property - create dirty property
+  * @dev: DRM device
+  *
+  * Called by a driver the first time it's needed, must be attached to desired
+  * connectors.
+  */
+ int drm_mode_create_dirty_info_property(struct drm_device *dev)
+ {
+       struct drm_property *dirty_info;
+       int i;
+       if (dev->mode_config.dirty_info_property)
+               return 0;
+       dirty_info =
+               drm_property_create(dev, DRM_MODE_PROP_ENUM |
+                                   DRM_MODE_PROP_IMMUTABLE,
+                                   "dirty",
+                                   ARRAY_SIZE(drm_dirty_info_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_dirty_info_enum_list); i++)
+               drm_property_add_enum(dirty_info, i,
+                                     drm_dirty_info_enum_list[i].type,
+                                     drm_dirty_info_enum_list[i].name);
+       dev->mode_config.dirty_info_property = dirty_info;
+       return 0;
+ }
+ EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
+ /**
   * drm_mode_config_init - initialize DRM mode_configuration structure
   * @dev: DRM device
   *
        return ret;
  }
  
+ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+                          void *data, struct drm_file *file_priv)
+ {
+       struct drm_clip_rect __user *clips_ptr;
+       struct drm_clip_rect *clips = NULL;
+       struct drm_mode_fb_dirty_cmd *r = data;
+       struct drm_mode_object *obj;
+       struct drm_framebuffer *fb;
+       unsigned flags;
+       int num_clips;
+       int ret = 0;
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
+       if (!obj) {
+               DRM_ERROR("invalid framebuffer id\n");
+               ret = -EINVAL;
+               goto out_err1;
+       }
+       fb = obj_to_fb(obj);
+       num_clips = r->num_clips;
+       clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
+       if (!num_clips != !clips_ptr) {
+               ret = -EINVAL;
+               goto out_err1;
+       }
+       flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
+       /* If userspace annotates copy, clips must come in pairs */
+       if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
+               ret = -EINVAL;
+               goto out_err1;
+       }
+       if (num_clips && clips_ptr) {
+               clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+               if (!clips) {
+                       ret = -ENOMEM;
+                       goto out_err1;
+               }
+               ret = copy_from_user(clips, clips_ptr,
+                                    num_clips * sizeof(*clips));
+               if (ret)
+                       goto out_err2;
+       }
+       if (fb->funcs->dirty) {
+               ret = fb->funcs->dirty(fb, flags, r->color, clips, num_clips);
+       } else {
+               ret = -ENOSYS;
+               goto out_err2;
+       }
+ out_err2:
+       kfree(clips);
+ out_err1:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+ }
  /**
   * drm_fb_release - remove and free the FBs on this file
   * @filp: file * from the ioctl
@@@ -2328,7 -2433,7 +2433,7 @@@ int drm_mode_connector_property_set_ioc
        } else if (connector->funcs->set_property)
                ret = connector->funcs->set_property(connector, property, out_resp->value);
  
 -      /* store the property value if succesful */
 +      /* store the property value if successful */
        if (!ret)
                drm_connector_property_set_value(connector, property, out_resp->value);
  out:
        mutex_unlock(&dev->mode_config.mutex);
        return ret;
  }
+ int drm_mode_page_flip_ioctl(struct drm_device *dev,
+                            void *data, struct drm_file *file_priv)
+ {
+       struct drm_mode_crtc_page_flip *page_flip = data;
+       struct drm_mode_object *obj;
+       struct drm_crtc *crtc;
+       struct drm_framebuffer *fb;
+       struct drm_pending_vblank_event *e = NULL;
+       unsigned long flags;
+       int ret = -EINVAL;
+       if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
+           page_flip->reserved != 0)
+               return -EINVAL;
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
+       if (!obj)
+               goto out;
+       crtc = obj_to_crtc(obj);
+       if (crtc->funcs->page_flip == NULL)
+               goto out;
+       obj = drm_mode_object_find(dev, page_flip->fb_id, DRM_MODE_OBJECT_FB);
+       if (!obj)
+               goto out;
+       fb = obj_to_fb(obj);
+       if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+               ret = -ENOMEM;
+               spin_lock_irqsave(&dev->event_lock, flags);
+               if (file_priv->event_space < sizeof e->event) {
+                       spin_unlock_irqrestore(&dev->event_lock, flags);
+                       goto out;
+               }
+               file_priv->event_space -= sizeof e->event;
+               spin_unlock_irqrestore(&dev->event_lock, flags);
+               e = kzalloc(sizeof *e, GFP_KERNEL);
+               if (e == NULL) {
+                       spin_lock_irqsave(&dev->event_lock, flags);
+                       file_priv->event_space += sizeof e->event;
+                       spin_unlock_irqrestore(&dev->event_lock, flags);
+                       goto out;
+               }
+               e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
+               e->event.base.length = sizeof e->event;
+               e->event.user_data = page_flip->user_data;
+               e->base.event = &e->event.base;
+               e->base.file_priv = file_priv;
+               e->base.destroy =
+                       (void (*) (struct drm_pending_event *)) kfree;
+       }
+       ret = crtc->funcs->page_flip(crtc, fb, e);
+       if (ret) {
+               spin_lock_irqsave(&dev->event_lock, flags);
+               file_priv->event_space += sizeof e->event;
+               spin_unlock_irqrestore(&dev->event_lock, flags);
+               kfree(e);
+       }
+ out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+ }
@@@ -1288,6 -1288,7 +1288,7 @@@ i915_gem_create_mmap_offset(struct drm_
        list->hash.key = list->file_offset_node->start;
        if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
                DRM_ERROR("failed to add to map hash\n");
+               ret = -ENOMEM;
                goto out_free_mm;
        }
  
@@@ -1309,7 -1310,7 +1310,7 @@@ out_free_list
   * i915_gem_release_mmap - remove physical page mappings
   * @obj: obj in question
   *
 - * Preserve the reservation of the mmaping with the DRM core code, but
 + * Preserve the reservation of the mmapping with the DRM core code, but
   * relinquish ownership of the pages back to the system.
   *
   * It is vital that we remove the page mapping if we have mapped a tiled
@@@ -1583,7 -1584,7 +1584,7 @@@ i915_gem_object_move_to_inactive(struc
   *
   * Returned sequence numbers are nonzero on success.
   */
- static uint32_t
+ uint32_t
  i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
                 uint32_t flush_domains)
  {
        OUT_RING(MI_USER_INTERRUPT);
        ADVANCE_LP_RING();
  
-       DRM_DEBUG("%d\n", seqno);
+       DRM_DEBUG_DRIVER("%d\n", seqno);
  
        request->seqno = seqno;
        request->emitted_jiffies = jiffies;
@@@ -1820,12 -1821,8 +1821,8 @@@ i915_gem_retire_work_handler(struct wor
        mutex_unlock(&dev->struct_mutex);
  }
  
- /**
-  * Waits for a sequence number to be signaled, and cleans up the
-  * request and object lists appropriately for that event.
-  */
- static int
- i915_wait_request(struct drm_device *dev, uint32_t seqno)
+ int
+ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 ier;
                return -EIO;
  
        if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
-               if (IS_IGDNG(dev))
+               if (IS_IRONLAKE(dev))
                        ier = I915_READ(DEIER) | I915_READ(GTIER);
                else
                        ier = I915_READ(IER);
  
                dev_priv->mm.waiting_gem_seqno = seqno;
                i915_user_irq_get(dev);
-               ret = wait_event_interruptible(dev_priv->irq_queue,
-                                              i915_seqno_passed(i915_get_gem_seqno(dev),
-                                                                seqno) ||
-                                              atomic_read(&dev_priv->mm.wedged));
+               if (interruptible)
+                       ret = wait_event_interruptible(dev_priv->irq_queue,
+                               i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
+                               atomic_read(&dev_priv->mm.wedged));
+               else
+                       wait_event(dev_priv->irq_queue,
+                               i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
+                               atomic_read(&dev_priv->mm.wedged));
                i915_user_irq_put(dev);
                dev_priv->mm.waiting_gem_seqno = 0;
  
        return ret;
  }
  
+ /**
+  * Waits for a sequence number to be signaled, and cleans up the
+  * request and object lists appropriately for that event.
+  */
+ static int
+ i915_wait_request(struct drm_device *dev, uint32_t seqno)
+ {
+       return i915_do_wait_request(dev, seqno, 1);
+ }
  static void
  i915_gem_flush(struct drm_device *dev,
               uint32_t invalidate_domains,
  #endif
                BEGIN_LP_RING(2);
                OUT_RING(cmd);
-               OUT_RING(0); /* noop */
+               OUT_RING(MI_NOOP);
                ADVANCE_LP_RING();
        }
  }
@@@ -2760,6 -2772,22 +2772,22 @@@ i915_gem_object_flush_cpu_write_domain(
                                            old_write_domain);
  }
  
+ void
+ i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
+ {
+       switch (obj->write_domain) {
+       case I915_GEM_DOMAIN_GTT:
+               i915_gem_object_flush_gtt_write_domain(obj);
+               break;
+       case I915_GEM_DOMAIN_CPU:
+               i915_gem_object_flush_cpu_write_domain(obj);
+               break;
+       default:
+               i915_gem_object_flush_gpu_write_domain(obj);
+               break;
+       }
+ }
  /**
   * Moves a single object to the GTT read, and possibly write domain.
   *
@@@ -3525,6 -3553,41 +3553,41 @@@ i915_gem_check_execbuffer (struct drm_i
        return 0;
  }
  
+ static int
+ i915_gem_wait_for_pending_flip(struct drm_device *dev,
+                              struct drm_gem_object **object_list,
+                              int count)
+ {
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv;
+       DEFINE_WAIT(wait);
+       int i, ret = 0;
+       for (;;) {
+               prepare_to_wait(&dev_priv->pending_flip_queue,
+                               &wait, TASK_INTERRUPTIBLE);
+               for (i = 0; i < count; i++) {
+                       obj_priv = object_list[i]->driver_private;
+                       if (atomic_read(&obj_priv->pending_flip) > 0)
+                               break;
+               }
+               if (i == count)
+                       break;
+               if (!signal_pending(current)) {
+                       mutex_unlock(&dev->struct_mutex);
+                       schedule();
+                       mutex_lock(&dev->struct_mutex);
+                       continue;
+               }
+               ret = -ERESTARTSYS;
+               break;
+       }
+       finish_wait(&dev_priv->pending_flip_queue, &wait);
+       return ret;
+ }
  int
  i915_gem_execbuffer(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
        int ret, ret2, i, pinned = 0;
        uint64_t exec_offset;
        uint32_t seqno, flush_domains, reloc_index;
-       int pin_tries;
+       int pin_tries, flips;
  
  #if WATCH_EXEC
        DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
                return -EINVAL;
        }
        /* Copy in the exec list from userland */
-       exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
-       object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
+       exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
+       object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
        if (exec_list == NULL || object_list == NULL) {
                DRM_ERROR("Failed to allocate exec or object list "
                          "for %d buffers\n",
        i915_verify_inactive(dev, __FILE__, __LINE__);
  
        if (atomic_read(&dev_priv->mm.wedged)) {
-               DRM_ERROR("Execbuf while wedged\n");
                mutex_unlock(&dev->struct_mutex);
                ret = -EIO;
                goto pre_mutex_err;
        }
  
        if (dev_priv->mm.suspended) {
-               DRM_ERROR("Execbuf while VT-switched.\n");
                mutex_unlock(&dev->struct_mutex);
                ret = -EBUSY;
                goto pre_mutex_err;
        }
  
        /* Look up object handles */
+       flips = 0;
        for (i = 0; i < args->buffer_count; i++) {
                object_list[i] = drm_gem_object_lookup(dev, file_priv,
                                                       exec_list[i].handle);
                        goto err;
                }
                obj_priv->in_execbuffer = true;
+               flips += atomic_read(&obj_priv->pending_flip);
+       }
+       if (flips > 0) {
+               ret = i915_gem_wait_for_pending_flip(dev, object_list,
+                                                    args->buffer_count);
+               if (ret)
+                       goto err;
        }
  
        /* Pin and relocate */
@@@ -4356,7 -4426,7 +4426,7 @@@ i915_gem_init_hws(struct drm_device *de
        memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
        I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
        I915_READ(HWS_PGA); /* posting read */
-       DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
+       DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
  
        return 0;
  }
@@@ -4614,8 -4684,8 +4684,8 @@@ i915_gem_load(struct drm_device *dev
                        for (i = 0; i < 8; i++)
                                I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
        }
        i915_gem_detect_bit_6_swizzle(dev);
+       init_waitqueue_head(&dev_priv->pending_flip_queue);
  }
  
  /*
@@@ -4790,7 -4860,7 +4860,7 @@@ i915_gem_phys_pwrite(struct drm_device 
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
  
-       DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
+       DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
        ret = copy_from_user(obj_addr, user_data, args->size);
        if (ret)
                return -EFAULT;
@@@ -70,7 -70,7 +70,7 @@@ static struct drm_fb_helper_funcs intel
  
  
  /**
 - * Curretly it is assumed that the old framebuffer is reused.
 + * Currently it is assumed that the old framebuffer is reused.
   *
   * LOCKING
   * caller should hold the mode config lock.
@@@ -230,8 -230,9 +230,9 @@@ static int intelfb_create(struct drm_de
        par->intel_fb = intel_fb;
  
        /* To allow resizeing without swapping buffers */
-       DRM_DEBUG("allocated %dx%d fb: 0x%08x, bo %p\n", intel_fb->base.width,
-                 intel_fb->base.height, obj_priv->gtt_offset, fbo);
+       DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
+                       intel_fb->base.width, intel_fb->base.height,
+                       obj_priv->gtt_offset, fbo);
  
        mutex_unlock(&dev->struct_mutex);
        return 0;
@@@ -249,7 -250,7 +250,7 @@@ int intelfb_probe(struct drm_device *de
  {
        int ret;
  
-       DRM_DEBUG("\n");
+       DRM_DEBUG_KMS("\n");
        ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create);
        return ret;
  }
@@@ -56,7 -56,7 +56,7 @@@ static void intel_lvds_set_backlight(st
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 blc_pwm_ctl, reg;
  
-       if (IS_IGDNG(dev))
+       if (IS_IRONLAKE(dev))
                reg = BLC_PWM_CPU_CTL;
        else
                reg = BLC_PWM_CTL;
@@@ -74,7 -74,7 +74,7 @@@ static u32 intel_lvds_get_max_backlight
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 reg;
  
-       if (IS_IGDNG(dev))
+       if (IS_IRONLAKE(dev))
                reg = BLC_PWM_PCH_CTL2;
        else
                reg = BLC_PWM_CTL;
@@@ -91,7 -91,7 +91,7 @@@ static void intel_lvds_set_power(struc
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_status, ctl_reg, status_reg;
  
-       if (IS_IGDNG(dev)) {
+       if (IS_IRONLAKE(dev)) {
                ctl_reg = PCH_PP_CONTROL;
                status_reg = PCH_PP_STATUS;
        } else {
@@@ -137,7 -137,7 +137,7 @@@ static void intel_lvds_save(struct drm_
        u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
        u32 pwm_ctl_reg;
  
-       if (IS_IGDNG(dev)) {
+       if (IS_IRONLAKE(dev)) {
                pp_on_reg = PCH_PP_ON_DELAYS;
                pp_off_reg = PCH_PP_OFF_DELAYS;
                pp_ctl_reg = PCH_PP_CONTROL;
@@@ -174,7 -174,7 +174,7 @@@ static void intel_lvds_restore(struct d
        u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
        u32 pwm_ctl_reg;
  
-       if (IS_IGDNG(dev)) {
+       if (IS_IRONLAKE(dev)) {
                pp_on_reg = PCH_PP_ON_DELAYS;
                pp_off_reg = PCH_PP_OFF_DELAYS;
                pp_ctl_reg = PCH_PP_CONTROL;
@@@ -297,7 -297,7 +297,7 @@@ static bool intel_lvds_mode_fixup(struc
        }
  
        /* full screen scale for now */
-       if (IS_IGDNG(dev))
+       if (IS_IRONLAKE(dev))
                goto out;
  
        /* 965+ wants fuzzy fitting */
         * to register description and PRM.
         * Change the value here to see the borders for debugging
         */
-       if (!IS_IGDNG(dev)) {
+       if (!IS_IRONLAKE(dev)) {
                I915_WRITE(BCLRPAT_A, 0);
                I915_WRITE(BCLRPAT_B, 0);
        }
@@@ -548,7 -548,7 +548,7 @@@ static void intel_lvds_prepare(struct d
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 reg;
  
-       if (IS_IGDNG(dev))
+       if (IS_IRONLAKE(dev))
                reg = BLC_PWM_CPU_CTL;
        else
                reg = BLC_PWM_CTL;
@@@ -587,7 -587,7 +587,7 @@@ static void intel_lvds_mode_set(struct 
         * settings.
         */
  
-       if (IS_IGDNG(dev))
+       if (IS_IRONLAKE(dev))
                return;
  
        /*
@@@ -899,7 -899,7 +899,7 @@@ static int intel_lid_present(void
  
        acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
                                ACPI_UINT32_MAX,
 -                              check_lid_device, &lid_present, NULL);
 +                              check_lid_device, NULL, &lid_present, NULL);
  
        return lid_present;
  }
@@@ -914,6 -914,101 +914,101 @@@ static int intel_lid_present(void
  #endif
  
  /**
+  * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID
+  * @dev: drm device
+  * @connector: LVDS connector
+  *
+  * Find the reduced downclock for LVDS in EDID.
+  */
+ static void intel_find_lvds_downclock(struct drm_device *dev,
+                               struct drm_connector *connector)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_display_mode *scan, *panel_fixed_mode;
+       int temp_downclock;
+       panel_fixed_mode = dev_priv->panel_fixed_mode;
+       temp_downclock = panel_fixed_mode->clock;
+       mutex_lock(&dev->mode_config.mutex);
+       list_for_each_entry(scan, &connector->probed_modes, head) {
+               /*
+                * If one mode has the same resolution with the fixed_panel
+                * mode while they have the different refresh rate, it means
+                * that the reduced downclock is found for the LVDS. In such
+                * case we can set the different FPx0/1 to dynamically select
+                * between low and high frequency.
+                */
+               if (scan->hdisplay == panel_fixed_mode->hdisplay &&
+                       scan->hsync_start == panel_fixed_mode->hsync_start &&
+                       scan->hsync_end == panel_fixed_mode->hsync_end &&
+                       scan->htotal == panel_fixed_mode->htotal &&
+                       scan->vdisplay == panel_fixed_mode->vdisplay &&
+                       scan->vsync_start == panel_fixed_mode->vsync_start &&
+                       scan->vsync_end == panel_fixed_mode->vsync_end &&
+                       scan->vtotal == panel_fixed_mode->vtotal) {
+                       if (scan->clock < temp_downclock) {
+                               /*
+                                * The downclock is already found. But we
+                                * expect to find the lower downclock.
+                                */
+                               temp_downclock = scan->clock;
+                       }
+               }
+       }
+       mutex_unlock(&dev->mode_config.mutex);
+       if (temp_downclock < panel_fixed_mode->clock) {
+               /* We found the downclock for LVDS. */
+               dev_priv->lvds_downclock_avail = 1;
+               dev_priv->lvds_downclock = temp_downclock;
+               DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
+                               "Normal clock %dKhz, downclock %dKhz\n",
+                               panel_fixed_mode->clock, temp_downclock);
+       }
+       return;
+ }
+ /*
+  * Enumerate the child dev array parsed from VBT to check whether
+  * the LVDS is present.
+  * If it is present, return 1.
+  * If it is not present, return false.
+  * If no child dev is parsed from VBT, it assumes that the LVDS is present.
+  * Note: The addin_offset should also be checked for LVDS panel.
+  * Only when it is non-zero, it is assumed that it is present.
+  */
+ static int lvds_is_present_in_vbt(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct child_device_config *p_child;
+       int i, ret;
+       if (!dev_priv->child_dev_num)
+               return 1;
+       ret = 0;
+       for (i = 0; i < dev_priv->child_dev_num; i++) {
+               p_child = dev_priv->child_dev + i;
+               /*
+                * If the device type is not LFP, continue.
+                * If the device type is 0x22, it is also regarded as LFP.
+                */
+               if (p_child->device_type != DEVICE_TYPE_INT_LFP &&
+                       p_child->device_type != DEVICE_TYPE_LFP)
+                       continue;
+               /* The addin_offset should be checked. Only when it is
+                * non-zero, it is regarded as present.
+                */
+               if (p_child->addin_offset) {
+                       ret = 1;
+                       break;
+               }
+       }
+       return ret;
+ }
+ /**
   * intel_lvds_init - setup LVDS connectors on this device
   * @dev: drm device
   *
@@@ -936,21 -1031,20 +1031,20 @@@ void intel_lvds_init(struct drm_device 
        if (dmi_check_system(intel_no_lvds))
                return;
  
-       /* Assume that any device without an ACPI LID device also doesn't
-        * have an integrated LVDS.  We would be better off parsing the BIOS
-        * to get a reliable indicator, but that code isn't written yet.
-        *
-        * In the case of all-in-one desktops using LVDS that we've seen,
-        * they're using SDVO LVDS.
+       /*
+        * Assume LVDS is present if there's an ACPI lid device or if the
+        * device is present in the VBT.
         */
-       if (!intel_lid_present())
+       if (!lvds_is_present_in_vbt(dev) && !intel_lid_present()) {
+               DRM_DEBUG_KMS("LVDS is not present in VBT and no lid detected\n");
                return;
+       }
  
-       if (IS_IGDNG(dev)) {
+       if (IS_IRONLAKE(dev)) {
                if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
                        return;
                if (dev_priv->edp_support) {
-                       DRM_DEBUG("disable LVDS for eDP support\n");
+                       DRM_DEBUG_KMS("disable LVDS for eDP support\n");
                        return;
                }
                gpio = PCH_GPIOC;
                        dev_priv->panel_fixed_mode =
                                drm_mode_duplicate(dev, scan);
                        mutex_unlock(&dev->mode_config.mutex);
+                       intel_find_lvds_downclock(dev, connector);
                        goto out;
                }
                mutex_unlock(&dev->mode_config.mutex);
         * correct mode.
         */
  
-       /* IGDNG: FIXME if still fail, not try pipe mode now */
-       if (IS_IGDNG(dev))
+       /* Ironlake: FIXME if still fail, not try pipe mode now */
+       if (IS_IRONLAKE(dev))
                goto failed;
  
        lvds = I915_READ(LVDS);
                goto failed;
  
  out:
-       if (IS_IGDNG(dev)) {
+       if (IS_IRONLAKE(dev)) {
                u32 pwm;
                /* make sure PWM is enabled */
                pwm = I915_READ(BLC_PWM_CPU_CTL2);
        }
        dev_priv->lid_notifier.notifier_call = intel_lid_notify;
        if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
-               DRM_DEBUG("lid notifier registration failed\n");
+               DRM_DEBUG_KMS("lid notifier registration failed\n");
                dev_priv->lid_notifier.notifier_call = NULL;
        }
        drm_sysfs_connector_add(connector);
@@@ -1093,5 -1188,6 +1188,6 @@@ failed
        if (intel_output->ddc_bus)
                intel_i2c_destroy(intel_output->ddc_bus);
        drm_connector_cleanup(connector);
+       drm_encoder_cleanup(encoder);
        kfree(intel_output);
  }
@@@ -36,8 -36,6 +36,6 @@@
  #include "i915_drv.h"
  #include "intel_sdvo_regs.h"
  
- #undef SDVO_DEBUG
  static char *tv_format_names[] = {
        "NTSC_M"   , "NTSC_J"  , "NTSC_443",
        "PAL_B"    , "PAL_D"   , "PAL_G"   ,
@@@ -356,7 -354,6 +354,6 @@@ static const struct _sdvo_cmd_name 
  #define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
  #define SDVO_PRIV(output)   ((struct intel_sdvo_priv *) (output)->dev_priv)
  
- #ifdef SDVO_DEBUG
  static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
                                   void *args, int args_len)
  {
                DRM_LOG_KMS("(%02X)", cmd);
        DRM_LOG_KMS("\n");
  }
- #else
- #define intel_sdvo_debug_write(o, c, a, l)
- #endif
  
  static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd,
                                 void *args, int args_len)
        intel_sdvo_write_byte(intel_output, SDVO_I2C_OPCODE, cmd);
  }
  
- #ifdef SDVO_DEBUG
  static const char *cmd_status_names[] = {
        "Power on",
        "Success",
@@@ -427,9 -420,6 +420,6 @@@ static void intel_sdvo_debug_response(s
                DRM_LOG_KMS("(??? %d)", status);
        DRM_LOG_KMS("\n");
  }
- #else
- #define intel_sdvo_debug_response(o, r, l, s)
- #endif
  
  static u8 intel_sdvo_read_response(struct intel_output *intel_output,
                                   void *response, int response_len)
@@@ -1627,6 -1617,10 +1617,10 @@@ static enum drm_connector_status intel_
  
        intel_sdvo_write_cmd(intel_output,
                             SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
+       if (sdvo_priv->is_tv) {
+               /* add 30ms delay when the output type is SDVO-TV */
+               mdelay(30);
+       }
        status = intel_sdvo_read_response(intel_output, &response, 2);
  
        DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
@@@ -2726,7 -2720,7 +2720,7 @@@ bool intel_sdvo_init(struct drm_device 
        /* Wrap with our custom algo which switches to DDC mode */
        intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
  
 -      /* In defaut case sdvo lvds is false */
 +      /* In default case sdvo lvds is false */
        intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps);
  
        if (intel_sdvo_output_setup(intel_output,
@@@ -1141,7 -1141,7 +1141,7 @@@ typedef struct _LVDS_ENCODER_CONTROL_PA
  /* ucTableFormatRevision=1,ucTableContentRevision=2 */
  typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2 {
        USHORT usPixelClock;    /*  in 10KHz; for bios convenient */
 -      UCHAR ucMisc;           /*  see PANEL_ENCODER_MISC_xx defintions below */
 +      UCHAR ucMisc;           /*  see PANEL_ENCODER_MISC_xx definitions below */
        UCHAR ucAction;         /*  0: turn off encoder */
        /*  1: setup and turn on encoder */
        UCHAR ucTruncate;       /*  bit0=0: Disable truncate */
@@@ -1424,7 -1424,7 +1424,7 @@@ typedef struct _ATOM_MULTIMEDIA_CONFIG_
  /*  Structures used in FirmwareInfoTable */
  /****************************************************************************/
  
 -/*  usBIOSCapability Defintion: */
 +/*  usBIOSCapability Definition: */
  /*  Bit 0 = 0: Bios image is not Posted, =1:Bios image is Posted; */
  /*  Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported; */
  /*  Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported; */
@@@ -2386,7 -2386,7 +2386,7 @@@ typedef struct _ATOM_ANALOG_TV_INFO_V1_
  } ATOM_ANALOG_TV_INFO_V1_2;
  
  /**************************************************************************/
 -/*  VRAM usage and their defintions */
 +/*  VRAM usage and their definitions */
  
  /*  One chunk of VRAM used by Bios are for HWICON surfaces,EDID data. */
  /*  Current Mode timing and Dail Timing and/or STD timing data EACH device. They can be broken down as below. */
@@@ -2680,7 -2680,7 +2680,7 @@@ typedef struct _ATOM_I2C_RECORD 
  typedef struct _ATOM_HPD_INT_RECORD {
        ATOM_COMMON_RECORD_HEADER sheader;
        UCHAR ucHPDIntGPIOID;   /* Corresponding block in GPIO_PIN_INFO table gives the pin info */
-       UCHAR ucPluggged_PinState;
+       UCHAR ucPlugged_PinState;
  } ATOM_HPD_INT_RECORD;
  
  typedef struct _ATOM_OUTPUT_PROTECTION_RECORD {
@@@ -3046,7 -3046,7 +3046,7 @@@ typedef struct _ATOM_ASIC_INTERNAL_SS_I
  #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC     2
  #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
  
 -/* Byte aligned defintion for BIOS usage */
 +/* Byte aligned definition for BIOS usage */
  #define ATOM_S0_CRT1_MONOb0             0x01
  #define ATOM_S0_CRT1_COLORb0            0x02
  #define ATOM_S0_CRT1_MASKb0             (ATOM_S0_CRT1_MONOb0+ATOM_S0_CRT1_COLORb0)
  #define ATOM_S2_DISPLAY_ROTATION_DEGREE_SHIFT 30
  #define ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK   0xC0000000L
  
 -/* Byte aligned defintion for BIOS usage */
 +/* Byte aligned definition for BIOS usage */
  #define ATOM_S2_TV1_STANDARD_MASKb0     0x0F
  #define ATOM_S2_CURRENT_BL_LEVEL_MASKb1 0xFF
  #define ATOM_S2_CRT1_DPMS_STATEb2       0x01
  #define ATOM_S3_ALLOW_FAST_PWR_SWITCH   0x40000000L
  #define ATOM_S3_RQST_GPU_USE_MIN_PWR    0x80000000L
  
 -/* Byte aligned defintion for BIOS usage */
 +/* Byte aligned definition for BIOS usage */
  #define ATOM_S3_CRT1_ACTIVEb0           0x01
  #define ATOM_S3_LCD1_ACTIVEb0           0x02
  #define ATOM_S3_TV1_ACTIVEb0            0x04
  #define ATOM_S4_LCD1_REFRESH_MASK       0x0000FF00L
  #define ATOM_S4_LCD1_REFRESH_SHIFT      8
  
 -/* Byte aligned defintion for BIOS usage */
 +/* Byte aligned definition for BIOS usage */
  #define ATOM_S4_LCD1_PANEL_ID_MASKb0    0x0FF
  #define ATOM_S4_LCD1_REFRESH_MASKb1             ATOM_S4_LCD1_PANEL_ID_MASKb0
  #define ATOM_S4_VRAM_INFO_MASKb2        ATOM_S4_LCD1_PANEL_ID_MASKb0
  #define ATOM_S6_VRI_BRIGHTNESS_CHANGE       0x40000000L
  #define ATOM_S6_CONFIG_DISPLAY_CHANGE_MASK  0x80000000L
  
 -/* Byte aligned defintion for BIOS usage */
 +/* Byte aligned definition for BIOS usage */
  #define ATOM_S6_DEVICE_CHANGEb0         0x01
  #define ATOM_S6_SCALER_CHANGEb0         0x02
  #define ATOM_S6_LID_CHANGEb0            0x04
  
  #define PFP_UCODE_SIZE 576
  #define PM4_UCODE_SIZE 1792
+ #define RLC_UCODE_SIZE 768
  #define R700_PFP_UCODE_SIZE 848
  #define R700_PM4_UCODE_SIZE 1360
+ #define R700_RLC_UCODE_SIZE 1024
  
  /* Firmware Names */
  MODULE_FIRMWARE("radeon/R600_pfp.bin");
@@@ -62,6 -64,8 +64,8 @@@ MODULE_FIRMWARE("radeon/RV730_pfp.bin")
  MODULE_FIRMWARE("radeon/RV730_me.bin");
  MODULE_FIRMWARE("radeon/RV710_pfp.bin");
  MODULE_FIRMWARE("radeon/RV710_me.bin");
+ MODULE_FIRMWARE("radeon/R600_rlc.bin");
+ MODULE_FIRMWARE("radeon/R700_rlc.bin");
  
  int r600_debugfs_mc_info_init(struct radeon_device *rdev);
  
@@@ -70,6 -74,281 +74,281 @@@ int r600_mc_wait_for_idle(struct radeon
  void r600_gpu_init(struct radeon_device *rdev);
  void r600_fini(struct radeon_device *rdev);
  
+ /* hpd for digital panel detect/disconnect */
+ bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+ {
+       bool connected = false;
+       if (ASIC_IS_DCE3(rdev)) {
+               switch (hpd) {
+               case RADEON_HPD_1:
+                       if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
+                               connected = true;
+                       break;
+               case RADEON_HPD_2:
+                       if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
+                               connected = true;
+                       break;
+               case RADEON_HPD_3:
+                       if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
+                               connected = true;
+                       break;
+               case RADEON_HPD_4:
+                       if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
+                               connected = true;
+                       break;
+                       /* DCE 3.2 */
+               case RADEON_HPD_5:
+                       if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
+                               connected = true;
+                       break;
+               case RADEON_HPD_6:
+                       if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
+                               connected = true;
+                       break;
+               default:
+                       break;
+               }
+       } else {
+               switch (hpd) {
+               case RADEON_HPD_1:
+                       if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
+                               connected = true;
+                       break;
+               case RADEON_HPD_2:
+                       if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
+                               connected = true;
+                       break;
+               case RADEON_HPD_3:
+                       if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
+                               connected = true;
+                       break;
+               default:
+                       break;
+               }
+       }
+       return connected;
+ }
+ void r600_hpd_set_polarity(struct radeon_device *rdev,
+                          enum radeon_hpd_id hpd)
+ {
+       u32 tmp;
+       bool connected = r600_hpd_sense(rdev, hpd);
+       if (ASIC_IS_DCE3(rdev)) {
+               switch (hpd) {
+               case RADEON_HPD_1:
+                       tmp = RREG32(DC_HPD1_INT_CONTROL);
+                       if (connected)
+                               tmp &= ~DC_HPDx_INT_POLARITY;
+                       else
+                               tmp |= DC_HPDx_INT_POLARITY;
+                       WREG32(DC_HPD1_INT_CONTROL, tmp);
+                       break;
+               case RADEON_HPD_2:
+                       tmp = RREG32(DC_HPD2_INT_CONTROL);
+                       if (connected)
+                               tmp &= ~DC_HPDx_INT_POLARITY;
+                       else
+                               tmp |= DC_HPDx_INT_POLARITY;
+                       WREG32(DC_HPD2_INT_CONTROL, tmp);
+                       break;
+               case RADEON_HPD_3:
+                       tmp = RREG32(DC_HPD3_INT_CONTROL);
+                       if (connected)
+                               tmp &= ~DC_HPDx_INT_POLARITY;
+                       else
+                               tmp |= DC_HPDx_INT_POLARITY;
+                       WREG32(DC_HPD3_INT_CONTROL, tmp);
+                       break;
+               case RADEON_HPD_4:
+                       tmp = RREG32(DC_HPD4_INT_CONTROL);
+                       if (connected)
+                               tmp &= ~DC_HPDx_INT_POLARITY;
+                       else
+                               tmp |= DC_HPDx_INT_POLARITY;
+                       WREG32(DC_HPD4_INT_CONTROL, tmp);
+                       break;
+                       /* DCE 3.2 */
+               case RADEON_HPD_5:
+                       tmp = RREG32(DC_HPD5_INT_CONTROL);
+                       if (connected)
+                               tmp &= ~DC_HPDx_INT_POLARITY;
+                       else
+                               tmp |= DC_HPDx_INT_POLARITY;
+                       WREG32(DC_HPD5_INT_CONTROL, tmp);
+                       break;
+               case RADEON_HPD_6:
+                       tmp = RREG32(DC_HPD6_INT_CONTROL);
+                       if (connected)
+                               tmp &= ~DC_HPDx_INT_POLARITY;
+                       else
+                               tmp |= DC_HPDx_INT_POLARITY;
+                       WREG32(DC_HPD6_INT_CONTROL, tmp);
+                       break;
+               default:
+                       break;
+               }
+       } else {
+               switch (hpd) {
+               case RADEON_HPD_1:
+                       tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
+                       if (connected)
+                               tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
+                       else
+                               tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
+                       WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+                       break;
+               case RADEON_HPD_2:
+                       tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
+                       if (connected)
+                               tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
+                       else
+                               tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
+                       WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+                       break;
+               case RADEON_HPD_3:
+                       tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
+                       if (connected)
+                               tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
+                       else
+                               tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
+                       WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
+                       break;
+               default:
+                       break;
+               }
+       }
+ }
+ void r600_hpd_init(struct radeon_device *rdev)
+ {
+       struct drm_device *dev = rdev->ddev;
+       struct drm_connector *connector;
+       if (ASIC_IS_DCE3(rdev)) {
+               u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
+               if (ASIC_IS_DCE32(rdev))
+                       tmp |= DC_HPDx_EN;
+               list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+                       struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+                       switch (radeon_connector->hpd.hpd) {
+                       case RADEON_HPD_1:
+                               WREG32(DC_HPD1_CONTROL, tmp);
+                               rdev->irq.hpd[0] = true;
+                               break;
+                       case RADEON_HPD_2:
+                               WREG32(DC_HPD2_CONTROL, tmp);
+                               rdev->irq.hpd[1] = true;
+                               break;
+                       case RADEON_HPD_3:
+                               WREG32(DC_HPD3_CONTROL, tmp);
+                               rdev->irq.hpd[2] = true;
+                               break;
+                       case RADEON_HPD_4:
+                               WREG32(DC_HPD4_CONTROL, tmp);
+                               rdev->irq.hpd[3] = true;
+                               break;
+                               /* DCE 3.2 */
+                       case RADEON_HPD_5:
+                               WREG32(DC_HPD5_CONTROL, tmp);
+                               rdev->irq.hpd[4] = true;
+                               break;
+                       case RADEON_HPD_6:
+                               WREG32(DC_HPD6_CONTROL, tmp);
+                               rdev->irq.hpd[5] = true;
+                               break;
+                       default:
+                               break;
+                       }
+               }
+       } else {
+               list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+                       struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+                       switch (radeon_connector->hpd.hpd) {
+                       case RADEON_HPD_1:
+                               WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
+                               rdev->irq.hpd[0] = true;
+                               break;
+                       case RADEON_HPD_2:
+                               WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
+                               rdev->irq.hpd[1] = true;
+                               break;
+                       case RADEON_HPD_3:
+                               WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
+                               rdev->irq.hpd[2] = true;
+                               break;
+                       default:
+                               break;
+                       }
+               }
+       }
+       r600_irq_set(rdev);
+ }
+ void r600_hpd_fini(struct radeon_device *rdev)
+ {
+       struct drm_device *dev = rdev->ddev;
+       struct drm_connector *connector;
+       if (ASIC_IS_DCE3(rdev)) {
+               list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+                       struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+                       switch (radeon_connector->hpd.hpd) {
+                       case RADEON_HPD_1:
+                               WREG32(DC_HPD1_CONTROL, 0);
+                               rdev->irq.hpd[0] = false;
+                               break;
+                       case RADEON_HPD_2:
+                               WREG32(DC_HPD2_CONTROL, 0);
+                               rdev->irq.hpd[1] = false;
+                               break;
+                       case RADEON_HPD_3:
+                               WREG32(DC_HPD3_CONTROL, 0);
+                               rdev->irq.hpd[2] = false;
+                               break;
+                       case RADEON_HPD_4:
+                               WREG32(DC_HPD4_CONTROL, 0);
+                               rdev->irq.hpd[3] = false;
+                               break;
+                               /* DCE 3.2 */
+                       case RADEON_HPD_5:
+                               WREG32(DC_HPD5_CONTROL, 0);
+                               rdev->irq.hpd[4] = false;
+                               break;
+                       case RADEON_HPD_6:
+                               WREG32(DC_HPD6_CONTROL, 0);
+                               rdev->irq.hpd[5] = false;
+                               break;
+                       default:
+                               break;
+                       }
+               }
+       } else {
+               list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+                       struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+                       switch (radeon_connector->hpd.hpd) {
+                       case RADEON_HPD_1:
+                               WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
+                               rdev->irq.hpd[0] = false;
+                               break;
+                       case RADEON_HPD_2:
+                               WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
+                               rdev->irq.hpd[1] = false;
+                               break;
+                       case RADEON_HPD_3:
+                               WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
+                               rdev->irq.hpd[2] = false;
+                               break;
+                       default:
+                               break;
+                       }
+               }
+       }
+ }
  /*
   * R600 PCIE GART
   */
@@@ -180,7 -459,7 +459,7 @@@ int r600_pcie_gart_enable(struct radeon
  void r600_pcie_gart_disable(struct radeon_device *rdev)
  {
        u32 tmp;
-       int i;
+       int i, r;
  
        /* Disable all tables */
        for (i = 0; i < 7; i++)
        WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
        if (rdev->gart.table.vram.robj) {
-               radeon_object_kunmap(rdev->gart.table.vram.robj);
-               radeon_object_unpin(rdev->gart.table.vram.robj);
+               r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+               if (likely(r == 0)) {
+                       radeon_bo_kunmap(rdev->gart.table.vram.robj);
+                       radeon_bo_unpin(rdev->gart.table.vram.robj);
+                       radeon_bo_unreserve(rdev->gart.table.vram.robj);
+               }
        }
  }
  
@@@ -394,11 -677,11 +677,11 @@@ int r600_mc_init(struct radeon_device *
                 * AGP so that GPU can catch out of VRAM/AGP access
                 */
                if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
 -                      /* Enought place before */
 +                      /* Enough place before */
                        rdev->mc.vram_location = rdev->mc.gtt_location -
                                                        rdev->mc.mc_vram_size;
                } else if (tmp > rdev->mc.mc_vram_size) {
 -                      /* Enought place after */
 +                      /* Enough place after */
                        rdev->mc.vram_location = rdev->mc.gtt_location +
                                                        rdev->mc.gtt_size;
                } else {
@@@ -1101,6 -1384,10 +1384,10 @@@ void r600_pciep_wreg(struct radeon_devi
        (void)RREG32(PCIE_PORT_DATA);
  }
  
+ void r600_hdp_flush(struct radeon_device *rdev)
+ {
+       WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+ }
  
  /*
   * CP & Ring
@@@ -1110,11 -1397,12 +1397,12 @@@ void r600_cp_stop(struct radeon_device 
        WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
  }
  
- int r600_cp_init_microcode(struct radeon_device *rdev)
+ int r600_init_microcode(struct radeon_device *rdev)
  {
        struct platform_device *pdev;
        const char *chip_name;
-       size_t pfp_req_size, me_req_size;
+       const char *rlc_chip_name;
+       size_t pfp_req_size, me_req_size, rlc_req_size;
        char fw_name[30];
        int err;
  
        }
  
        switch (rdev->family) {
-       case CHIP_R600: chip_name = "R600"; break;
-       case CHIP_RV610: chip_name = "RV610"; break;
-       case CHIP_RV630: chip_name = "RV630"; break;
-       case CHIP_RV620: chip_name = "RV620"; break;
-       case CHIP_RV635: chip_name = "RV635"; break;
-       case CHIP_RV670: chip_name = "RV670"; break;
+       case CHIP_R600:
+               chip_name = "R600";
+               rlc_chip_name = "R600";
+               break;
+       case CHIP_RV610:
+               chip_name = "RV610";
+               rlc_chip_name = "R600";
+               break;
+       case CHIP_RV630:
+               chip_name = "RV630";
+               rlc_chip_name = "R600";
+               break;
+       case CHIP_RV620:
+               chip_name = "RV620";
+               rlc_chip_name = "R600";
+               break;
+       case CHIP_RV635:
+               chip_name = "RV635";
+               rlc_chip_name = "R600";
+               break;
+       case CHIP_RV670:
+               chip_name = "RV670";
+               rlc_chip_name = "R600";
+               break;
        case CHIP_RS780:
-       case CHIP_RS880: chip_name = "RS780"; break;
-       case CHIP_RV770: chip_name = "RV770"; break;
+       case CHIP_RS880:
+               chip_name = "RS780";
+               rlc_chip_name = "R600";
+               break;
+       case CHIP_RV770:
+               chip_name = "RV770";
+               rlc_chip_name = "R700";
+               break;
        case CHIP_RV730:
-       case CHIP_RV740: chip_name = "RV730"; break;
-       case CHIP_RV710: chip_name = "RV710"; break;
+       case CHIP_RV740:
+               chip_name = "RV730";
+               rlc_chip_name = "R700";
+               break;
+       case CHIP_RV710:
+               chip_name = "RV710";
+               rlc_chip_name = "R700";
+               break;
        default: BUG();
        }
  
        if (rdev->family >= CHIP_RV770) {
                pfp_req_size = R700_PFP_UCODE_SIZE * 4;
                me_req_size = R700_PM4_UCODE_SIZE * 4;
+               rlc_req_size = R700_RLC_UCODE_SIZE * 4;
        } else {
                pfp_req_size = PFP_UCODE_SIZE * 4;
                me_req_size = PM4_UCODE_SIZE * 12;
+               rlc_req_size = RLC_UCODE_SIZE * 4;
        }
  
-       DRM_INFO("Loading %s CP Microcode\n", chip_name);
+       DRM_INFO("Loading %s Microcode\n", chip_name);
  
        snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
        err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
                       rdev->me_fw->size, fw_name);
                err = -EINVAL;
        }
+       snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
+       err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
+       if (err)
+               goto out;
+       if (rdev->rlc_fw->size != rlc_req_size) {
+               printk(KERN_ERR
+                      "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
+                      rdev->rlc_fw->size, fw_name);
+               err = -EINVAL;
+       }
  out:
        platform_device_unregister(pdev);
  
                rdev->pfp_fw = NULL;
                release_firmware(rdev->me_fw);
                rdev->me_fw = NULL;
+               release_firmware(rdev->rlc_fw);
+               rdev->rlc_fw = NULL;
        }
        return err;
  }
@@@ -1381,10 -1715,16 +1715,16 @@@ int r600_ring_test(struct radeon_devic
  
  void r600_wb_disable(struct radeon_device *rdev)
  {
+       int r;
        WREG32(SCRATCH_UMSK, 0);
        if (rdev->wb.wb_obj) {
-               radeon_object_kunmap(rdev->wb.wb_obj);
-               radeon_object_unpin(rdev->wb.wb_obj);
+               r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+               if (unlikely(r != 0))
+                       return;
+               radeon_bo_kunmap(rdev->wb.wb_obj);
+               radeon_bo_unpin(rdev->wb.wb_obj);
+               radeon_bo_unreserve(rdev->wb.wb_obj);
        }
  }
  
@@@ -1392,7 -1732,7 +1732,7 @@@ void r600_wb_fini(struct radeon_device 
  {
        r600_wb_disable(rdev);
        if (rdev->wb.wb_obj) {
-               radeon_object_unref(&rdev->wb.wb_obj);
+               radeon_bo_unref(&rdev->wb.wb_obj);
                rdev->wb.wb = NULL;
                rdev->wb.wb_obj = NULL;
        }
@@@ -1403,22 -1743,29 +1743,29 @@@ int r600_wb_enable(struct radeon_devic
        int r;
  
        if (rdev->wb.wb_obj == NULL) {
-               r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
-                               RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj);
+               r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
+                               RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
                if (r) {
-                       dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r);
+                       dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
                        return r;
                }
-               r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+               r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+               if (unlikely(r != 0)) {
+                       r600_wb_fini(rdev);
+                       return r;
+               }
+               r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
                                &rdev->wb.gpu_addr);
                if (r) {
-                       dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r);
+                       radeon_bo_unreserve(rdev->wb.wb_obj);
+                       dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
                        r600_wb_fini(rdev);
                        return r;
                }
-               r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+               r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+               radeon_bo_unreserve(rdev->wb.wb_obj);
                if (r) {
-                       dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r);
+                       dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
                        r600_wb_fini(rdev);
                        return r;
                }
  void r600_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence)
  {
+       /* Also consider EVENT_WRITE_EOP.  it handles the interrupts + timestamps + events */
        /* Emit fence sequence & fire IRQ */
        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
        radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
        radeon_ring_write(rdev, fence->seq);
+       /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
+       radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
+       radeon_ring_write(rdev, RB_INT_STAT);
  }
  
  int r600_copy_dma(struct radeon_device *rdev,
@@@ -1459,18 -1810,6 +1810,6 @@@ int r600_copy_blit(struct radeon_devic
        return 0;
  }
  
- int r600_irq_process(struct radeon_device *rdev)
- {
-       /* FIXME: implement */
-       return 0;
- }
- int r600_irq_set(struct radeon_device *rdev)
- {
-       /* FIXME: implement */
-       return 0;
- }
  int r600_set_surface_reg(struct radeon_device *rdev, int reg,
                         uint32_t tiling_flags, uint32_t pitch,
                         uint32_t offset, uint32_t obj_size)
@@@ -1506,6 -1845,14 +1845,14 @@@ int r600_startup(struct radeon_device *
  {
        int r;
  
+       if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+               r = r600_init_microcode(rdev);
+               if (r) {
+                       DRM_ERROR("Failed to load firmware!\n");
+                       return r;
+               }
+       }
        r600_mc_program(rdev);
        if (rdev->flags & RADEON_IS_AGP) {
                r600_agp_enable(rdev);
        }
        r600_gpu_init(rdev);
  
-       r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
-                             &rdev->r600_blit.shader_gpu_addr);
+       r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+       if (unlikely(r != 0))
+               return r;
+       r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+                       &rdev->r600_blit.shader_gpu_addr);
+       radeon_bo_unreserve(rdev->r600_blit.shader_obj);
        if (r) {
-               DRM_ERROR("failed to pin blit object %d\n", r);
+               dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
                return r;
        }
  
+       /* Enable IRQ */
+       r = r600_irq_init(rdev);
+       if (r) {
+               DRM_ERROR("radeon: IH init failed (%d).\n", r);
+               radeon_irq_kms_fini(rdev);
+               return r;
+       }
+       r600_irq_set(rdev);
        r = radeon_ring_init(rdev, rdev->cp.ring_size);
        if (r)
                return r;
@@@ -1583,13 -1943,19 +1943,19 @@@ int r600_resume(struct radeon_device *r
  
  int r600_suspend(struct radeon_device *rdev)
  {
+       int r;
        /* FIXME: we should wait for ring to be empty */
        r600_cp_stop(rdev);
        rdev->cp.ready = false;
        r600_wb_disable(rdev);
        r600_pcie_gart_disable(rdev);
        /* unpin shaders bo */
-       radeon_object_unpin(rdev->r600_blit.shader_obj);
+       r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+       if (unlikely(r != 0))
+               return r;
+       radeon_bo_unpin(rdev->r600_blit.shader_obj);
+       radeon_bo_unreserve(rdev->r600_blit.shader_obj);
        return 0;
  }
  
@@@ -1627,7 -1993,11 +1993,11 @@@ int r600_init(struct radeon_device *rde
        if (r)
                return r;
        /* Post card if necessary */
-       if (!r600_card_posted(rdev) && rdev->bios) {
+       if (!r600_card_posted(rdev)) {
+               if (!rdev->bios) {
+                       dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+                       return -EINVAL;
+               }
                DRM_INFO("GPU not posted. posting now...\n");
                atom_asic_init(rdev->mode_info.atom_context);
        }
        if (r)
                return r;
        /* Memory manager */
-       r = radeon_object_init(rdev);
+       r = radeon_bo_init(rdev);
        if (r)
                return r;
+       r = radeon_irq_kms_init(rdev);
+       if (r)
+               return r;
        rdev->cp.ring_obj = NULL;
        r600_ring_init(rdev, 1024 * 1024);
  
-       if (!rdev->me_fw || !rdev->pfp_fw) {
-               r = r600_cp_init_microcode(rdev);
-               if (r) {
-                       DRM_ERROR("Failed to load firmware!\n");
-                       return r;
-               }
-       }
+       rdev->ih.ring_obj = NULL;
+       r600_ih_ring_init(rdev, 64 * 1024);
  
        r = r600_pcie_gart_init(rdev);
        if (r)
                return r;
  
-       rdev->accel_working = true;
        r = r600_blit_init(rdev);
        if (r) {
-               DRM_ERROR("radeon: failled blitter (%d).\n", r);
+               DRM_ERROR("radeon: failed blitter (%d).\n", r);
                return r;
        }
  
+       rdev->accel_working = true;
        r = r600_startup(rdev);
        if (r) {
                r600_suspend(rdev);
        if (rdev->accel_working) {
                r = radeon_ib_pool_init(rdev);
                if (r) {
-                       DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
+                       DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
                        rdev->accel_working = false;
                }
                r = r600_ib_test(rdev);
                if (r) {
-                       DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+                       DRM_ERROR("radeon: failed testing IB (%d).\n", r);
                        rdev->accel_working = false;
                }
        }
@@@ -1704,6 -2074,8 +2074,8 @@@ void r600_fini(struct radeon_device *rd
        r600_suspend(rdev);
  
        r600_blit_fini(rdev);
+       r600_irq_fini(rdev);
+       radeon_irq_kms_fini(rdev);
        radeon_ring_fini(rdev);
        r600_wb_fini(rdev);
        r600_pcie_gart_fini(rdev);
        radeon_clocks_fini(rdev);
        if (rdev->flags & RADEON_IS_AGP)
                radeon_agp_fini(rdev);
-       radeon_object_fini(rdev);
+       radeon_bo_fini(rdev);
        radeon_atombios_fini(rdev);
        kfree(rdev->bios);
        rdev->bios = NULL;
@@@ -1798,8 -2170,657 +2170,657 @@@ int r600_ib_test(struct radeon_device *
        return r;
  }
  
+ /*
+  * Interrupts
+  *
+  * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works pretty
+  * the same as the CP ring buffer, but in reverse.  Rather than the CPU
+  * writing to the ring and the GPU consuming, the GPU writes to the ring
+  * and host consumes.  As the host irq handler processes interrupts, it
+  * increments the rptr.  When the rptr catches up with the wptr, all the
+  * current interrupts have been processed.
+  */
+ /*
+  * r600_ih_ring_init - record the IH (interrupt handler) ring size.
+  *
+  * Rounds @ring_size to a power of two (via drm_order of the dword count)
+  * and stores it in rdev->ih.  No memory is allocated here; the buffer is
+  * created later by r600_ih_ring_alloc().
+  */
+ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
+ {
+       u32 rb_bufsz;
+       /* Align ring size */
+       rb_bufsz = drm_order(ring_size / 4);
+       ring_size = (1 << rb_bufsz) * 4;
+       rdev->ih.ring_size = ring_size;
+       /* NOTE(review): mask of 4 - 1 presumably reflects the 4-dword
+        * (16-byte) IH vector entries -- confirm against users of align_mask */
+       rdev->ih.align_mask = 4 - 1;
+ }
+ /*
+  * r600_ih_ring_alloc - allocate, pin and map the IH ring buffer in GTT.
+  *
+  * Creates the ring BO on first call (if ring_obj is already set only the
+  * pointer bookkeeping is refreshed), pins it in the GTT domain to obtain
+  * rdev->ih.gpu_addr and kmaps it into rdev->ih.ring for CPU access.
+  * Returns 0 on success or a negative error code from the BO layer.
+  */
+ static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
+ {
+       int r;
+       rdev->ih.ring_size = ring_size;
+       /* Allocate ring buffer */
+       if (rdev->ih.ring_obj == NULL) {
+               r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
+                                    true,
+                                    RADEON_GEM_DOMAIN_GTT,
+                                    &rdev->ih.ring_obj);
+               if (r) {
+                       DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
+                       return r;
+               }
+               r = radeon_bo_reserve(rdev->ih.ring_obj, false);
+               if (unlikely(r != 0))
+                       return r;
+               r = radeon_bo_pin(rdev->ih.ring_obj,
+                                 RADEON_GEM_DOMAIN_GTT,
+                                 &rdev->ih.gpu_addr);
+               if (r) {
+                       radeon_bo_unreserve(rdev->ih.ring_obj);
+                       DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
+                       return r;
+               }
+               r = radeon_bo_kmap(rdev->ih.ring_obj,
+                                  (void **)&rdev->ih.ring);
+               radeon_bo_unreserve(rdev->ih.ring_obj);
+               if (r) {
+                       DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
+                       return r;
+               }
+       }
+       /* Mask for the IH ring, not the CP ring: the original used
+        * rdev->cp.ring_size here, which silently breaks IH wrap-around
+        * whenever the two rings are sized differently. */
+       rdev->ih.ptr_mask = (rdev->ih.ring_size / 4) - 1;
+       rdev->ih.rptr = 0;
+       return 0;
+ }
+ /*
+  * r600_ih_ring_fini - tear down the IH ring buffer.
+  *
+  * Reverses r600_ih_ring_alloc(): kunmap, unpin, unreserve, then drop the
+  * BO reference.  Safe to call when the ring was never allocated
+  * (ring_obj == NULL).
+  */
+ static void r600_ih_ring_fini(struct radeon_device *rdev)
+ {
+       int r;
+       if (rdev->ih.ring_obj) {
+               r = radeon_bo_reserve(rdev->ih.ring_obj, false);
+               if (likely(r == 0)) {
+                       radeon_bo_kunmap(rdev->ih.ring_obj);
+                       radeon_bo_unpin(rdev->ih.ring_obj);
+                       radeon_bo_unreserve(rdev->ih.ring_obj);
+               }
+               radeon_bo_unref(&rdev->ih.ring_obj);
+               rdev->ih.ring = NULL;
+               rdev->ih.ring_obj = NULL;
+       }
+ }
+ /*
+  * r600_rlc_stop - halt the RLC.
+  *
+  * r7xx parts (CHIP_RV770 and newer) need an SRBM soft reset of the RLC
+  * before it can be halted; the RREG32 readbacks post the writes before
+  * the delay.
+  */
+ static void r600_rlc_stop(struct radeon_device *rdev)
+ {
+       if (rdev->family >= CHIP_RV770) {
+               /* r7xx asics need to soft reset RLC before halting */
+               WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
+               RREG32(SRBM_SOFT_RESET);
+               udelay(15000);
+               WREG32(SRBM_SOFT_RESET, 0);
+               RREG32(SRBM_SOFT_RESET);
+       }
+       WREG32(RLC_CNTL, 0);
+ }
+ /* Re-enable the RLC after r600_rlc_stop(). */
+ static void r600_rlc_start(struct radeon_device *rdev)
+ {
+       WREG32(RLC_CNTL, RLC_ENABLE);
+ }
+ /*
+  * r600_rlc_init - load the RLC microcode and start the RLC.
+  *
+  * Requires rdev->rlc_fw to be populated already (returns -EINVAL if not).
+  * Halts the RLC, clears its state registers, uploads the big-endian
+  * ucode words (r7xx-sized image for CHIP_RV770 and newer, r6xx-sized
+  * otherwise) and re-enables it.  Returns 0 on success.
+  */
+ static int r600_rlc_init(struct radeon_device *rdev)
+ {
+       u32 i;
+       const __be32 *fw_data;
+       if (!rdev->rlc_fw)
+               return -EINVAL;
+       r600_rlc_stop(rdev);
+       WREG32(RLC_HB_BASE, 0);
+       WREG32(RLC_HB_CNTL, 0);
+       WREG32(RLC_HB_RPTR, 0);
+       WREG32(RLC_HB_WPTR, 0);
+       WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
+       WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
+       WREG32(RLC_MC_CNTL, 0);
+       WREG32(RLC_UCODE_CNTL, 0);
+       fw_data = (const __be32 *)rdev->rlc_fw->data;
+       if (rdev->family >= CHIP_RV770) {
+               for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
+                       WREG32(RLC_UCODE_ADDR, i);
+                       WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+               }
+       } else {
+               for (i = 0; i < RLC_UCODE_SIZE; i++) {
+                       WREG32(RLC_UCODE_ADDR, i);
+                       WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+               }
+       }
+       /* reset the ucode write address before starting */
+       WREG32(RLC_UCODE_ADDR, 0);
+       r600_rlc_start(rdev);
+       return 0;
+ }
+ /*
+  * Turn the IH on: set ENABLE_INTR in IH_CNTL and IH_RB_ENABLE in
+  * IH_RB_CNTL (read-modify-write, preserving the other bits) and mark
+  * rdev->ih.enabled so r600_irq_set() will program sources.
+  */
+ static void r600_enable_interrupts(struct radeon_device *rdev)
+ {
+       u32 ih_cntl = RREG32(IH_CNTL);
+       u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+       ih_cntl |= ENABLE_INTR;
+       ih_rb_cntl |= IH_RB_ENABLE;
+       WREG32(IH_CNTL, ih_cntl);
+       WREG32(IH_RB_CNTL, ih_rb_cntl);
+       rdev->ih.enabled = true;
+ }
+ /*
+  * Turn the IH off: clear the ring and controller enable bits, then reset
+  * both the hardware and the software copies of rptr/wptr to 0.
+  */
+ static void r600_disable_interrupts(struct radeon_device *rdev)
+ {
+       u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+       u32 ih_cntl = RREG32(IH_CNTL);
+       ih_rb_cntl &= ~IH_RB_ENABLE;
+       ih_cntl &= ~ENABLE_INTR;
+       WREG32(IH_RB_CNTL, ih_rb_cntl);
+       WREG32(IH_CNTL, ih_cntl);
+       /* set rptr, wptr to 0 */
+       WREG32(IH_RB_RPTR, 0);
+       WREG32(IH_RB_WPTR, 0);
+       rdev->ih.enabled = false;
+       rdev->ih.wptr = 0;
+       rdev->ih.rptr = 0;
+ }
+ /*
+  * r600_disable_interrupt_state - mask every interrupt source.
+  *
+  * Clears CP, GRBM and display-mode interrupt enables.  For the hot-plug
+  * detect control registers, the polarity bits are read back first and
+  * written back, so only the enable/ack state is cleared.
+  */
+ static void r600_disable_interrupt_state(struct radeon_device *rdev)
+ {
+       u32 tmp;
+       WREG32(CP_INT_CNTL, 0);
+       WREG32(GRBM_INT_CNTL, 0);
+       WREG32(DxMODE_INT_MASK, 0);
+       if (ASIC_IS_DCE3(rdev)) {
+               WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
+               WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
+               tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD1_INT_CONTROL, tmp);
+               tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD2_INT_CONTROL, tmp);
+               tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD3_INT_CONTROL, tmp);
+               tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD4_INT_CONTROL, tmp);
+               if (ASIC_IS_DCE32(rdev)) {
+                       /* write back the preserved polarity bits instead of 0,
+                        * matching the HPD1-4 pattern above; the original
+                        * computed tmp and then discarded it */
+                       tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+                       WREG32(DC_HPD5_INT_CONTROL, tmp);
+                       tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+                       WREG32(DC_HPD6_INT_CONTROL, tmp);
+               }
+       } else {
+               WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+               WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
+               /* write back the preserved polarity bits instead of 0;
+                * the original computed tmp and then discarded it */
+               tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
+               WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+               tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
+               WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+               tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
+               WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
+       }
+ }
+ /*
+  * r600_irq_init - allocate the IH ring and program the interrupt controller.
+  *
+  * Allocates/pins the IH ring, disables irqs, loads the RLC ucode (freeing
+  * the ring again on failure), points the hardware at the ring
+  * (IH_RB_BASE, ring size encoded in IH_RB_CNTL), zeroes rptr/wptr,
+  * programs IH_CNTL (RPTR_REARM only works with MSIs enabled), masks all
+  * sources and finally enables the IH.  Returns 0 on success or a
+  * negative error code.
+  */
+ int r600_irq_init(struct radeon_device *rdev)
+ {
+       int ret = 0;
+       int rb_bufsz;
+       u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
+       /* allocate ring */
+       ret = r600_ih_ring_alloc(rdev, rdev->ih.ring_size);
+       if (ret)
+               return ret;
+       /* disable irqs */
+       r600_disable_interrupts(rdev);
+       /* init rlc */
+       ret = r600_rlc_init(rdev);
+       if (ret) {
+               r600_ih_ring_fini(rdev);
+               return ret;
+       }
+       /* setup interrupt control */
+       /* set dummy read address to ring address */
+       WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+       interrupt_cntl = RREG32(INTERRUPT_CNTL);
+       /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
+        * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
+        */
+       interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
+       /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
+       interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
+       WREG32(INTERRUPT_CNTL, interrupt_cntl);
+       WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
+       rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+       ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
+                     IH_WPTR_OVERFLOW_CLEAR |
+                     (rb_bufsz << 1));
+       /* WPTR writeback, not yet */
+       /*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/
+       WREG32(IH_RB_WPTR_ADDR_LO, 0);
+       WREG32(IH_RB_WPTR_ADDR_HI, 0);
+       WREG32(IH_RB_CNTL, ih_rb_cntl);
+       /* set rptr, wptr to 0 */
+       WREG32(IH_RB_RPTR, 0);
+       WREG32(IH_RB_WPTR, 0);
+       /* Default settings for IH_CNTL (disabled at first) */
+       ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
+       /* RPTR_REARM only works if msi's are enabled */
+       if (rdev->msi_enabled)
+               ih_cntl |= RPTR_REARM;
+ #ifdef __BIG_ENDIAN
+       ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
+ #endif
+       WREG32(IH_CNTL, ih_cntl);
+       /* force the active interrupt state to all disabled */
+       r600_disable_interrupt_state(rdev);
+       /* enable irqs */
+       r600_enable_interrupts(rdev);
+       return ret;
+ }
+ /* Full interrupt teardown: disable irqs, halt the RLC, free the IH ring. */
+ void r600_irq_fini(struct radeon_device *rdev)
+ {
+       r600_disable_interrupts(rdev);
+       r600_rlc_stop(rdev);
+       r600_ih_ring_fini(rdev);
+ }
+ /*
+  * r600_irq_set - program interrupt enables from the flags in rdev->irq.
+  *
+  * Builds the CP, display-mode (vblank) and hot-plug enable masks and
+  * writes them to the hardware.  The HPD control registers are read back
+  * with the enable bit masked off first, so only that bit is changed.
+  * Does nothing (returns 0) while the IH is disabled.
+  */
+ int r600_irq_set(struct radeon_device *rdev)
+ {
+       u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+       u32 mode_int = 0;
+       u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
+       /* don't enable anything if the ih is disabled */
+       if (!rdev->ih.enabled)
+               return 0;
+       if (ASIC_IS_DCE3(rdev)) {
+               hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+               hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+               hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+               hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
+               if (ASIC_IS_DCE32(rdev)) {
+                       hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
+                       hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+               }
+       } else {
+               /* pre-DCE3 parts only have three hot-plug detect blocks */
+               hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+               hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+               hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+       }
+       if (rdev->irq.sw_int) {
+               DRM_DEBUG("r600_irq_set: sw int\n");
+               cp_int_cntl |= RB_INT_ENABLE;
+       }
+       if (rdev->irq.crtc_vblank_int[0]) {
+               DRM_DEBUG("r600_irq_set: vblank 0\n");
+               mode_int |= D1MODE_VBLANK_INT_MASK;
+       }
+       if (rdev->irq.crtc_vblank_int[1]) {
+               DRM_DEBUG("r600_irq_set: vblank 1\n");
+               mode_int |= D2MODE_VBLANK_INT_MASK;
+       }
+       if (rdev->irq.hpd[0]) {
+               DRM_DEBUG("r600_irq_set: hpd 1\n");
+               hpd1 |= DC_HPDx_INT_EN;
+       }
+       if (rdev->irq.hpd[1]) {
+               DRM_DEBUG("r600_irq_set: hpd 2\n");
+               hpd2 |= DC_HPDx_INT_EN;
+       }
+       if (rdev->irq.hpd[2]) {
+               DRM_DEBUG("r600_irq_set: hpd 3\n");
+               hpd3 |= DC_HPDx_INT_EN;
+       }
+       if (rdev->irq.hpd[3]) {
+               DRM_DEBUG("r600_irq_set: hpd 4\n");
+               hpd4 |= DC_HPDx_INT_EN;
+       }
+       if (rdev->irq.hpd[4]) {
+               DRM_DEBUG("r600_irq_set: hpd 5\n");
+               hpd5 |= DC_HPDx_INT_EN;
+       }
+       if (rdev->irq.hpd[5]) {
+               DRM_DEBUG("r600_irq_set: hpd 6\n");
+               hpd6 |= DC_HPDx_INT_EN;
+       }
+       WREG32(CP_INT_CNTL, cp_int_cntl);
+       WREG32(DxMODE_INT_MASK, mode_int);
+       if (ASIC_IS_DCE3(rdev)) {
+               WREG32(DC_HPD1_INT_CONTROL, hpd1);
+               WREG32(DC_HPD2_INT_CONTROL, hpd2);
+               WREG32(DC_HPD3_INT_CONTROL, hpd3);
+               WREG32(DC_HPD4_INT_CONTROL, hpd4);
+               if (ASIC_IS_DCE32(rdev)) {
+                       WREG32(DC_HPD5_INT_CONTROL, hpd5);
+                       WREG32(DC_HPD6_INT_CONTROL, hpd6);
+               }
+       } else {
+               WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
+               WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
+               WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
+       }
+       return 0;
+ }
+ /*
+  * r600_irq_ack - snapshot and acknowledge pending display interrupts.
+  *
+  * Reads the (DCE3 or legacy) display interrupt status registers into
+  * *disp_int / *disp_int_cont / *disp_int_cont2 for the caller, then acks
+  * whatever vblank/vline and hot-plug interrupts are pending by setting
+  * the corresponding ACK bits.
+  */
+ static inline void r600_irq_ack(struct radeon_device *rdev,
+                               u32 *disp_int,
+                               u32 *disp_int_cont,
+                               u32 *disp_int_cont2)
+ {
+       u32 tmp;
+       if (ASIC_IS_DCE3(rdev)) {
+               *disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
+               *disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
+               *disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
+       } else {
+               *disp_int = RREG32(DISP_INTERRUPT_STATUS);
+               *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+               *disp_int_cont2 = 0;
+       }
+       if (*disp_int & LB_D1_VBLANK_INTERRUPT)
+               WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
+       if (*disp_int & LB_D1_VLINE_INTERRUPT)
+               WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
+       if (*disp_int & LB_D2_VBLANK_INTERRUPT)
+               WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
+       if (*disp_int & LB_D2_VLINE_INTERRUPT)
+               WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
+       if (*disp_int & DC_HPD1_INTERRUPT) {
+               if (ASIC_IS_DCE3(rdev)) {
+                       tmp = RREG32(DC_HPD1_INT_CONTROL);
+                       tmp |= DC_HPDx_INT_ACK;
+                       WREG32(DC_HPD1_INT_CONTROL, tmp);
+               } else {
+                       tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
+                       tmp |= DC_HPDx_INT_ACK;
+                       WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+               }
+       }
+       if (*disp_int & DC_HPD2_INTERRUPT) {
+               if (ASIC_IS_DCE3(rdev)) {
+                       tmp = RREG32(DC_HPD2_INT_CONTROL);
+                       tmp |= DC_HPDx_INT_ACK;
+                       WREG32(DC_HPD2_INT_CONTROL, tmp);
+               } else {
+                       tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
+                       tmp |= DC_HPDx_INT_ACK;
+                       WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+               }
+       }
+       if (*disp_int_cont & DC_HPD3_INTERRUPT) {
+               if (ASIC_IS_DCE3(rdev)) {
+                       tmp = RREG32(DC_HPD3_INT_CONTROL);
+                       tmp |= DC_HPDx_INT_ACK;
+                       WREG32(DC_HPD3_INT_CONTROL, tmp);
+               } else {
+                       tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
+                       tmp |= DC_HPDx_INT_ACK;
+                       WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
+               }
+       }
+       if (*disp_int_cont & DC_HPD4_INTERRUPT) {
+               tmp = RREG32(DC_HPD4_INT_CONTROL);
+               tmp |= DC_HPDx_INT_ACK;
+               WREG32(DC_HPD4_INT_CONTROL, tmp);
+       }
+       if (ASIC_IS_DCE32(rdev)) {
+               if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
+                       tmp = RREG32(DC_HPD5_INT_CONTROL);
+                       tmp |= DC_HPDx_INT_ACK;
+                       WREG32(DC_HPD5_INT_CONTROL, tmp);
+               }
+               if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
+                       /* read the HPD6 register here, not HPD5: the original
+                        * read DC_HPD5_INT_CONTROL, clobbering HPD6's control
+                        * bits with HPD5's on the ack write below */
+                       tmp = RREG32(DC_HPD6_INT_CONTROL);
+                       tmp |= DC_HPDx_INT_ACK;
+                       WREG32(DC_HPD6_INT_CONTROL, tmp);
+               }
+       }
+ }
+ /*
+  * r600_irq_disable - fully quiesce interrupts.
+  *
+  * Disables the IH, waits 1 ms for in-flight interrupts, acks anything
+  * still pending, then masks every source.
+  */
+ void r600_irq_disable(struct radeon_device *rdev)
+ {
+       u32 disp_int, disp_int_cont, disp_int_cont2;
+       r600_disable_interrupts(rdev);
+       /* Wait and acknowledge irq */
+       mdelay(1);
+       r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
+       r600_disable_interrupt_state(rdev);
+ }
+ /*
+  * r600_get_ih_wptr - fetch the hardware write pointer of the IH ring.
+  *
+  * Reads IH_RB_WPTR directly (writeback not implemented yet).  If the
+  * ring overflowed (RB_OVERFLOW set) the condition is logged and the
+  * overflow flag cleared via IH_WPTR_OVERFLOW_CLEAR; recovery beyond
+  * that is still a TODO.  Returns the wptr byte offset masked with
+  * WPTR_OFFSET_MASK.
+  */
+ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
+ {
+       u32 wptr, tmp;

+       /* XXX use writeback */
+       wptr = RREG32(IH_RB_WPTR);

+       if (wptr & RB_OVERFLOW) {
+               WARN_ON(1);
+               /* XXX deal with overflow */
+               DRM_ERROR("IH RB overflow\n");
+               tmp = RREG32(IH_RB_CNTL);
+               tmp |= IH_WPTR_OVERFLOW_CLEAR;
+               WREG32(IH_RB_CNTL, tmp);
+       }
+       wptr = wptr & WPTR_OFFSET_MASK;
+       return wptr;
+ }
+ /*        r600 IV Ring
+  * Each IV ring entry is 128 bits:
+  * [7:0]    - interrupt source id
+  * [31:8]   - reserved
+  * [59:32]  - interrupt source data
+  * [127:60]  - reserved
+  *
+  * The basic interrupt vector entries
+  * are decoded as follows:
+  * src_id  src_data  description
+  *      1         0  D1 Vblank
+  *      1         1  D1 Vline
+  *      5         0  D2 Vblank
+  *      5         1  D2 Vline
+  *     19         0  FP Hot plug detection A
+  *     19         1  FP Hot plug detection B
+  *     19         2  DAC A auto-detection
+  *     19         3  DAC B auto-detection
+  *    176         -  CP_INT RB
+  *    177         -  CP_INT IB1
+  *    178         -  CP_INT IB2
+  *    181         -  EOP Interrupt
+  *    233         -  GUI Idle
+  *
+  * Note, these are based on r600 and may need to be
+  * adjusted or added to on newer asics
+  */
+ /*
+  * r600_irq_process - top-half interrupt handler for r6xx/r7xx.
+  *
+  * Walks the IH ring from rptr to the hardware wptr, decoding each
+  * 16-byte IV entry (src_id / src_data) and dispatching vblank, hot-plug
+  * and CP events.  Re-reads wptr after draining and loops (restart_ih)
+  * if more entries arrived meanwhile.  Hot-plug work is deferred to
+  * rdev->hotplug_work.  Runs under rdev->ih.lock; returns IRQ_HANDLED
+  * when entries were consumed, IRQ_NONE when the ring was empty or the
+  * device is shutting down.
+  */
+ int r600_irq_process(struct radeon_device *rdev)
+ {
+       u32 wptr = r600_get_ih_wptr(rdev);
+       u32 rptr = rdev->ih.rptr;
+       u32 src_id, src_data;
+       u32 last_entry = rdev->ih.ring_size - 16;
+       u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
+       unsigned long flags;
+       bool queue_hotplug = false;
+       DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+       spin_lock_irqsave(&rdev->ih.lock, flags);
+       if (rptr == wptr) {
+               spin_unlock_irqrestore(&rdev->ih.lock, flags);
+               return IRQ_NONE;
+       }
+       if (rdev->shutdown) {
+               spin_unlock_irqrestore(&rdev->ih.lock, flags);
+               return IRQ_NONE;
+       }
+ restart_ih:
+       /* display interrupts */
+       r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
+       rdev->ih.wptr = wptr;
+       while (rptr != wptr) {
+               /* wptr/rptr are in bytes! */
+               ring_index = rptr / 4;
+               src_id =  rdev->ih.ring[ring_index] & 0xff;
+               src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
+               switch (src_id) {
+               case 1: /* D1 vblank/vline */
+                       switch (src_data) {
+                       case 0: /* D1 vblank */
+                               if (disp_int & LB_D1_VBLANK_INTERRUPT) {
+                                       drm_handle_vblank(rdev->ddev, 0);
+                                       disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+                                       DRM_DEBUG("IH: D1 vblank\n");
+                               }
+                               break;
+                       case 1: /* D1 vline */
+                               if (disp_int & LB_D1_VLINE_INTERRUPT) {
+                                       disp_int &= ~LB_D1_VLINE_INTERRUPT;
+                                       DRM_DEBUG("IH: D1 vline\n");
+                               }
+                               break;
+                       default:
+                               DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+                               break;
+                       }
+                       break;
+               case 5: /* D2 vblank/vline */
+                       switch (src_data) {
+                       case 0: /* D2 vblank */
+                               if (disp_int & LB_D2_VBLANK_INTERRUPT) {
+                                       drm_handle_vblank(rdev->ddev, 1);
+                                       disp_int &= ~LB_D2_VBLANK_INTERRUPT;
+                                       DRM_DEBUG("IH: D2 vblank\n");
+                               }
+                               break;
+                       case 1: /* D2 vline */
+                               if (disp_int & LB_D2_VLINE_INTERRUPT) {
+                                       disp_int &= ~LB_D2_VLINE_INTERRUPT;
+                                       DRM_DEBUG("IH: D2 vline\n");
+                               }
+                               break;
+                       default:
+                               DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+                               break;
+                       }
+                       break;
+               case 19: /* HPD/DAC hotplug */
+                       switch (src_data) {
+                       case 0:
+                               if (disp_int & DC_HPD1_INTERRUPT) {
+                                       disp_int &= ~DC_HPD1_INTERRUPT;
+                                       queue_hotplug = true;
+                                       DRM_DEBUG("IH: HPD1\n");
+                               }
+                               break;
+                       case 1:
+                               if (disp_int & DC_HPD2_INTERRUPT) {
+                                       disp_int &= ~DC_HPD2_INTERRUPT;
+                                       queue_hotplug = true;
+                                       DRM_DEBUG("IH: HPD2\n");
+                               }
+                               break;
+                       case 4:
+                               if (disp_int_cont & DC_HPD3_INTERRUPT) {
+                                       disp_int_cont &= ~DC_HPD3_INTERRUPT;
+                                       queue_hotplug = true;
+                                       DRM_DEBUG("IH: HPD3\n");
+                               }
+                               break;
+                       case 5:
+                               if (disp_int_cont & DC_HPD4_INTERRUPT) {
+                                       disp_int_cont &= ~DC_HPD4_INTERRUPT;
+                                       queue_hotplug = true;
+                                       DRM_DEBUG("IH: HPD4\n");
+                               }
+                               break;
+                       case 10:
+                               if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
+                                       /* clear the bit in the same word it was
+                                        * tested in (cont2); the original cleared
+                                        * disp_int_cont, leaving the tested bit set */
+                                       disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
+                                       queue_hotplug = true;
+                                       DRM_DEBUG("IH: HPD5\n");
+                               }
+                               break;
+                       case 12:
+                               if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
+                                       /* same fix as HPD5: clear in cont2 */
+                                       disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
+                                       queue_hotplug = true;
+                                       DRM_DEBUG("IH: HPD6\n");
+                               }
+                               break;
+                       default:
+                               DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+                               break;
+                       }
+                       break;
+               case 176: /* CP_INT in ring buffer */
+               case 177: /* CP_INT in IB1 */
+               case 178: /* CP_INT in IB2 */
+                       DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
+                       radeon_fence_process(rdev);
+                       break;
+               case 181: /* CP EOP event */
+                       DRM_DEBUG("IH: CP EOP\n");
+                       break;
+               default:
+                       DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+                       break;
+               }
+               /* wptr/rptr are in bytes! */
+               if (rptr == last_entry)
+                       rptr = 0;
+               else
+                       rptr += 16;
+       }
+       /* make sure wptr hasn't changed while processing */
+       wptr = r600_get_ih_wptr(rdev);
+       if (wptr != rdev->ih.wptr)
+               goto restart_ih;
+       if (queue_hotplug)
+               queue_work(rdev->wq, &rdev->hotplug_work);
+       rdev->ih.rptr = rptr;
+       WREG32(IH_RB_RPTR, rdev->ih.rptr);
+       spin_unlock_irqrestore(&rdev->ih.lock, flags);
+       return IRQ_HANDLED;
+ }
  
  /*
   * Debugfs info
@@@ -1811,21 -2832,21 +2832,21 @@@ static int r600_debugfs_cp_ring_info(st
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        unsigned count, i, j;
  
        radeon_ring_free_size(rdev);
-       rdp = RREG32(CP_RB_RPTR);
-       wdp = RREG32(CP_RB_WPTR);
-       count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
+       count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
        seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
-       seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
-       seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
+       seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
+       seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
+       seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
+       seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
        seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
        seq_printf(m, "%u dwords in ring\n", count);
+       i = rdev->cp.rptr;
        for (j = 0; j <= count; j++) {
-               i = (rdp + j) & rdev->cp.ptr_mask;
                seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
+               i = (i + 1) & rdev->cp.ptr_mask;
        }
        return 0;
  }
@@@ -59,7 -59,7 +59,7 @@@ static struct fb_ops radeonfb_ops = 
  };
  
  /**
 - * Curretly it is assumed that the old framebuffer is reused.
 + * Currently it is assumed that the old framebuffer is reused.
   *
   * LOCKING
   * caller should hold the mode config lock.
@@@ -140,7 -140,7 +140,7 @@@ int radeonfb_create(struct drm_device *
        struct radeon_framebuffer *rfb;
        struct drm_mode_fb_cmd mode_cmd;
        struct drm_gem_object *gobj = NULL;
-       struct radeon_object *robj = NULL;
+       struct radeon_bo *rbo = NULL;
        struct device *device = &rdev->pdev->dev;
        int size, aligned_size, ret;
        u64 fb_gpuaddr;
        ret = radeon_gem_object_create(rdev, aligned_size, 0,
                        RADEON_GEM_DOMAIN_VRAM,
                        false, ttm_bo_type_kernel,
-                       false, &gobj);
+                       &gobj);
        if (ret) {
                printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n",
                       surface_width, surface_height);
                ret = -ENOMEM;
                goto out;
        }
-       robj = gobj->driver_private;
+       rbo = gobj->driver_private;
  
        if (fb_tiled)
                tiling_flags = RADEON_TILING_MACRO;
        }
  #endif
  
-       if (tiling_flags)
-               radeon_object_set_tiling_flags(robj, tiling_flags | RADEON_TILING_SURFACE, mode_cmd.pitch);
+       if (tiling_flags) {
+               ret = radeon_bo_set_tiling_flags(rbo,
+                                       tiling_flags | RADEON_TILING_SURFACE,
+                                       mode_cmd.pitch);
+               if (ret)
+                       dev_err(rdev->dev, "FB failed to set tiling flags\n");
+       }
        mutex_lock(&rdev->ddev->struct_mutex);
        fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
        if (fb == NULL) {
                ret = -ENOMEM;
                goto out_unref;
        }
-       ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
+       ret = radeon_bo_reserve(rbo, false);
+       if (unlikely(ret != 0))
+               goto out_unref;
+       ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
+       if (ret) {
+               radeon_bo_unreserve(rbo);
+               goto out_unref;
+       }
+       if (fb_tiled)
+               radeon_bo_check_tiling(rbo, 0, 0);
+       ret = radeon_bo_kmap(rbo, &fbptr);
+       radeon_bo_unreserve(rbo);
        if (ret) {
-               printk(KERN_ERR "failed to pin framebuffer\n");
-               ret = -ENOMEM;
                goto out_unref;
        }
  
        *fb_p = fb;
        rfb = to_radeon_framebuffer(fb);
        rdev->fbdev_rfb = rfb;
-       rdev->fbdev_robj = robj;
+       rdev->fbdev_rbo = rbo;
  
        info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
        if (info == NULL) {
        if (ret)
                goto out_unref;
  
-       if (fb_tiled)
-               radeon_object_check_tiling(robj, 0, 0);
-       ret = radeon_object_kmap(robj, &fbptr);
-       if (ret) {
-               goto out_unref;
-       }
-       memset_io(fbptr, 0, aligned_size);
+       memset_io(fbptr, 0xff, aligned_size);
  
        strcpy(info->fix.id, "radeondrmfb");
  
        return 0;
  
  out_unref:
-       if (robj) {
-               radeon_object_kunmap(robj);
+       if (rbo) {
+               ret = radeon_bo_reserve(rbo, false);
+               if (likely(ret == 0)) {
+                       radeon_bo_kunmap(rbo);
+                       radeon_bo_unreserve(rbo);
+               }
        }
        if (fb && ret) {
                list_del(&fb->filp_head);
@@@ -321,14 -331,22 +331,22 @@@ int radeon_parse_options(char *options
  
  int radeonfb_probe(struct drm_device *dev)
  {
-       return drm_fb_helper_single_fb_probe(dev, 32, &radeonfb_create);
+       struct radeon_device *rdev = dev->dev_private;
+       int bpp_sel = 32;
+       /* select 8 bpp console on RN50 or 16MB cards */
+       if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
+               bpp_sel = 8;
+       return drm_fb_helper_single_fb_probe(dev, bpp_sel, &radeonfb_create);
  }
  
  int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
  {
        struct fb_info *info;
        struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb);
-       struct radeon_object *robj;
+       struct radeon_bo *rbo;
+       int r;
  
        if (!fb) {
                return -EINVAL;
        info = fb->fbdev;
        if (info) {
                struct radeon_fb_device *rfbdev = info->par;
-               robj = rfb->obj->driver_private;
+               rbo = rfb->obj->driver_private;
                unregister_framebuffer(info);
-               radeon_object_kunmap(robj);
-               radeon_object_unpin(robj);
+               r = radeon_bo_reserve(rbo, false);
+               if (likely(r == 0)) {
+                       radeon_bo_kunmap(rbo);
+                       radeon_bo_unpin(rbo);
+                       radeon_bo_unreserve(rbo);
+               }
                drm_fb_helper_free(&rfbdev->helper);
                framebuffer_release(info);
        }
@@@ -150,7 -150,7 +150,7 @@@ static int radeon_init_mem_type(struct 
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_TT:
-               man->gpu_offset = 0;
+               man->gpu_offset = rdev->mc.gtt_location;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
                break;
        case TTM_PL_VRAM:
                /* "On-card" video ram */
-               man->gpu_offset = 0;
+               man->gpu_offset = rdev->mc.vram_location;
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
        return 0;
  }
  
- static uint32_t radeon_evict_flags(struct ttm_buffer_object *bo)
+ static void radeon_evict_flags(struct ttm_buffer_object *bo,
+                               struct ttm_placement *placement)
  {
-       uint32_t cur_placement = bo->mem.placement & ~TTM_PL_MASK_MEMTYPE;
+       struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
        switch (bo->mem.mem_type) {
+       case TTM_PL_VRAM:
+               radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+               break;
+       case TTM_PL_TT:
        default:
-               return (cur_placement & ~TTM_PL_MASK_CACHING) |
-                       TTM_PL_FLAG_SYSTEM |
-                       TTM_PL_FLAG_CACHED;
+               radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
        }
+       *placement = rbo->placement;
  }
  
  static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
@@@ -283,14 -286,21 +286,21 @@@ static int radeon_move_vram_ram(struct 
        struct radeon_device *rdev;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg tmp_mem;
-       uint32_t proposed_placement;
+       u32 placements;
+       struct ttm_placement placement;
        int r;
  
        rdev = radeon_get_rdev(bo->bdev);
        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
-       proposed_placement = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
-       r = ttm_bo_mem_space(bo, proposed_placement, &tmp_mem,
+       placement.fpfn = 0;
+       placement.lpfn = 0;
+       placement.num_placement = 1;
+       placement.placement = &placements;
+       placement.num_busy_placement = 1;
+       placement.busy_placement = &placements;
+       placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+       r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
                             interruptible, no_wait);
        if (unlikely(r)) {
                return r;
@@@ -329,15 -339,21 +339,21 @@@ static int radeon_move_ram_vram(struct 
        struct radeon_device *rdev;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg tmp_mem;
-       uint32_t proposed_flags;
+       struct ttm_placement placement;
+       u32 placements;
        int r;
  
        rdev = radeon_get_rdev(bo->bdev);
        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
-       proposed_flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
-       r = ttm_bo_mem_space(bo, proposed_flags, &tmp_mem,
-                            interruptible, no_wait);
+       placement.fpfn = 0;
+       placement.lpfn = 0;
+       placement.num_placement = 1;
+       placement.placement = &placements;
+       placement.num_busy_placement = 1;
+       placement.busy_placement = &placements;
+       placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+       r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
        if (unlikely(r)) {
                return r;
        }
@@@ -378,7 -394,7 +394,7 @@@ static int radeon_bo_move(struct ttm_bu
             new_mem->mem_type == TTM_PL_SYSTEM) ||
            (old_mem->mem_type == TTM_PL_SYSTEM &&
             new_mem->mem_type == TTM_PL_TT)) {
 -              /* bind is enought */
 +              /* bind is enough */
                radeon_move_null(bo, new_mem);
                return 0;
        }
@@@ -407,18 -423,6 +423,6 @@@ memcpy
        return r;
  }
  
- const uint32_t radeon_mem_prios[] = {
-       TTM_PL_VRAM,
-       TTM_PL_TT,
-       TTM_PL_SYSTEM,
- };
- const uint32_t radeon_busy_prios[] = {
-       TTM_PL_TT,
-       TTM_PL_VRAM,
-       TTM_PL_SYSTEM,
- };
  static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
                                bool lazy, bool interruptible)
  {
@@@ -446,10 -450,6 +450,6 @@@ static bool radeon_sync_obj_signaled(vo
  }
  
  static struct ttm_bo_driver radeon_bo_driver = {
-       .mem_type_prio = radeon_mem_prios,
-       .mem_busy_prio = radeon_busy_prios,
-       .num_mem_type_prio = ARRAY_SIZE(radeon_mem_prios),
-       .num_mem_busy_prio = ARRAY_SIZE(radeon_busy_prios),
        .create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
        .invalidate_caches = &radeon_invalidate_caches,
        .init_mem_type = &radeon_init_mem_type,
@@@ -482,27 -482,31 +482,31 @@@ int radeon_ttm_init(struct radeon_devic
                DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
                return r;
        }
-       r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0,
-                          ((rdev->mc.real_vram_size) >> PAGE_SHIFT));
+       r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
+                               rdev->mc.real_vram_size >> PAGE_SHIFT);
        if (r) {
                DRM_ERROR("Failed initializing VRAM heap.\n");
                return r;
        }
-       r = radeon_object_create(rdev, NULL, 256 * 1024, true,
-                                RADEON_GEM_DOMAIN_VRAM, false,
-                                &rdev->stollen_vga_memory);
+       r = radeon_bo_create(rdev, NULL, 256 * 1024, true,
+                               RADEON_GEM_DOMAIN_VRAM,
+                               &rdev->stollen_vga_memory);
        if (r) {
                return r;
        }
-       r = radeon_object_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
+       r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
+       if (r)
+               return r;
+       r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
+       radeon_bo_unreserve(rdev->stollen_vga_memory);
        if (r) {
-               radeon_object_unref(&rdev->stollen_vga_memory);
+               radeon_bo_unref(&rdev->stollen_vga_memory);
                return r;
        }
        DRM_INFO("radeon: %uM of VRAM memory ready\n",
                 (unsigned)rdev->mc.real_vram_size / (1024 * 1024));
-       r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0,
-                          ((rdev->mc.gtt_size) >> PAGE_SHIFT));
+       r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
+                               rdev->mc.gtt_size >> PAGE_SHIFT);
        if (r) {
                DRM_ERROR("Failed initializing GTT heap.\n");
                return r;
  
  void radeon_ttm_fini(struct radeon_device *rdev)
  {
+       int r;
        if (rdev->stollen_vga_memory) {
-               radeon_object_unpin(rdev->stollen_vga_memory);
-               radeon_object_unref(&rdev->stollen_vga_memory);
+               r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
+               if (r == 0) {
+                       radeon_bo_unpin(rdev->stollen_vga_memory);
+                       radeon_bo_unreserve(rdev->stollen_vga_memory);
+               }
+               radeon_bo_unref(&rdev->stollen_vga_memory);
        }
        ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
        ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
@@@ -92,7 -92,7 +92,7 @@@ int rv770_pcie_gart_enable(struct radeo
  void rv770_pcie_gart_disable(struct radeon_device *rdev)
  {
        u32 tmp;
-       int i;
+       int i, r;
  
        /* Disable all tables */
        for (i = 0; i < 7; i++)
        WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
        if (rdev->gart.table.vram.robj) {
-               radeon_object_kunmap(rdev->gart.table.vram.robj);
-               radeon_object_unpin(rdev->gart.table.vram.robj);
+               r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+               if (likely(r == 0)) {
+                       radeon_bo_kunmap(rdev->gart.table.vram.robj);
+                       radeon_bo_unpin(rdev->gart.table.vram.robj);
+                       radeon_bo_unreserve(rdev->gart.table.vram.robj);
+               }
        }
  }
  
@@@ -829,11 -833,11 +833,11 @@@ int rv770_mc_init(struct radeon_device 
                 * AGP so that GPU can catch out of VRAM/AGP access
                 */
                if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
 -                      /* Enought place before */
 +                      /* Enough place before */
                        rdev->mc.vram_location = rdev->mc.gtt_location -
                                                        rdev->mc.mc_vram_size;
                } else if (tmp > rdev->mc.mc_vram_size) {
 -                      /* Enought place after */
 +                      /* Enough place after */
                        rdev->mc.vram_location = rdev->mc.gtt_location +
                                                        rdev->mc.gtt_size;
                } else {
@@@ -870,6 -874,14 +874,14 @@@ static int rv770_startup(struct radeon_
  {
        int r;
  
+       if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+               r = r600_init_microcode(rdev);
+               if (r) {
+                       DRM_ERROR("Failed to load firmware!\n");
+                       return r;
+               }
+       }
        rv770_mc_program(rdev);
        if (rdev->flags & RADEON_IS_AGP) {
                rv770_agp_enable(rdev);
        }
        rv770_gpu_init(rdev);
  
-       r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
-                             &rdev->r600_blit.shader_gpu_addr);
+       r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+       if (unlikely(r != 0))
+               return r;
+       r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+                       &rdev->r600_blit.shader_gpu_addr);
+       radeon_bo_unreserve(rdev->r600_blit.shader_obj);
        if (r) {
                DRM_ERROR("failed to pin blit object %d\n", r);
                return r;
        }
  
+       /* Enable IRQ */
+       r = r600_irq_init(rdev);
+       if (r) {
+               DRM_ERROR("radeon: IH init failed (%d).\n", r);
+               radeon_irq_kms_fini(rdev);
+               return r;
+       }
+       r600_irq_set(rdev);
        r = radeon_ring_init(rdev, rdev->cp.ring_size);
        if (r)
                return r;
@@@ -934,13 -959,19 +959,19 @@@ int rv770_resume(struct radeon_device *
  
  int rv770_suspend(struct radeon_device *rdev)
  {
+       int r;
        /* FIXME: we should wait for ring to be empty */
        r700_cp_stop(rdev);
        rdev->cp.ready = false;
        r600_wb_disable(rdev);
        rv770_pcie_gart_disable(rdev);
        /* unpin shaders bo */
-         radeon_object_unpin(rdev->r600_blit.shader_obj);
+       r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+       if (likely(r == 0)) {
+               radeon_bo_unpin(rdev->r600_blit.shader_obj);
+               radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+       }
        return 0;
  }
  
@@@ -975,7 -1006,11 +1006,11 @@@ int rv770_init(struct radeon_device *rd
        if (r)
                return r;
        /* Post card if necessary */
-       if (!r600_card_posted(rdev) && rdev->bios) {
+       if (!r600_card_posted(rdev)) {
+               if (!rdev->bios) {
+                       dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+                       return -EINVAL;
+               }
                DRM_INFO("GPU not posted. posting now...\n");
                atom_asic_init(rdev->mode_info.atom_context);
        }
        if (r)
                return r;
        /* Memory manager */
-       r = radeon_object_init(rdev);
+       r = radeon_bo_init(rdev);
+       if (r)
+               return r;
+       r = radeon_irq_kms_init(rdev);
        if (r)
                return r;
        rdev->cp.ring_obj = NULL;
        r600_ring_init(rdev, 1024 * 1024);
  
-       if (!rdev->me_fw || !rdev->pfp_fw) {
-               r = r600_cp_init_microcode(rdev);
-               if (r) {
-                       DRM_ERROR("Failed to load firmware!\n");
-                       return r;
-               }
-       }
+       rdev->ih.ring_obj = NULL;
+       r600_ih_ring_init(rdev, 64 * 1024);
  
        r = r600_pcie_gart_init(rdev);
        if (r)
                return r;
  
-       rdev->accel_working = true;
        r = r600_blit_init(rdev);
        if (r) {
-               DRM_ERROR("radeon: failled blitter (%d).\n", r);
-               rdev->accel_working = false;
+               DRM_ERROR("radeon: failed blitter (%d).\n", r);
+               return r;
        }
  
+       rdev->accel_working = true;
        r = rv770_startup(rdev);
        if (r) {
                rv770_suspend(rdev);
        if (rdev->accel_working) {
                r = radeon_ib_pool_init(rdev);
                if (r) {
-                       DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
+                       DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
                        rdev->accel_working = false;
                }
                r = r600_ib_test(rdev);
                if (r) {
-                       DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+                       DRM_ERROR("radeon: failed testing IB (%d).\n", r);
                        rdev->accel_working = false;
                }
        }
@@@ -1051,6 -1086,8 +1086,8 @@@ void rv770_fini(struct radeon_device *r
        rv770_suspend(rdev);
  
        r600_blit_fini(rdev);
+       r600_irq_fini(rdev);
+       radeon_irq_kms_fini(rdev);
        radeon_ring_fini(rdev);
        r600_wb_fini(rdev);
        rv770_pcie_gart_fini(rdev);
        radeon_clocks_fini(rdev);
        if (rdev->flags & RADEON_IS_AGP)
                radeon_agp_fini(rdev);
-       radeon_object_fini(rdev);
+       radeon_bo_fini(rdev);
        radeon_atombios_fini(rdev);
        kfree(rdev->bios);
        rdev->bios = NULL;
@@@ -369,6 -369,7 +369,7 @@@ pgprot_t ttm_io_prot(uint32_t caching_f
  #endif
        return tmp;
  }
+ EXPORT_SYMBOL(ttm_io_prot);
  
  static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long bus_base,
@@@ -427,7 -428,7 +428,7 @@@ static int ttm_bo_kmap_ttm(struct ttm_b
  
                /*
                 * We need to use vmap to get the desired page protection
 -               * or to make the buffer object look contigous.
 +               * or to make the buffer object look contiguous.
                 */
                prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
                        PAGE_KERNEL :