Merge branch 'drm-for-2.6.35' of git://git.kernel.org/pub/scm/linux/kernel/git/airlie...
Linus Torvalds [Fri, 21 May 2010 18:14:52 +0000 (11:14 -0700)]
* 'drm-for-2.6.35' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (207 commits)
  drm/radeon/kms/pm/r600: select the mid clock mode for single head low profile
  drm/radeon: fix power supply kconfig interaction.
  drm/radeon/kms: record object that have been list reserved
  drm/radeon: AGP memory is only I/O if the aperture can be mapped by the CPU.
  drm/radeon/kms: don't default display priority to high on rs4xx
  drm/edid: fix typo in 1600x1200@75 mode
  drm/nouveau: fix i2c-related init table handlers
  drm/nouveau: support init table i2c device identifier 0x81
  drm/nouveau: ensure we've parsed i2c table entry for INIT_*I2C* handlers
  drm/nouveau: display error message for any failed init table opcode
  drm/nouveau: fix init table handlers to return proper error codes
  drm/nv50: support fractional feedback divider on newer chips
  drm/nv50: fix monitor detection on certain chipsets
  drm/nv50: store full dcb i2c entry from vbios
  drm/nv50: fix suspend/resume with DP outputs
  drm/nv50: output calculated crtc pll when debugging on
  drm/nouveau: dump pll limits entries when debugging is on
  drm/nouveau: bios parser fixes for eDP boards
  drm/nouveau: fix a nouveau_bo dereference after it's been destroyed
  drm/nv40: remove some completed ctxprog TODOs
  ...

1  2 
arch/x86/include/asm/cacheflush.h
drivers/gpu/drm/drm_sysfs.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/radeon/atombios.h
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/ttm/ttm_bo.c
include/drm/ttm/ttm_bo_driver.h

@@@ -44,6 -44,9 +44,6 @@@ static inline void copy_from_user_page(
        memcpy(dst, src, len);
  }
  
 -#define PG_WC                         PG_arch_1
 -PAGEFLAG(WC, WC)
 -
  #ifdef CONFIG_X86_PAT
  /*
   * X86 PAT uses page flags WC and Uncached together to keep track of
   * _PAGE_CACHE_UC_MINUS and fourth state where page's memory type has not
   * been changed from its default (value of -1 used to denote this).
   * Note we do not support _PAGE_CACHE_UC here.
 - *
 - * Caller must hold memtype_lock for atomicity.
   */
 +
 +#define _PGMT_DEFAULT         0
 +#define _PGMT_WC              (1UL << PG_arch_1)
 +#define _PGMT_UC_MINUS                (1UL << PG_uncached)
 +#define _PGMT_WB              (1UL << PG_uncached | 1UL << PG_arch_1)
 +#define _PGMT_MASK            (1UL << PG_uncached | 1UL << PG_arch_1)
 +#define _PGMT_CLEAR_MASK      (~_PGMT_MASK)
 +
  static inline unsigned long get_page_memtype(struct page *pg)
  {
 -      if (!PageUncached(pg) && !PageWC(pg))
 +      unsigned long pg_flags = pg->flags & _PGMT_MASK;
 +
 +      if (pg_flags == _PGMT_DEFAULT)
                return -1;
 -      else if (!PageUncached(pg) && PageWC(pg))
 +      else if (pg_flags == _PGMT_WC)
                return _PAGE_CACHE_WC;
 -      else if (PageUncached(pg) && !PageWC(pg))
 +      else if (pg_flags == _PGMT_UC_MINUS)
                return _PAGE_CACHE_UC_MINUS;
        else
                return _PAGE_CACHE_WB;
  
  static inline void set_page_memtype(struct page *pg, unsigned long memtype)
  {
 +      unsigned long memtype_flags = _PGMT_DEFAULT;
 +      unsigned long old_flags;
 +      unsigned long new_flags;
 +
        switch (memtype) {
        case _PAGE_CACHE_WC:
 -              ClearPageUncached(pg);
 -              SetPageWC(pg);
 +              memtype_flags = _PGMT_WC;
                break;
        case _PAGE_CACHE_UC_MINUS:
 -              SetPageUncached(pg);
 -              ClearPageWC(pg);
 +              memtype_flags = _PGMT_UC_MINUS;
                break;
        case _PAGE_CACHE_WB:
 -              SetPageUncached(pg);
 -              SetPageWC(pg);
 -              break;
 -      default:
 -      case -1:
 -              ClearPageUncached(pg);
 -              ClearPageWC(pg);
 +              memtype_flags = _PGMT_WB;
                break;
        }
 +
 +      do {
 +              old_flags = pg->flags;
 +              new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
 +      } while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
  }
  #else
  static inline unsigned long get_page_memtype(struct page *pg) { return -1; }
@@@ -145,9 -139,11 +145,11 @@@ int set_memory_np(unsigned long addr, i
  int set_memory_4k(unsigned long addr, int numpages);
  
  int set_memory_array_uc(unsigned long *addr, int addrinarray);
+ int set_memory_array_wc(unsigned long *addr, int addrinarray);
  int set_memory_array_wb(unsigned long *addr, int addrinarray);
  
  int set_pages_array_uc(struct page **pages, int addrinarray);
+ int set_pages_array_wc(struct page **pages, int addrinarray);
  int set_pages_array_wb(struct page **pages, int addrinarray);
  
  /*
@@@ -193,9 -193,8 +193,9 @@@ static ssize_t enabled_show(struct devi
                        "disabled");
  }
  
 -static ssize_t edid_show(struct kobject *kobj, struct bin_attribute *attr,
 -                       char *buf, loff_t off, size_t count)
 +static ssize_t edid_show(struct file *filp, struct kobject *kobj,
 +                       struct bin_attribute *attr, char *buf, loff_t off,
 +                       size_t count)
  {
        struct device *connector_dev = container_of(kobj, struct device, kobj);
        struct drm_connector *connector = to_drm_connector(connector_dev);
@@@ -334,7 -333,7 +334,7 @@@ static struct device_attribute connecto
  static struct bin_attribute edid_attr = {
        .attr.name = "edid",
        .attr.mode = 0444,
-       .size = 128,
+       .size = 0,
        .read = edid_show,
  };
  
@@@ -169,9 -169,13 +169,13 @@@ void intel_enable_asle (struct drm_devi
  
        if (HAS_PCH_SPLIT(dev))
                ironlake_enable_display_irq(dev_priv, DE_GSE);
-       else
+       else {
                i915_enable_pipestat(dev_priv, 1,
                                     I915_LEGACY_BLC_EVENT_ENABLE);
+               if (IS_I965G(dev))
+                       i915_enable_pipestat(dev_priv, 0,
+                                            I915_LEGACY_BLC_EVENT_ENABLE);
+       }
  }
  
  /**
@@@ -256,18 -260,18 +260,18 @@@ static void i915_hotplug_work_func(stru
                                                    hotplug_work);
        struct drm_device *dev = dev_priv->dev;
        struct drm_mode_config *mode_config = &dev->mode_config;
-       struct drm_connector *connector;
+       struct drm_encoder *encoder;
  
-       if (mode_config->num_connector) {
-               list_for_each_entry(connector, &mode_config->connector_list, head) {
-                       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+       if (mode_config->num_encoder) {
+               list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+                       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
        
                        if (intel_encoder->hot_plug)
                                (*intel_encoder->hot_plug) (intel_encoder);
                }
        }
        /* Just fire off a uevent and let userspace tell us what to do */
-       drm_sysfs_hotplug_event(dev);
+       drm_helper_hpd_irq_event(dev);
  }
  
  static void i915_handle_rps_change(struct drm_device *dev)
@@@ -456,15 -460,11 +460,15 @@@ i915_error_object_create(struct drm_dev
  
        for (page = 0; page < page_count; page++) {
                void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
 +              unsigned long flags;
 +
                if (d == NULL)
                        goto unwind;
 -              s = kmap_atomic(src_priv->pages[page], KM_USER0);
 +              local_irq_save(flags);
 +              s = kmap_atomic(src_priv->pages[page], KM_IRQ0);
                memcpy(d, s, PAGE_SIZE);
 -              kunmap_atomic(s, KM_USER0);
 +              kunmap_atomic(s, KM_IRQ0);
 +              local_irq_restore(flags);
                dst->pages[page] = d;
        }
        dst->page_count = page_count;
@@@ -612,7 -612,7 +616,7 @@@ static void i915_capture_error_state(st
        batchbuffer[1] = NULL;
        count = 0;
        list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
-               struct drm_gem_object *obj = obj_priv->obj;
+               struct drm_gem_object *obj = &obj_priv->base;
  
                if (batchbuffer[0] == NULL &&
                    bbaddr >= obj_priv->gtt_offset &&
        if (error->active_bo) {
                int i = 0;
                list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
-                       struct drm_gem_object *obj = obj_priv->obj;
+                       struct drm_gem_object *obj = &obj_priv->base;
  
                        error->active_bo[i].size = obj->size;
                        error->active_bo[i].name = obj->name;
@@@ -950,7 -950,8 +954,8 @@@ irqreturn_t i915_driver_irq_handler(DRM
                        intel_finish_page_flip(dev, 1);
                }
  
-               if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
+               if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
+                   (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
                    (iir & I915_ASLE_INTERRUPT))
                        opregion_asle_intr(dev);
  
@@@ -742,12 -742,11 +742,11 @@@ bool intel_pipe_has_type (struct drm_cr
  {
      struct drm_device *dev = crtc->dev;
      struct drm_mode_config *mode_config = &dev->mode_config;
-     struct drm_connector *l_entry;
+     struct drm_encoder *l_entry;
  
-     list_for_each_entry(l_entry, &mode_config->connector_list, head) {
-           if (l_entry->encoder &&
-               l_entry->encoder->crtc == crtc) {
-                   struct intel_encoder *intel_encoder = to_intel_encoder(l_entry);
+     list_for_each_entry(l_entry, &mode_config->encoder_list, head) {
+           if (l_entry && l_entry->crtc == crtc) {
+                   struct intel_encoder *intel_encoder = enc_to_intel_encoder(l_entry);
                    if (intel_encoder->type == type)
                            return true;
            }
      return false;
  }
  
- static struct drm_connector *
- intel_pipe_get_connector (struct drm_crtc *crtc)
- {
-     struct drm_device *dev = crtc->dev;
-     struct drm_mode_config *mode_config = &dev->mode_config;
-     struct drm_connector *l_entry, *ret = NULL;
-     list_for_each_entry(l_entry, &mode_config->connector_list, head) {
-           if (l_entry->encoder &&
-               l_entry->encoder->crtc == crtc) {
-                   ret = l_entry;
-                   break;
-           }
-     }
-     return ret;
- }
  #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
  /**
   * Returns whether the given set of divisors are valid for a given refclk with
@@@ -905,9 -887,9 +887,9 @@@ intel_g4x_find_best_PLL(const intel_lim
  
        memset(best_clock, 0, sizeof(*best_clock));
        max_n = limit->n.max;
 -      /* based on hardware requriment prefer smaller n to precision */
 +      /* based on hardware requirement, prefer smaller n to precision */
        for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
 -              /* based on hardware requirment prefere larger m1,m2 */
 +              /* based on hardware requirement, prefer larger m1,m2 */
                for (clock.m1 = limit->m1.max;
                     clock.m1 >= limit->m1.min; clock.m1--) {
                        for (clock.m2 = limit->m2.max;
@@@ -1066,9 -1048,8 +1048,8 @@@ void i8xx_disable_fbc(struct drm_devic
        DRM_DEBUG_KMS("disabled FBC\n");
  }
  
- static bool i8xx_fbc_enabled(struct drm_crtc *crtc)
+ static bool i8xx_fbc_enabled(struct drm_device *dev)
  {
-       struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
@@@ -1125,14 -1106,43 +1106,43 @@@ void g4x_disable_fbc(struct drm_device 
        DRM_DEBUG_KMS("disabled FBC\n");
  }
  
- static bool g4x_fbc_enabled(struct drm_crtc *crtc)
+ static bool g4x_fbc_enabled(struct drm_device *dev)
  {
-       struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
  }
  
+ bool intel_fbc_enabled(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       if (!dev_priv->display.fbc_enabled)
+               return false;
+       return dev_priv->display.fbc_enabled(dev);
+ }
+ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+ {
+       struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+       if (!dev_priv->display.enable_fbc)
+               return;
+       dev_priv->display.enable_fbc(crtc, interval);
+ }
+ void intel_disable_fbc(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       if (!dev_priv->display.disable_fbc)
+               return;
+       dev_priv->display.disable_fbc(dev);
+ }
  /**
   * intel_update_fbc - enable/disable FBC as needed
   * @crtc: CRTC to point the compressor at
@@@ -1167,9 -1177,7 +1177,7 @@@ static void intel_update_fbc(struct drm
        if (!i915_powersave)
                return;
  
-       if (!dev_priv->display.fbc_enabled ||
-           !dev_priv->display.enable_fbc ||
-           !dev_priv->display.disable_fbc)
+       if (!I915_HAS_FBC(dev))
                return;
  
        if (!crtc->fb)
                goto out_disable;
        }
  
-       if (dev_priv->display.fbc_enabled(crtc)) {
+       if (intel_fbc_enabled(dev)) {
                /* We can re-enable it in this case, but need to update pitch */
-               if (fb->pitch > dev_priv->cfb_pitch)
-                       dev_priv->display.disable_fbc(dev);
-               if (obj_priv->fence_reg != dev_priv->cfb_fence)
-                       dev_priv->display.disable_fbc(dev);
-               if (plane != dev_priv->cfb_plane)
-                       dev_priv->display.disable_fbc(dev);
+               if ((fb->pitch > dev_priv->cfb_pitch) ||
+                   (obj_priv->fence_reg != dev_priv->cfb_fence) ||
+                   (plane != dev_priv->cfb_plane))
+                       intel_disable_fbc(dev);
        }
  
-       if (!dev_priv->display.fbc_enabled(crtc)) {
-               /* Now try to turn it back on if possible */
-               dev_priv->display.enable_fbc(crtc, 500);
-       }
+       /* Now try to turn it back on if possible */
+       if (!intel_fbc_enabled(dev))
+               intel_enable_fbc(crtc, 500);
  
        return;
  
  out_disable:
        DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
        /* Multiple disables should be harmless */
-       if (dev_priv->display.fbc_enabled(crtc))
-               dev_priv->display.disable_fbc(dev);
+       if (intel_fbc_enabled(dev))
+               intel_disable_fbc(dev);
  }
  
  static int
@@@ -1510,6 -1515,219 +1515,219 @@@ static void ironlake_set_pll_edp (struc
        udelay(500);
  }
  
+ /* The FDI link training functions for ILK/Ibexpeak. */
+ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
+ {
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+       int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+       int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+       int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
+       int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
+       u32 temp, tries = 0;
+       /* enable CPU FDI TX and PCH FDI RX */
+       temp = I915_READ(fdi_tx_reg);
+       temp |= FDI_TX_ENABLE;
+       temp &= ~(7 << 19);
+       temp |= (intel_crtc->fdi_lanes - 1) << 19;
+       temp &= ~FDI_LINK_TRAIN_NONE;
+       temp |= FDI_LINK_TRAIN_PATTERN_1;
+       I915_WRITE(fdi_tx_reg, temp);
+       I915_READ(fdi_tx_reg);
+       temp = I915_READ(fdi_rx_reg);
+       temp &= ~FDI_LINK_TRAIN_NONE;
+       temp |= FDI_LINK_TRAIN_PATTERN_1;
+       I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
+       I915_READ(fdi_rx_reg);
+       udelay(150);
+       /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
+          for train result */
+       temp = I915_READ(fdi_rx_imr_reg);
+       temp &= ~FDI_RX_SYMBOL_LOCK;
+       temp &= ~FDI_RX_BIT_LOCK;
+       I915_WRITE(fdi_rx_imr_reg, temp);
+       I915_READ(fdi_rx_imr_reg);
+       udelay(150);
+       for (;;) {
+               temp = I915_READ(fdi_rx_iir_reg);
+               DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+               if ((temp & FDI_RX_BIT_LOCK)) {
+                       DRM_DEBUG_KMS("FDI train 1 done.\n");
+                       I915_WRITE(fdi_rx_iir_reg,
+                                  temp | FDI_RX_BIT_LOCK);
+                       break;
+               }
+               tries++;
+               if (tries > 5) {
+                       DRM_DEBUG_KMS("FDI train 1 fail!\n");
+                       break;
+               }
+       }
+       /* Train 2 */
+       temp = I915_READ(fdi_tx_reg);
+       temp &= ~FDI_LINK_TRAIN_NONE;
+       temp |= FDI_LINK_TRAIN_PATTERN_2;
+       I915_WRITE(fdi_tx_reg, temp);
+       temp = I915_READ(fdi_rx_reg);
+       temp &= ~FDI_LINK_TRAIN_NONE;
+       temp |= FDI_LINK_TRAIN_PATTERN_2;
+       I915_WRITE(fdi_rx_reg, temp);
+       udelay(150);
+       tries = 0;
+       for (;;) {
+               temp = I915_READ(fdi_rx_iir_reg);
+               DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+               if (temp & FDI_RX_SYMBOL_LOCK) {
+                       I915_WRITE(fdi_rx_iir_reg,
+                                  temp | FDI_RX_SYMBOL_LOCK);
+                       DRM_DEBUG_KMS("FDI train 2 done.\n");
+                       break;
+               }
+               tries++;
+               if (tries > 5) {
+                       DRM_DEBUG_KMS("FDI train 2 fail!\n");
+                       break;
+               }
+       }
+       DRM_DEBUG_KMS("FDI train done\n");
+ }
+ static int snb_b_fdi_train_param [] = {
+       FDI_LINK_TRAIN_400MV_0DB_SNB_B,
+       FDI_LINK_TRAIN_400MV_6DB_SNB_B,
+       FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
+       FDI_LINK_TRAIN_800MV_0DB_SNB_B,
+ };
+ /* The FDI link training functions for SNB/Cougarpoint. */
+ static void gen6_fdi_link_train(struct drm_crtc *crtc)
+ {
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+       int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+       int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+       int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
+       int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
+       u32 temp, i;
+       /* enable CPU FDI TX and PCH FDI RX */
+       temp = I915_READ(fdi_tx_reg);
+       temp |= FDI_TX_ENABLE;
+       temp &= ~(7 << 19);
+       temp |= (intel_crtc->fdi_lanes - 1) << 19;
+       temp &= ~FDI_LINK_TRAIN_NONE;
+       temp |= FDI_LINK_TRAIN_PATTERN_1;
+       temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+       /* SNB-B */
+       temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+       I915_WRITE(fdi_tx_reg, temp);
+       I915_READ(fdi_tx_reg);
+       temp = I915_READ(fdi_rx_reg);
+       if (HAS_PCH_CPT(dev)) {
+               temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+               temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+       } else {
+               temp &= ~FDI_LINK_TRAIN_NONE;
+               temp |= FDI_LINK_TRAIN_PATTERN_1;
+       }
+       I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
+       I915_READ(fdi_rx_reg);
+       udelay(150);
+       /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
+          for train result */
+       temp = I915_READ(fdi_rx_imr_reg);
+       temp &= ~FDI_RX_SYMBOL_LOCK;
+       temp &= ~FDI_RX_BIT_LOCK;
+       I915_WRITE(fdi_rx_imr_reg, temp);
+       I915_READ(fdi_rx_imr_reg);
+       udelay(150);
+       for (i = 0; i < 4; i++ ) {
+               temp = I915_READ(fdi_tx_reg);
+               temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+               temp |= snb_b_fdi_train_param[i];
+               I915_WRITE(fdi_tx_reg, temp);
+               udelay(500);
+               temp = I915_READ(fdi_rx_iir_reg);
+               DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+               if (temp & FDI_RX_BIT_LOCK) {
+                       I915_WRITE(fdi_rx_iir_reg,
+                                  temp | FDI_RX_BIT_LOCK);
+                       DRM_DEBUG_KMS("FDI train 1 done.\n");
+                       break;
+               }
+       }
+       if (i == 4)
+               DRM_DEBUG_KMS("FDI train 1 fail!\n");
+       /* Train 2 */
+       temp = I915_READ(fdi_tx_reg);
+       temp &= ~FDI_LINK_TRAIN_NONE;
+       temp |= FDI_LINK_TRAIN_PATTERN_2;
+       if (IS_GEN6(dev)) {
+               temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+               /* SNB-B */
+               temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+       }
+       I915_WRITE(fdi_tx_reg, temp);
+       temp = I915_READ(fdi_rx_reg);
+       if (HAS_PCH_CPT(dev)) {
+               temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+               temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+       } else {
+               temp &= ~FDI_LINK_TRAIN_NONE;
+               temp |= FDI_LINK_TRAIN_PATTERN_2;
+       }
+       I915_WRITE(fdi_rx_reg, temp);
+       udelay(150);
+       for (i = 0; i < 4; i++ ) {
+               temp = I915_READ(fdi_tx_reg);
+               temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+               temp |= snb_b_fdi_train_param[i];
+               I915_WRITE(fdi_tx_reg, temp);
+               udelay(500);
+               temp = I915_READ(fdi_rx_iir_reg);
+               DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+               if (temp & FDI_RX_SYMBOL_LOCK) {
+                       I915_WRITE(fdi_rx_iir_reg,
+                                  temp | FDI_RX_SYMBOL_LOCK);
+                       DRM_DEBUG_KMS("FDI train 2 done.\n");
+                       break;
+               }
+       }
+       if (i == 4)
+               DRM_DEBUG_KMS("FDI train 2 fail!\n");
+       DRM_DEBUG_KMS("FDI train done.\n");
+ }
  static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
  {
        struct drm_device *dev = crtc->dev;
        int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
        int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
        int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
-       int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
-       int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
        int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
        int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
        int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ;
        int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B;
        int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B;
        int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
+       int trans_dpll_sel = (pipe == 0) ? 0 : 1;
        u32 temp;
-       int tries = 5, j, n;
+       int n;
        u32 pipe_bpc;
  
        temp = I915_READ(pipeconf_reg);
                        /* enable eDP PLL */
                        ironlake_enable_pll_edp(crtc);
                } else {
-                       /* enable PCH DPLL */
-                       temp = I915_READ(pch_dpll_reg);
-                       if ((temp & DPLL_VCO_ENABLE) == 0) {
-                               I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
-                               I915_READ(pch_dpll_reg);
-                       }
  
                        /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
                        temp = I915_READ(fdi_rx_reg);
                         */
                        temp &= ~(0x7 << 16);
                        temp |= (pipe_bpc << 11);
-                       I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
-                                       FDI_SEL_PCDCLK |
-                                       FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
+                       temp &= ~(7 << 19);
+                       temp |= (intel_crtc->fdi_lanes - 1) << 19;
+                       I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
+                       I915_READ(fdi_rx_reg);
+                       udelay(200);
+                       /* Switch from Rawclk to PCDclk */
+                       temp = I915_READ(fdi_rx_reg);
+                       I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
                        I915_READ(fdi_rx_reg);
                        udelay(200);
  
                }
  
                if (!HAS_eDP) {
-                       /* enable CPU FDI TX and PCH FDI RX */
-                       temp = I915_READ(fdi_tx_reg);
-                       temp |= FDI_TX_ENABLE;
-                       temp |= FDI_DP_PORT_WIDTH_X4; /* default */
-                       temp &= ~FDI_LINK_TRAIN_NONE;
-                       temp |= FDI_LINK_TRAIN_PATTERN_1;
-                       I915_WRITE(fdi_tx_reg, temp);
-                       I915_READ(fdi_tx_reg);
-                       temp = I915_READ(fdi_rx_reg);
-                       temp &= ~FDI_LINK_TRAIN_NONE;
-                       temp |= FDI_LINK_TRAIN_PATTERN_1;
-                       I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
-                       I915_READ(fdi_rx_reg);
-                       udelay(150);
-                       /* Train FDI. */
-                       /* umask FDI RX Interrupt symbol_lock and bit_lock bit
-                          for train result */
-                       temp = I915_READ(fdi_rx_imr_reg);
-                       temp &= ~FDI_RX_SYMBOL_LOCK;
-                       temp &= ~FDI_RX_BIT_LOCK;
-                       I915_WRITE(fdi_rx_imr_reg, temp);
-                       I915_READ(fdi_rx_imr_reg);
-                       udelay(150);
+                       /* For PCH output, training FDI link */
+                       if (IS_GEN6(dev))
+                               gen6_fdi_link_train(crtc);
+                       else
+                               ironlake_fdi_link_train(crtc);
  
-                       temp = I915_READ(fdi_rx_iir_reg);
-                       DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-                       if ((temp & FDI_RX_BIT_LOCK) == 0) {
-                               for (j = 0; j < tries; j++) {
-                                       temp = I915_READ(fdi_rx_iir_reg);
-                                       DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
-                                                               temp);
-                                       if (temp & FDI_RX_BIT_LOCK)
-                                               break;
-                                       udelay(200);
-                               }
-                               if (j != tries)
-                                       I915_WRITE(fdi_rx_iir_reg,
-                                                       temp | FDI_RX_BIT_LOCK);
-                               else
-                                       DRM_DEBUG_KMS("train 1 fail\n");
-                       } else {
-                               I915_WRITE(fdi_rx_iir_reg,
-                                               temp | FDI_RX_BIT_LOCK);
-                               DRM_DEBUG_KMS("train 1 ok 2!\n");
+                       /* enable PCH DPLL */
+                       temp = I915_READ(pch_dpll_reg);
+                       if ((temp & DPLL_VCO_ENABLE) == 0) {
+                               I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
+                               I915_READ(pch_dpll_reg);
                        }
-                       temp = I915_READ(fdi_tx_reg);
-                       temp &= ~FDI_LINK_TRAIN_NONE;
-                       temp |= FDI_LINK_TRAIN_PATTERN_2;
-                       I915_WRITE(fdi_tx_reg, temp);
-                       temp = I915_READ(fdi_rx_reg);
-                       temp &= ~FDI_LINK_TRAIN_NONE;
-                       temp |= FDI_LINK_TRAIN_PATTERN_2;
-                       I915_WRITE(fdi_rx_reg, temp);
-                       udelay(150);
+                       udelay(200);
  
-                       temp = I915_READ(fdi_rx_iir_reg);
-                       DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-                       if ((temp & FDI_RX_SYMBOL_LOCK) == 0) {
-                               for (j = 0; j < tries; j++) {
-                                       temp = I915_READ(fdi_rx_iir_reg);
-                                       DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
-                                                               temp);
-                                       if (temp & FDI_RX_SYMBOL_LOCK)
-                                               break;
-                                       udelay(200);
-                               }
-                               if (j != tries) {
-                                       I915_WRITE(fdi_rx_iir_reg,
-                                                       temp | FDI_RX_SYMBOL_LOCK);
-                                       DRM_DEBUG_KMS("train 2 ok 1!\n");
-                               } else
-                                       DRM_DEBUG_KMS("train 2 fail\n");
-                       } else {
-                               I915_WRITE(fdi_rx_iir_reg,
-                                               temp | FDI_RX_SYMBOL_LOCK);
-                               DRM_DEBUG_KMS("train 2 ok 2!\n");
+                       if (HAS_PCH_CPT(dev)) {
+                               /* Be sure PCH DPLL SEL is set */
+                               temp = I915_READ(PCH_DPLL_SEL);
+                               if (trans_dpll_sel == 0 &&
+                                               (temp & TRANSA_DPLL_ENABLE) == 0)
+                                       temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+                               else if (trans_dpll_sel == 1 &&
+                                               (temp & TRANSB_DPLL_ENABLE) == 0)
+                                       temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+                               I915_WRITE(PCH_DPLL_SEL, temp);
+                               I915_READ(PCH_DPLL_SEL);
                        }
-                       DRM_DEBUG_KMS("train done\n");
  
                        /* set transcoder timing */
                        I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
                        I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg));
                        I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg));
  
+                       /* enable normal train */
+                       temp = I915_READ(fdi_tx_reg);
+                       temp &= ~FDI_LINK_TRAIN_NONE;
+                       I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
+                                       FDI_TX_ENHANCE_FRAME_ENABLE);
+                       I915_READ(fdi_tx_reg);
+                       temp = I915_READ(fdi_rx_reg);
+                       if (HAS_PCH_CPT(dev)) {
+                               temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+                               temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+                       } else {
+                               temp &= ~FDI_LINK_TRAIN_NONE;
+                               temp |= FDI_LINK_TRAIN_NONE;
+                       }
+                       I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+                       I915_READ(fdi_rx_reg);
+                       /* wait one idle pattern time */
+                       udelay(100);
+                       /* For PCH DP, enable TRANS_DP_CTL */
+                       if (HAS_PCH_CPT(dev) &&
+                           intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+                               int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
+                               int reg;
+                               reg = I915_READ(trans_dp_ctl);
+                               reg &= ~TRANS_DP_PORT_SEL_MASK;
+                               reg = TRANS_DP_OUTPUT_ENABLE |
+                                     TRANS_DP_ENH_FRAMING |
+                                     TRANS_DP_VSYNC_ACTIVE_HIGH |
+                                     TRANS_DP_HSYNC_ACTIVE_HIGH;
+                               switch (intel_trans_dp_port_sel(crtc)) {
+                               case PCH_DP_B:
+                                       reg |= TRANS_DP_PORT_SEL_B;
+                                       break;
+                               case PCH_DP_C:
+                                       reg |= TRANS_DP_PORT_SEL_C;
+                                       break;
+                               case PCH_DP_D:
+                                       reg |= TRANS_DP_PORT_SEL_D;
+                                       break;
+                               default:
+                                       DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
+                                       reg |= TRANS_DP_PORT_SEL_B;
+                                       break;
+                               }
+                               I915_WRITE(trans_dp_ctl, reg);
+                               POSTING_READ(trans_dp_ctl);
+                       }
                        /* enable PCH transcoder */
                        temp = I915_READ(transconf_reg);
                        /*
                        while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0)
                                ;
  
-                       /* enable normal */
-                       temp = I915_READ(fdi_tx_reg);
-                       temp &= ~FDI_LINK_TRAIN_NONE;
-                       I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
-                                       FDI_TX_ENHANCE_FRAME_ENABLE);
-                       I915_READ(fdi_tx_reg);
-                       temp = I915_READ(fdi_rx_reg);
-                       temp &= ~FDI_LINK_TRAIN_NONE;
-                       I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE |
-                                       FDI_RX_ENHANCE_FRAME_ENABLE);
-                       I915_READ(fdi_rx_reg);
-                       /* wait one idle pattern time */
-                       udelay(100);
                }
  
                intel_crtc_load_lut(crtc);
                        I915_READ(pf_ctl_reg);
                }
                I915_WRITE(pf_win_size, 0);
+               POSTING_READ(pf_win_size);
  
                /* disable CPU FDI tx and PCH FDI rx */
                temp = I915_READ(fdi_tx_reg);
                temp &= ~FDI_LINK_TRAIN_NONE;
                temp |= FDI_LINK_TRAIN_PATTERN_1;
                I915_WRITE(fdi_tx_reg, temp);
+               POSTING_READ(fdi_tx_reg);
  
                temp = I915_READ(fdi_rx_reg);
-               temp &= ~FDI_LINK_TRAIN_NONE;
-               temp |= FDI_LINK_TRAIN_PATTERN_1;
+               if (HAS_PCH_CPT(dev)) {
+                       temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+                       temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+               } else {
+                       temp &= ~FDI_LINK_TRAIN_NONE;
+                       temp |= FDI_LINK_TRAIN_PATTERN_1;
+               }
                I915_WRITE(fdi_rx_reg, temp);
+               POSTING_READ(fdi_rx_reg);
  
                udelay(100);
  
                                }
                        }
                }
                temp = I915_READ(transconf_reg);
                /* BPC in transcoder is consistent with that in pipeconf */
                temp &= ~PIPE_BPC_MASK;
                I915_READ(transconf_reg);
                udelay(100);
  
+               if (HAS_PCH_CPT(dev)) {
+                       /* disable TRANS_DP_CTL */
+                       int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
+                       int reg;
+                       reg = I915_READ(trans_dp_ctl);
+                       reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
+                       I915_WRITE(trans_dp_ctl, reg);
+                       POSTING_READ(trans_dp_ctl);
+                       /* disable DPLL_SEL */
+                       temp = I915_READ(PCH_DPLL_SEL);
+                       if (trans_dpll_sel == 0)
+                               temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
+                       else
+                               temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+                       I915_WRITE(PCH_DPLL_SEL, temp);
+                       I915_READ(PCH_DPLL_SEL);
+               }
                /* disable PCH DPLL */
                temp = I915_READ(pch_dpll_reg);
-               if ((temp & DPLL_VCO_ENABLE) != 0) {
-                       I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
-                       I915_READ(pch_dpll_reg);
-               }
+               I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
+               I915_READ(pch_dpll_reg);
  
                if (HAS_eDP) {
                        ironlake_disable_pll_edp(crtc);
                }
  
+               /* Switch from PCDclk to Rawclk */
                temp = I915_READ(fdi_rx_reg);
                temp &= ~FDI_SEL_PCDCLK;
                I915_WRITE(fdi_rx_reg, temp);
                I915_READ(fdi_rx_reg);
  
+               /* Disable CPU FDI TX PLL */
+               temp = I915_READ(fdi_tx_reg);
+               I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
+               I915_READ(fdi_tx_reg);
+               udelay(100);
                temp = I915_READ(fdi_rx_reg);
                temp &= ~FDI_RX_PLL_ENABLE;
                I915_WRITE(fdi_rx_reg, temp);
                I915_READ(fdi_rx_reg);
  
-               /* Disable CPU FDI TX PLL */
-               temp = I915_READ(fdi_tx_reg);
-               if ((temp & FDI_TX_PLL_ENABLE) != 0) {
-                       I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
-                       I915_READ(fdi_tx_reg);
-                       udelay(100);
-               }
                /* Wait for the clocks to turn off. */
                udelay(100);
                break;
@@@ -2331,6 -2554,30 +2554,30 @@@ static struct intel_watermark_params i8
        I830_FIFO_LINE_SIZE
  };
  
+ static struct intel_watermark_params ironlake_display_wm_info = {
+       ILK_DISPLAY_FIFO,
+       ILK_DISPLAY_MAXWM,
+       ILK_DISPLAY_DFTWM,
+       2,
+       ILK_FIFO_LINE_SIZE
+ };
+ static struct intel_watermark_params ironlake_display_srwm_info = {
+       ILK_DISPLAY_SR_FIFO,
+       ILK_DISPLAY_MAX_SRWM,
+       ILK_DISPLAY_DFT_SRWM,
+       2,
+       ILK_FIFO_LINE_SIZE
+ };
+ static struct intel_watermark_params ironlake_cursor_srwm_info = {
+       ILK_CURSOR_SR_FIFO,
+       ILK_CURSOR_MAX_SRWM,
+       ILK_CURSOR_DFT_SRWM,
+       2,
+       ILK_FIFO_LINE_SIZE
+ };
  /**
   * intel_calculate_wm - calculate watermark level
   * @clock_in_khz: pixel clock
@@@ -2449,66 -2696,6 +2696,6 @@@ static void pineview_disable_cxsr(struc
        DRM_INFO("Big FIFO is disabled\n");
  }
  
- static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock,
-                                int pixel_size)
- {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 reg;
-       unsigned long wm;
-       struct cxsr_latency *latency;
-       latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
-               dev_priv->mem_freq);
-       if (!latency) {
-               DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
-               pineview_disable_cxsr(dev);
-               return;
-       }
-       /* Display SR */
-       wm = intel_calculate_wm(clock, &pineview_display_wm, pixel_size,
-                               latency->display_sr);
-       reg = I915_READ(DSPFW1);
-       reg &= 0x7fffff;
-       reg |= wm << 23;
-       I915_WRITE(DSPFW1, reg);
-       DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
-       /* cursor SR */
-       wm = intel_calculate_wm(clock, &pineview_cursor_wm, pixel_size,
-                               latency->cursor_sr);
-       reg = I915_READ(DSPFW3);
-       reg &= ~(0x3f << 24);
-       reg |= (wm & 0x3f) << 24;
-       I915_WRITE(DSPFW3, reg);
-       /* Display HPLL off SR */
-       wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
-               latency->display_hpll_disable, I915_FIFO_LINE_SIZE);
-       reg = I915_READ(DSPFW3);
-       reg &= 0xfffffe00;
-       reg |= wm & 0x1ff;
-       I915_WRITE(DSPFW3, reg);
-       /* cursor HPLL off SR */
-       wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, pixel_size,
-                               latency->cursor_hpll_disable);
-       reg = I915_READ(DSPFW3);
-       reg &= ~(0x3f << 16);
-       reg |= (wm & 0x3f) << 16;
-       I915_WRITE(DSPFW3, reg);
-       DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
-       /* activate cxsr */
-       reg = I915_READ(DSPFW3);
-       reg |= PINEVIEW_SELF_REFRESH_EN;
-       I915_WRITE(DSPFW3, reg);
-       DRM_INFO("Big FIFO is enabled\n");
-       return;
- }
  /*
   * Latency for FIFO fetches is dependent on several factors:
   *   - memory configuration (speed, channels)
@@@ -2593,6 -2780,71 +2780,71 @@@ static int i830_get_fifo_size(struct dr
        return size;
  }
  
+ static void pineview_update_wm(struct drm_device *dev,  int planea_clock,
+                         int planeb_clock, int sr_hdisplay, int pixel_size)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 reg;
+       unsigned long wm;
+       struct cxsr_latency *latency;
+       int sr_clock;
+       latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
+                                        dev_priv->mem_freq);
+       if (!latency) {
+               DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+               pineview_disable_cxsr(dev);
+               return;
+       }
+       if (!planea_clock || !planeb_clock) {
+               sr_clock = planea_clock ? planea_clock : planeb_clock;
+               /* Display SR */
+               wm = intel_calculate_wm(sr_clock, &pineview_display_wm,
+                                       pixel_size, latency->display_sr);
+               reg = I915_READ(DSPFW1);
+               reg &= ~DSPFW_SR_MASK;
+               reg |= wm << DSPFW_SR_SHIFT;
+               I915_WRITE(DSPFW1, reg);
+               DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
+               /* cursor SR */
+               wm = intel_calculate_wm(sr_clock, &pineview_cursor_wm,
+                                       pixel_size, latency->cursor_sr);
+               reg = I915_READ(DSPFW3);
+               reg &= ~DSPFW_CURSOR_SR_MASK;
+               reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
+               I915_WRITE(DSPFW3, reg);
+               /* Display HPLL off SR */
+               wm = intel_calculate_wm(sr_clock, &pineview_display_hplloff_wm,
+                                       pixel_size, latency->display_hpll_disable);
+               reg = I915_READ(DSPFW3);
+               reg &= ~DSPFW_HPLL_SR_MASK;
+               reg |= wm & DSPFW_HPLL_SR_MASK;
+               I915_WRITE(DSPFW3, reg);
+               /* cursor HPLL off SR */
+               wm = intel_calculate_wm(sr_clock, &pineview_cursor_hplloff_wm,
+                                       pixel_size, latency->cursor_hpll_disable);
+               reg = I915_READ(DSPFW3);
+               reg &= ~DSPFW_HPLL_CURSOR_MASK;
+               reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
+               I915_WRITE(DSPFW3, reg);
+               DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
+               /* activate cxsr */
+               reg = I915_READ(DSPFW3);
+               reg |= PINEVIEW_SELF_REFRESH_EN;
+               I915_WRITE(DSPFW3, reg);
+               DRM_DEBUG_KMS("Self-refresh is enabled\n");
+       } else {
+               pineview_disable_cxsr(dev);
+               DRM_DEBUG_KMS("Self-refresh is disabled\n");
+       }
+ }
  static void g4x_update_wm(struct drm_device *dev,  int planea_clock,
                          int planeb_clock, int sr_hdisplay, int pixel_size)
  {
@@@ -2813,6 -3065,108 +3065,108 @@@ static void i830_update_wm(struct drm_d
        I915_WRITE(FW_BLC, fwater_lo);
  }
  
+ #define ILK_LP0_PLANE_LATENCY         700
+ static void ironlake_update_wm(struct drm_device *dev,  int planea_clock,
+                      int planeb_clock, int sr_hdisplay, int pixel_size)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+       int sr_wm, cursor_wm;
+       unsigned long line_time_us;
+       int sr_clock, entries_required;
+       u32 reg_value;
+       /* Calculate and update the watermark for plane A */
+       if (planea_clock) {
+               entries_required = ((planea_clock / 1000) * pixel_size *
+                                    ILK_LP0_PLANE_LATENCY) / 1000;
+               entries_required = DIV_ROUND_UP(entries_required,
+                                  ironlake_display_wm_info.cacheline_size);
+               planea_wm = entries_required +
+                           ironlake_display_wm_info.guard_size;
+               if (planea_wm > (int)ironlake_display_wm_info.max_wm)
+                       planea_wm = ironlake_display_wm_info.max_wm;
+               cursora_wm = 16;
+               reg_value = I915_READ(WM0_PIPEA_ILK);
+               reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+               reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) |
+                            (cursora_wm & WM0_PIPE_CURSOR_MASK);
+               I915_WRITE(WM0_PIPEA_ILK, reg_value);
+               DRM_DEBUG_KMS("FIFO watermarks For pipe A - plane %d, "
+                               "cursor: %d\n", planea_wm, cursora_wm);
+       }
+       /* Calculate and update the watermark for plane B */
+       if (planeb_clock) {
+               entries_required = ((planeb_clock / 1000) * pixel_size *
+                                    ILK_LP0_PLANE_LATENCY) / 1000;
+               entries_required = DIV_ROUND_UP(entries_required,
+                                  ironlake_display_wm_info.cacheline_size);
+               planeb_wm = entries_required +
+                           ironlake_display_wm_info.guard_size;
+               if (planeb_wm > (int)ironlake_display_wm_info.max_wm)
+                       planeb_wm = ironlake_display_wm_info.max_wm;
+               cursorb_wm = 16;
+               reg_value = I915_READ(WM0_PIPEB_ILK);
+               reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+               reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) |
+                            (cursorb_wm & WM0_PIPE_CURSOR_MASK);
+               I915_WRITE(WM0_PIPEB_ILK, reg_value);
+               DRM_DEBUG_KMS("FIFO watermarks For pipe B - plane %d, "
+                               "cursor: %d\n", planeb_wm, cursorb_wm);
+       }
+       /*
+        * Calculate and update the self-refresh watermark only when one
+        * display plane is used.
+        */
+       if (!planea_clock || !planeb_clock) {
+               int line_count;
+               /* Read the self-refresh latency. The unit is 0.5us */
+               int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
+               sr_clock = planea_clock ? planea_clock : planeb_clock;
+               line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+               /* Use ns/us then divide to preserve precision */
+               line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
+                              / 1000;
+               /* calculate the self-refresh watermark for display plane */
+               entries_required = line_count * sr_hdisplay * pixel_size;
+               entries_required = DIV_ROUND_UP(entries_required,
+                                  ironlake_display_srwm_info.cacheline_size);
+               sr_wm = entries_required +
+                       ironlake_display_srwm_info.guard_size;
+               /* calculate the self-refresh watermark for display cursor */
+               entries_required = line_count * pixel_size * 64;
+               entries_required = DIV_ROUND_UP(entries_required,
+                                  ironlake_cursor_srwm_info.cacheline_size);
+               cursor_wm = entries_required +
+                           ironlake_cursor_srwm_info.guard_size;
+               /* configure watermark and enable self-refresh */
+               reg_value = I915_READ(WM1_LP_ILK);
+               reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
+                              WM1_LP_CURSOR_MASK);
+               reg_value |= WM1_LP_SR_EN |
+                            (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
+                            (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
+               I915_WRITE(WM1_LP_ILK, reg_value);
+               DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
+                               "cursor %d\n", sr_wm, cursor_wm);
+       } else {
+               /* Turn off self refresh if both pipes are enabled */
+               I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
+       }
+ }
  /**
   * intel_update_watermarks - update FIFO watermark values based on current modes
   *
@@@ -2882,12 -3236,6 +3236,6 @@@ static void intel_update_watermarks(str
        if (enabled <= 0)
                return;
  
-       /* Single plane configs can enable self refresh */
-       if (enabled == 1 && IS_PINEVIEW(dev))
-               pineview_enable_cxsr(dev, sr_clock, pixel_size);
-       else if (IS_PINEVIEW(dev))
-               pineview_disable_cxsr(dev);
        dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
                                    sr_hdisplay, pixel_size);
  }
@@@ -2924,7 -3272,8 +3272,8 @@@ static int intel_crtc_mode_set(struct d
        bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
        bool is_edp = false;
        struct drm_mode_config *mode_config = &dev->mode_config;
-       struct drm_connector *connector;
+       struct drm_encoder *encoder;
+       struct intel_encoder *intel_encoder = NULL;
        const intel_limit_t *limit;
        int ret;
        struct fdi_m_n m_n = {0};
        int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0;
        int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
        int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+       int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+       int trans_dpll_sel = (pipe == 0) ? 0 : 1;
        int lvds_reg = LVDS;
        u32 temp;
        int sdvo_pixel_multiply;
  
        drm_vblank_pre_modeset(dev, pipe);
  
-       list_for_each_entry(connector, &mode_config->connector_list, head) {
-               struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+       list_for_each_entry(encoder, &mode_config->encoder_list, head) {
  
-               if (!connector->encoder || connector->encoder->crtc != crtc)
+               if (!encoder || encoder->crtc != crtc)
                        continue;
  
+               intel_encoder = enc_to_intel_encoder(encoder);
                switch (intel_encoder->type) {
                case INTEL_OUTPUT_LVDS:
                        is_lvds = true;
  
        /* FDI link */
        if (HAS_PCH_SPLIT(dev)) {
-               int lane, link_bw, bpp;
+               int lane = 0, link_bw, bpp;
                /* eDP doesn't require FDI link, so just set DP M/N
                   according to current link config */
                if (is_edp) {
-                       struct drm_connector *edp;
                        target_clock = mode->clock;
-                       edp = intel_pipe_get_connector(crtc);
-                       intel_edp_link_config(to_intel_encoder(edp),
+                       intel_edp_link_config(intel_encoder,
                                        &lane, &link_bw);
                } else {
                        /* DP over FDI requires target mode clock
                                target_clock = mode->clock;
                        else
                                target_clock = adjusted_mode->clock;
-                       lane = 4;
                        link_bw = 270000;
                }
  
                        bpp = 24;
                }
  
+               if (!lane) {
+                       /* 
+                        * Account for spread spectrum to avoid
+                        * oversubscribing the link. Max center spread
+                        * is 2.5%; use 5% for safety's sake.
+                        */
+                       u32 bps = target_clock * bpp * 21 / 20;
+                       lane = bps / (link_bw * 8) + 1;
+               }
+               intel_crtc->fdi_lanes = lane;
                ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
        }
  
                        pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
        }
  
-       dspcntr |= DISPLAY_PLANE_ENABLE;
-       pipeconf |= PIPEACONF_ENABLE;
-       dpll |= DPLL_VCO_ENABLE;
        /* Disable the panel fitter if it was on our pipe */
        if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
                I915_WRITE(PFIT_CONTROL, 0);
                udelay(150);
        }
  
+       /* enable transcoder DPLL */
+       if (HAS_PCH_CPT(dev)) {
+               temp = I915_READ(PCH_DPLL_SEL);
+               if (trans_dpll_sel == 0)
+                       temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+               else
+                       temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+               I915_WRITE(PCH_DPLL_SEL, temp);
+               I915_READ(PCH_DPLL_SEL);
+               udelay(150);
+       }
        /* The LVDS pin pair needs to be on before the DPLLs are enabled.
         * This is an exception to the general rule that mode_set doesn't turn
         * things on.
                        lvds_reg = PCH_LVDS;
  
                lvds = I915_READ(lvds_reg);
-               lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT;
+               lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+               if (pipe == 1) {
+                       if (HAS_PCH_CPT(dev))
+                               lvds |= PORT_TRANS_B_SEL_CPT;
+                       else
+                               lvds |= LVDS_PIPEB_SELECT;
+               } else {
+                       if (HAS_PCH_CPT(dev))
+                               lvds &= ~PORT_TRANS_SEL_MASK;
+                       else
+                               lvds &= ~LVDS_PIPEB_SELECT;
+               }
                /* set the corresponsding LVDS_BORDER bit */
                lvds |= dev_priv->lvds_border_bits;
                /* Set the B0-B3 data pairs corresponding to whether we're going to
                /* set the dithering flag */
                if (IS_I965G(dev)) {
                        if (dev_priv->lvds_dither) {
-                               if (HAS_PCH_SPLIT(dev))
+                               if (HAS_PCH_SPLIT(dev)) {
                                        pipeconf |= PIPE_ENABLE_DITHER;
-                               else
+                                       pipeconf |= PIPE_DITHER_TYPE_ST01;
+                               } else
                                        lvds |= LVDS_ENABLE_DITHER;
                        } else {
-                               if (HAS_PCH_SPLIT(dev))
+                               if (HAS_PCH_SPLIT(dev)) {
                                        pipeconf &= ~PIPE_ENABLE_DITHER;
-                               else
+                                       pipeconf &= ~PIPE_DITHER_TYPE_MASK;
+                               } else
                                        lvds &= ~LVDS_ENABLE_DITHER;
                        }
                }
        }
        if (is_dp)
                intel_dp_set_m_n(crtc, mode, adjusted_mode);
+       else if (HAS_PCH_SPLIT(dev)) {
+               /* For non-DP output, clear any trans DP clock recovery setting.*/
+               if (pipe == 0) {
+                       I915_WRITE(TRANSA_DATA_M1, 0);
+                       I915_WRITE(TRANSA_DATA_N1, 0);
+                       I915_WRITE(TRANSA_DP_LINK_M1, 0);
+                       I915_WRITE(TRANSA_DP_LINK_N1, 0);
+               } else {
+                       I915_WRITE(TRANSB_DATA_M1, 0);
+                       I915_WRITE(TRANSB_DATA_N1, 0);
+                       I915_WRITE(TRANSB_DP_LINK_M1, 0);
+                       I915_WRITE(TRANSB_DP_LINK_N1, 0);
+               }
+       }
  
        if (!is_edp) {
                I915_WRITE(fp_reg, fp);
                        /* enable FDI RX PLL too */
                        temp = I915_READ(fdi_rx_reg);
                        I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
+                       I915_READ(fdi_rx_reg);
+                       udelay(200);
+                       /* enable FDI TX PLL too */
+                       temp = I915_READ(fdi_tx_reg);
+                       I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
+                       I915_READ(fdi_tx_reg);
+                       /* enable FDI RX PCDCLK */
+                       temp = I915_READ(fdi_rx_reg);
+                       I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
+                       I915_READ(fdi_rx_reg);
                        udelay(200);
                }
        }
@@@ -3671,6 -4078,7 +4078,7 @@@ static struct drm_display_mode load_det
  };
  
  struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
+                                           struct drm_connector *connector,
                                            struct drm_display_mode *mode,
                                            int *dpms_mode)
  {
        }
  
        encoder->crtc = crtc;
-       intel_encoder->base.encoder = encoder;
+       connector->encoder = encoder;
        intel_encoder->load_detect_temp = true;
  
        intel_crtc = to_intel_crtc(crtc);
        return crtc;
  }
  
- void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpms_mode)
+ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
+                                   struct drm_connector *connector, int dpms_mode)
  {
        struct drm_encoder *encoder = &intel_encoder->enc;
        struct drm_device *dev = encoder->dev;
  
        if (intel_encoder->load_detect_temp) {
                encoder->crtc = NULL;
-               intel_encoder->base.encoder = NULL;
+               connector->encoder = NULL;
                intel_encoder->load_detect_temp = false;
                crtc->enabled = drm_helper_crtc_in_use(crtc);
                drm_helper_disable_unused_functions(dev);
@@@ -4392,14 -4801,14 +4801,14 @@@ struct drm_crtc *intel_get_crtc_from_pi
        return crtc;
  }
  
- static int intel_connector_clones(struct drm_device *dev, int type_mask)
+ static int intel_encoder_clones(struct drm_device *dev, int type_mask)
  {
        int index_mask = 0;
-       struct drm_connector *connector;
+       struct drm_encoder *encoder;
        int entry = 0;
  
-         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
                if (type_mask & intel_encoder->clone_mask)
                        index_mask |= (1 << entry);
                entry++;
  static void intel_setup_outputs(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_connector *connector;
+       struct drm_encoder *encoder;
  
        intel_crt_init(dev);
  
                        intel_dp_init(dev, DP_A);
  
                if (I915_READ(HDMIB) & PORT_DETECTED) {
-                       /* check SDVOB */
-                       /* found = intel_sdvo_init(dev, HDMIB); */
-                       found = 0;
+                       /* PCH SDVOB multiplex with HDMIB */
+                       found = intel_sdvo_init(dev, PCH_SDVOB);
                        if (!found)
                                intel_hdmi_init(dev, HDMIB);
                        if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
        if (SUPPORTS_TV(dev))
                intel_tv_init(dev);
  
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-               struct drm_encoder *encoder = &intel_encoder->enc;
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
  
                encoder->possible_crtcs = intel_encoder->crtc_mask;
-               encoder->possible_clones = intel_connector_clones(dev,
+               encoder->possible_clones = intel_encoder_clones(dev,
                                                intel_encoder->clone_mask);
        }
  }
  static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
  {
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-       struct drm_device *dev = fb->dev;
-       if (fb->fbdev)
-               intelfb_remove(dev, fb);
  
        drm_framebuffer_cleanup(fb);
        drm_gem_object_unreference_unlocked(intel_fb->obj);
@@@ -4533,18 -4936,13 +4936,13 @@@ static const struct drm_framebuffer_fun
        .create_handle = intel_user_framebuffer_create_handle,
  };
  
- int intel_framebuffer_create(struct drm_device *dev,
-                            struct drm_mode_fb_cmd *mode_cmd,
-                            struct drm_framebuffer **fb,
-                            struct drm_gem_object *obj)
+ int intel_framebuffer_init(struct drm_device *dev,
+                          struct intel_framebuffer *intel_fb,
+                          struct drm_mode_fb_cmd *mode_cmd,
+                          struct drm_gem_object *obj)
  {
-       struct intel_framebuffer *intel_fb;
        int ret;
  
-       intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
-       if (!intel_fb)
-               return -ENOMEM;
        ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
        if (ret) {
                DRM_ERROR("framebuffer init failed %d\n", ret);
        }
  
        drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
        intel_fb->obj = obj;
        return 0;
  }
  
  static struct drm_framebuffer *
  intel_user_framebuffer_create(struct drm_device *dev,
                              struct drm_file *filp,
                              struct drm_mode_fb_cmd *mode_cmd)
  {
        struct drm_gem_object *obj;
-       struct drm_framebuffer *fb;
+       struct intel_framebuffer *intel_fb;
        int ret;
  
        obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
        if (!obj)
                return NULL;
  
-       ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj);
+       intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+       if (!intel_fb)
+               return NULL;
+       ret = intel_framebuffer_init(dev, intel_fb,
+                                    mode_cmd, obj);
        if (ret) {
                drm_gem_object_unreference_unlocked(obj);
+               kfree(intel_fb);
                return NULL;
        }
  
-       return fb;
+       return &intel_fb->base;
  }
  
  static const struct drm_mode_config_funcs intel_mode_funcs = {
        .fb_create = intel_user_framebuffer_create,
-       .fb_changed = intelfb_probe,
+       .output_poll_changed = intel_fb_output_poll_changed,
  };
  
  static struct drm_gem_object *
@@@ -4594,7 -4993,7 +4993,7 @@@ intel_alloc_power_context(struct drm_de
        struct drm_gem_object *pwrctx;
        int ret;
  
-       pwrctx = drm_gem_object_alloc(dev, 4096);
+       pwrctx = i915_gem_alloc_object(dev, 4096);
        if (!pwrctx) {
                DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
                return NULL;
@@@ -4732,6 -5131,25 +5131,25 @@@ void intel_init_clock_gating(struct drm
                }
  
                I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+               /*
+                * According to the spec the following bits should be set in
+                * order to enable memory self-refresh
+                * The bit 22/21 of 0x42004
+                * The bit 5 of 0x42020
+                * The bit 15 of 0x45000
+                */
+               if (IS_IRONLAKE(dev)) {
+                       I915_WRITE(ILK_DISPLAY_CHICKEN2,
+                                       (I915_READ(ILK_DISPLAY_CHICKEN2) |
+                                       ILK_DPARB_GATE | ILK_VSDPFD_FULL));
+                       I915_WRITE(ILK_DSPCLK_GATE,
+                                       (I915_READ(ILK_DSPCLK_GATE) |
+                                               ILK_DPARB_CLK_GATE));
+                       I915_WRITE(DISP_ARB_CTL,
+                                       (I915_READ(DISP_ARB_CTL) |
+                                               DISP_FBC_WM_DIS));
+               }
                return;
        } else if (IS_G4X(dev)) {
                uint32_t dspclk_gate;
@@@ -4809,8 -5227,7 +5227,7 @@@ static void intel_init_display(struct d
        else
                dev_priv->display.dpms = i9xx_crtc_dpms;
  
-       /* Only mobile has FBC, leave pointers NULL for other chips */
-       if (IS_MOBILE(dev)) {
+       if (I915_HAS_FBC(dev)) {
                if (IS_GM45(dev)) {
                        dev_priv->display.fbc_enabled = g4x_fbc_enabled;
                        dev_priv->display.enable_fbc = g4x_enable_fbc;
                        i830_get_display_clock_speed;
  
        /* For FIFO watermark updates */
-       if (HAS_PCH_SPLIT(dev))
-               dev_priv->display.update_wm = NULL;
-       else if (IS_G4X(dev))
+       if (HAS_PCH_SPLIT(dev)) {
+               if (IS_IRONLAKE(dev)) {
+                       if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
+                               dev_priv->display.update_wm = ironlake_update_wm;
+                       else {
+                               DRM_DEBUG_KMS("Failed to get proper latency. "
+                                             "Disable CxSR\n");
+                               dev_priv->display.update_wm = NULL;
+                       }
+               } else
+                       dev_priv->display.update_wm = NULL;
+       } else if (IS_PINEVIEW(dev)) {
+               if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+                                           dev_priv->fsb_freq,
+                                           dev_priv->mem_freq)) {
+                       DRM_INFO("failed to find known CxSR latency "
+                                "(found fsb freq %d, mem freq %d), "
+                                "disabling CxSR\n",
+                                dev_priv->fsb_freq, dev_priv->mem_freq);
+                       /* Disable CxSR and never update its watermark again */
+                       pineview_disable_cxsr(dev);
+                       dev_priv->display.update_wm = NULL;
+               } else
+                       dev_priv->display.update_wm = pineview_update_wm;
+       } else if (IS_G4X(dev))
                dev_priv->display.update_wm = g4x_update_wm;
        else if (IS_I965G(dev))
                dev_priv->display.update_wm = i965_update_wm;
@@@ -4923,13 -5362,6 +5362,6 @@@ void intel_modeset_init(struct drm_devi
                    (unsigned long)dev);
  
        intel_setup_overlay(dev);
-       if (IS_PINEVIEW(dev) && !intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
-                                                       dev_priv->fsb_freq,
-                                                       dev_priv->mem_freq))
-               DRM_INFO("failed to find known CxSR latency "
-                        "(found fsb freq %d, mem freq %d), disabling CxSR\n",
-                        dev_priv->fsb_freq, dev_priv->mem_freq);
  }
  
  void intel_modeset_cleanup(struct drm_device *dev)
  
        mutex_lock(&dev->struct_mutex);
  
+       drm_kms_helper_poll_fini(dev);
+       intel_fbdev_fini(dev);
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                /* Skip inactive CRTCs */
                if (!crtc->fb)
  }
  
  
- /* current intel driver doesn't take advantage of encoders
-    always give back the encoder for the connector
- */
- struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
+ /*
+  * Return which encoder is currently attached for connector.
+  */
+ struct drm_encoder *intel_attached_encoder (struct drm_connector *connector)
  {
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+       struct drm_mode_object *obj;
+       struct drm_encoder *encoder;
+       int i;
  
-       return &intel_encoder->enc;
+       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+               if (connector->encoder_ids[i] == 0)
+                       break;
+               obj = drm_mode_object_find(connector->dev,
+                                            connector->encoder_ids[i],
+                                            DRM_MODE_OBJECT_ENCODER);
+               if (!obj)
+                       continue;
+               encoder = obj_to_encoder(obj);
+               return encoder;
+       }
+       return NULL;
  }
  
  /*
@@@ -3780,7 -3780,7 +3780,7 @@@ typedef struct _ATOM_ASIC_SS_ASSIGNMEN
        UCHAR                                                           ucReserved[2];
  }ATOM_ASIC_SS_ASSIGNMENT;
  
 -//Define ucClockIndication, SW uses the IDs below to search if the SS is requried/enabled on a clock branch/signal type.
 +//Define ucClockIndication, SW uses the IDs below to search if the SS is required/enabled on a clock branch/signal type.
  //SS is not required or enabled if a match is not found.
  #define ASIC_INTERNAL_MEMORY_SS                       1
  #define ASIC_INTERNAL_ENGINE_SS                       2
@@@ -5742,6 -5742,9 +5742,9 @@@ typedef struct _ATOM_PPLIB_THERMALCONTR
  #define ATOM_PP_THERMALCONTROLLER_RV6xx     7
  #define ATOM_PP_THERMALCONTROLLER_RV770     8
  #define ATOM_PP_THERMALCONTROLLER_ADT7473   9
+ #define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO     11
+ #define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
+ #define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL   0x89    // ADT7473 Fan Control + Internal Thermal Controller
  
  typedef struct _ATOM_PPLIB_STATE
  {
      UCHAR ucClockStateIndices[1]; // variable-sized
  } ATOM_PPLIB_STATE;
  
+ typedef struct _ATOM_PPLIB_FANTABLE
+ {
+     UCHAR   ucFanTableFormat;                // Change this if the table format changes or version changes so that the other fields are not the same.
+     UCHAR   ucTHyst;                         // Temperature hysteresis. Integer.
+     USHORT  usTMin;                          // The temperature, in 0.01 centigrades, below which we just run at a minimal PWM.
+     USHORT  usTMed;                          // The middle temperature where we change slopes.
+     USHORT  usTHigh;                         // The high point above TMed for adjusting the second slope.
+     USHORT  usPWMMin;                        // The minimum PWM value in percent (0.01% increments).
+     USHORT  usPWMMed;                        // The PWM value (in percent) at TMed.
+     USHORT  usPWMHigh;                       // The PWM value at THigh.
+ } ATOM_PPLIB_FANTABLE;
+ typedef struct _ATOM_PPLIB_EXTENDEDHEADER
+ {
+     USHORT  usSize;
+     ULONG   ulMaxEngineClock;   // For Overdrive.
+     ULONG   ulMaxMemoryClock;   // For Overdrive.
+     // Add extra system parameters here, always adjust size to include all fields.
+ } ATOM_PPLIB_EXTENDEDHEADER;
  //// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
  #define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
  #define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
  #define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
  #define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
  #define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
+ #define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
+ #define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000              // Go to boot state on alerts, e.g. on an AC->DC transition.
+ #define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000   // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
+ #define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000                   // Does the driver control VDDCI independently from VDDC.
+ #define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000               // Enable the 'regulator hot' feature.
+ #define ATOM_PP_PLATFORM_CAP_BACO          0x00020000               // Does the driver supports BACO state.
  
  typedef struct _ATOM_PPLIB_POWERPLAYTABLE
  {
  
  } ATOM_PPLIB_POWERPLAYTABLE;
  
+ typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
+ {
+     ATOM_PPLIB_POWERPLAYTABLE basicTable;
+     UCHAR   ucNumCustomThermalPolicy;
+     USHORT  usCustomThermalPolicyArrayOffset;
+ }ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
+ typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
+ {
+     ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
+     USHORT                     usFormatID;                      // To be used ONLY by PPGen.
+     USHORT                     usFanTableOffset;
+     USHORT                     usExtendendedHeaderOffset;
+ } ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
  //// ATOM_PPLIB_NONCLOCK_INFO::usClassification
  #define ATOM_PPLIB_CLASSIFICATION_UI_MASK          0x0007
  #define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT         0
  #define ATOM_PPLIB_CLASSIFICATION_UVDSTATE               0x0400
  #define ATOM_PPLIB_CLASSIFICATION_3DLOW                  0x0800
  #define ATOM_PPLIB_CLASSIFICATION_ACPI                   0x1000
- // remaining 3 bits are reserved
+ #define ATOM_PPLIB_CLASSIFICATION_HD2STATE               0x2000
+ #define ATOM_PPLIB_CLASSIFICATION_HDSTATE                0x4000
+ #define ATOM_PPLIB_CLASSIFICATION_SDSTATE                0x8000
  
  //// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
  #define ATOM_PPLIB_SINGLE_DISPLAY_ONLY           0x00000001
  
  #define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING        0x00001000
  #define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS  0x00002000
+ #define ATOM_PPLIB_DISALLOW_ON_DC                        0x00004000
  #define ATOM_PPLIB_ENABLE_VARIBRIGHT                     0x00008000
  
- #define ATOM_PPLIB_DISALLOW_ON_DC                       0x00004000
+ //memory related flags
+ #define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF               0x000010000
+ //M3 Arb    //2bits, current 3 sets of parameters in total
+ #define ATOM_PPLIB_M3ARB_MASK                       0x00060000
+ #define ATOM_PPLIB_M3ARB_SHIFT                      17
  
  // Contained in an array starting at the offset
  // in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
@@@ -5860,6 -5912,9 +5912,9 @@@ typedef struct _ATOM_PPLIB_NONCLOCK_INF
  // Contained in an array starting at the offset
  // in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
  // referenced from ATOM_PPLIB_STATE::ucClockStateIndices
+ #define ATOM_PPLIB_NONCLOCKINFO_VER1      12
+ #define ATOM_PPLIB_NONCLOCKINFO_VER2      24
  typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
  {
        USHORT usEngineClockLow;
  #define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE    4
  #define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF    8
  #define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF    16
+ #define ATOM_PPLIB_R600_FLAGS_LOWPOWER         32   // On the RV770 use 'low power' setting (sequencer S0).
+ typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
+ {
+       USHORT usEngineClockLow;
+       UCHAR  ucEngineClockHigh;
+       USHORT usMemoryClockLow;
+       UCHAR  ucMemoryClockHigh;
+       USHORT usVDDC;
+       USHORT usVDDCI;
+       USHORT usUnused;
+       ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+ } ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
  
  typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
  
        UCHAR  ucPadding;                   // For proper alignment and size.
        USHORT usVDDC;                      // For the 780, use: None, Low, High, Variable
        UCHAR  ucMaxHTLinkWidth;            // From SBIOS - {2, 4, 8, 16}
 -      UCHAR  ucMinHTLinkWidth;            // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requriement.
 +      UCHAR  ucMinHTLinkWidth;            // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requirement.
        USHORT usHTLinkFreq;                // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
        ULONG  ulFlags; 
  } ATOM_PPLIB_RS780_CLOCK_INFO;
   * - 2.1.0 - add square tiling interface
   * - 2.2.0 - add r6xx/r7xx const buffer support
   * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs
+  * - 2.4.0 - add crtc id query
   */
  #define KMS_DRIVER_MAJOR      2
- #define KMS_DRIVER_MINOR      3
+ #define KMS_DRIVER_MINOR      4
  #define KMS_DRIVER_PATCHLEVEL 0
  int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
  int radeon_driver_unload_kms(struct drm_device *dev);
@@@ -91,7 -92,6 +92,6 @@@ int radeon_testing = 0
  int radeon_connector_table = 0;
  int radeon_tv = 1;
  int radeon_new_pll = -1;
- int radeon_dynpm = -1;
  int radeon_audio = 1;
  int radeon_disp_priority = 0;
  int radeon_hw_i2c = 0;
@@@ -132,9 -132,6 +132,6 @@@ module_param_named(tv, radeon_tv, int, 
  MODULE_PARM_DESC(new_pll, "Select new PLL code");
  module_param_named(new_pll, radeon_new_pll, int, 0444);
  
- MODULE_PARM_DESC(dynpm, "Disable/Enable dynamic power management (1 = enable)");
- module_param_named(dynpm, radeon_dynpm, int, 0444);
  MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
  module_param_named(audio, radeon_audio, int, 0444);
  
@@@ -216,7 -213,6 +213,7 @@@ static struct drm_driver driver_old = 
                 .mmap = drm_mmap,
                 .poll = drm_poll,
                 .fasync = drm_fasync,
 +               .read = drm_read,
  #ifdef CONFIG_COMPAT
                 .compat_ioctl = radeon_compat_ioctl,
  #endif
@@@ -305,7 -301,6 +302,7 @@@ static struct drm_driver kms_driver = 
                 .mmap = radeon_mmap,
                 .poll = drm_poll,
                 .fasync = drm_fasync,
 +               .read = drm_read,
  #ifdef CONFIG_COMPAT
                 .compat_ioctl = radeon_kms_compat_ioctl,
  #endif
@@@ -79,8 -79,6 +79,6 @@@ static void ttm_mem_type_debug(struct t
        printk(KERN_ERR TTM_PFX "    use_type: %d\n", man->use_type);
        printk(KERN_ERR TTM_PFX "    flags: 0x%08X\n", man->flags);
        printk(KERN_ERR TTM_PFX "    gpu_offset: 0x%08lX\n", man->gpu_offset);
-       printk(KERN_ERR TTM_PFX "    io_offset: 0x%08lX\n", man->io_offset);
-       printk(KERN_ERR TTM_PFX "    io_size: %ld\n", man->io_size);
        printk(KERN_ERR TTM_PFX "    size: %llu\n", man->size);
        printk(KERN_ERR TTM_PFX "    available_caching: 0x%08X\n",
                man->available_caching);
@@@ -357,7 -355,8 +355,8 @@@ static int ttm_bo_add_ttm(struct ttm_bu
  
  static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                                  struct ttm_mem_reg *mem,
-                                 bool evict, bool interruptible, bool no_wait)
+                                 bool evict, bool interruptible,
+                                 bool no_wait_reserve, bool no_wait_gpu)
  {
        struct ttm_bo_device *bdev = bo->bdev;
        bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
  
        if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
-               ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
+               ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
        else if (bdev->driver->move)
                ret = bdev->driver->move(bo, evict, interruptible,
-                                        no_wait, mem);
+                                        no_wait_reserve, no_wait_gpu, mem);
        else
-               ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
+               ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
  
        if (ret)
                goto out_err;
@@@ -605,8 -604,22 +604,22 @@@ void ttm_bo_unref(struct ttm_buffer_obj
  }
  EXPORT_SYMBOL(ttm_bo_unref);
  
+ int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
+ {
+       return cancel_delayed_work_sync(&bdev->wq);
+ }
+ EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
+ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
+ {
+       if (resched)
+               schedule_delayed_work(&bdev->wq,
+                                     ((HZ / 100) < 1) ? 1 : HZ / 100);
+ }
+ EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
  static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
-                       bool no_wait)
+                       bool no_wait_reserve, bool no_wait_gpu)
  {
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        int ret = 0;
  
        spin_lock(&bo->lock);
-       ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+       ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
        spin_unlock(&bo->lock);
  
        if (unlikely(ret != 0)) {
  
        evict_mem = bo->mem;
        evict_mem.mm_node = NULL;
+       evict_mem.bus.io_reserved = false;
  
        placement.fpfn = 0;
        placement.lpfn = 0;
        placement.num_busy_placement = 0;
        bdev->driver->evict_flags(bo, &placement);
        ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
-                               no_wait);
+                               no_wait_reserve, no_wait_gpu);
        if (ret) {
                if (ret != -ERESTARTSYS) {
                        printk(KERN_ERR TTM_PFX
        }
  
        ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
-                                    no_wait);
+                                    no_wait_reserve, no_wait_gpu);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
@@@ -670,7 -684,8 +684,8 @@@ out
  
  static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
                                uint32_t mem_type,
-                               bool interruptible, bool no_wait)
+                               bool interruptible, bool no_wait_reserve,
+                               bool no_wait_gpu)
  {
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@@@ -687,11 -702,11 +702,11 @@@ retry
        bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
        kref_get(&bo->list_kref);
  
-       ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+       ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
  
        if (unlikely(ret == -EBUSY)) {
                spin_unlock(&glob->lru_lock);
-               if (likely(!no_wait))
+               if (likely(!no_wait_gpu))
                        ret = ttm_bo_wait_unreserved(bo, interruptible);
  
                kref_put(&bo->list_kref, ttm_bo_release_list);
        while (put_count--)
                kref_put(&bo->list_kref, ttm_bo_ref_bug);
  
-       ret = ttm_bo_evict(bo, interruptible, no_wait);
+       ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
        ttm_bo_unreserve(bo);
  
        kref_put(&bo->list_kref, ttm_bo_release_list);
@@@ -764,7 -779,9 +779,9 @@@ static int ttm_bo_mem_force_space(struc
                                        uint32_t mem_type,
                                        struct ttm_placement *placement,
                                        struct ttm_mem_reg *mem,
-                                       bool interruptible, bool no_wait)
+                                       bool interruptible,
+                                       bool no_wait_reserve,
+                                       bool no_wait_gpu)
  {
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bdev->glob;
                }
                spin_unlock(&glob->lru_lock);
                ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
-                                               no_wait);
+                                               no_wait_reserve, no_wait_gpu);
                if (unlikely(ret != 0))
                        return ret;
        } while (1);
@@@ -855,7 -872,8 +872,8 @@@ static bool ttm_bo_mt_compatible(struc
  int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
                        struct ttm_mem_reg *mem,
-                       bool interruptible, bool no_wait)
+                       bool interruptible, bool no_wait_reserve,
+                       bool no_wait_gpu)
  {
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;
                }
  
                ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
-                                               interruptible, no_wait);
+                                               interruptible, no_wait_reserve, no_wait_gpu);
                if (ret == 0 && mem->mm_node) {
                        mem->placement = cur_flags;
                        mem->mm_node->private = bo;
@@@ -978,7 -996,8 +996,8 @@@ EXPORT_SYMBOL(ttm_bo_wait_cpu)
  
  int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
-                       bool interruptible, bool no_wait)
+                       bool interruptible, bool no_wait_reserve,
+                       bool no_wait_gpu)
  {
        struct ttm_bo_global *glob = bo->glob;
        int ret = 0;
         * instead of doing it here.
         */
        spin_lock(&bo->lock);
-       ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+       ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
        spin_unlock(&bo->lock);
        if (ret)
                return ret;
        mem.num_pages = bo->num_pages;
        mem.size = mem.num_pages << PAGE_SHIFT;
        mem.page_alignment = bo->mem.page_alignment;
+       mem.bus.io_reserved = false;
        /*
         * Determine where to move the buffer.
         */
-       ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
+       ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
        if (ret)
                goto out_unlock;
-       ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
+       ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
  out_unlock:
        if (ret && mem.mm_node) {
                spin_lock(&glob->lru_lock);
@@@ -1039,7 -1059,8 +1059,8 @@@ static int ttm_bo_mem_compat(struct ttm
  
  int ttm_bo_validate(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
-                       bool interruptible, bool no_wait)
+                       bool interruptible, bool no_wait_reserve,
+                       bool no_wait_gpu)
  {
        int ret;
  
         */
        ret = ttm_bo_mem_compat(placement, &bo->mem);
        if (ret < 0) {
-               ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
+               ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
                if (ret)
                        return ret;
        } else {
@@@ -1153,6 -1174,7 +1174,7 @@@ int ttm_bo_init(struct ttm_bo_device *b
        bo->mem.num_pages = bo->num_pages;
        bo->mem.mm_node = NULL;
        bo->mem.page_alignment = page_alignment;
+       bo->mem.bus.io_reserved = false;
        bo->buffer_start = buffer_start & PAGE_MASK;
        bo->priv_flags = 0;
        bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
                        goto out_err;
        }
  
-       ret = ttm_bo_validate(bo, placement, interruptible, false);
+       ret = ttm_bo_validate(bo, placement, interruptible, false, false);
        if (ret)
                goto out_err;
  
@@@ -1249,7 -1271,7 +1271,7 @@@ static int ttm_bo_force_list_clean(stru
        spin_lock(&glob->lru_lock);
        while (!list_empty(&man->lru)) {
                spin_unlock(&glob->lru_lock);
-               ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+               ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
                if (ret) {
                        if (allow_errors) {
                                return ret;
@@@ -1553,26 -1575,6 +1575,6 @@@ bool ttm_mem_reg_is_pci(struct ttm_bo_d
        return true;
  }
  
- int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
-                     struct ttm_mem_reg *mem,
-                     unsigned long *bus_base,
-                     unsigned long *bus_offset, unsigned long *bus_size)
- {
-       struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-       *bus_size = 0;
-       if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
-               return -EINVAL;
-       if (ttm_mem_reg_is_pci(bdev, mem)) {
-               *bus_offset = mem->mm_node->start << PAGE_SHIFT;
-               *bus_size = mem->num_pages << PAGE_SHIFT;
-               *bus_base = man->io_offset;
-       }
-       return 0;
- }
  void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
  {
        struct ttm_bo_device *bdev = bo->bdev;
  
        if (!bdev->dev_mapping)
                return;
        unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+       ttm_mem_io_free(bdev, &bo->mem);
  }
  EXPORT_SYMBOL(ttm_bo_unmap_virtual);
  
@@@ -1716,12 -1718,40 +1718,12 @@@ int ttm_bo_wait(struct ttm_buffer_objec
  }
  EXPORT_SYMBOL(ttm_bo_wait);
  
 -void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
 -{
 -      atomic_set(&bo->reserved, 0);
 -      wake_up_all(&bo->event_queue);
 -}
 -
 -int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
 -                           bool no_wait)
 -{
 -      int ret;
 -
 -      while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
 -              if (no_wait)
 -                      return -EBUSY;
 -              else if (interruptible) {
 -                      ret = wait_event_interruptible
 -                          (bo->event_queue, atomic_read(&bo->reserved) == 0);
 -                      if (unlikely(ret != 0))
 -                              return ret;
 -              } else {
 -                      wait_event(bo->event_queue,
 -                                 atomic_read(&bo->reserved) == 0);
 -              }
 -      }
 -      return 0;
 -}
 -
  int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
  {
        int ret = 0;
  
        /*
 -       * Using ttm_bo_reserve instead of ttm_bo_block_reservation
 -       * makes sure the lru lists are updated.
 +       * Using ttm_bo_reserve makes sure the lru lists are updated.
         */
  
        ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
@@@ -1811,7 -1841,7 +1813,7 @@@ static int ttm_bo_swapout(struct ttm_me
                evict_mem.mem_type = TTM_PL_SYSTEM;
  
                ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
-                                            false, false);
+                                            false, false, false);
                if (unlikely(ret != 0))
                        goto out;
        }
@@@ -176,8 -176,6 +176,6 @@@ struct ttm_tt 
  
  #define TTM_MEMTYPE_FLAG_FIXED         (1 << 0)       /* Fixed (on-card) PCI memory */
  #define TTM_MEMTYPE_FLAG_MAPPABLE      (1 << 1)       /* Memory mappable */
- #define TTM_MEMTYPE_FLAG_NEEDS_IOREMAP (1 << 2)       /* Fixed memory needs ioremap
-                                                  before kernel access. */
  #define TTM_MEMTYPE_FLAG_CMA           (1 << 3)       /* Can't map aperture */
  
  /**
   * managed by this memory type.
   * @gpu_offset: If used, the GPU offset of the first managed page of
   * fixed memory or the first managed location in an aperture.
-  * @io_offset: The io_offset of the first managed page of IO memory or
-  * the first managed location in an aperture. For TTM_MEMTYPE_FLAG_CMA
-  * memory, this should be set to NULL.
-  * @io_size: The size of a managed IO region (fixed memory or aperture).
-  * @io_addr: Virtual kernel address if the io region is pre-mapped. For
-  * TTM_MEMTYPE_FLAG_NEEDS_IOREMAP there is no pre-mapped io map and
-  * @io_addr should be set to NULL.
   * @size: Size of the managed region.
   * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
   * as defined in ttm_placement_common.h
@@@ -221,9 -212,6 +212,6 @@@ struct ttm_mem_type_manager 
        bool use_type;
        uint32_t flags;
        unsigned long gpu_offset;
-       unsigned long io_offset;
-       unsigned long io_size;
-       void *io_addr;
        uint64_t size;
        uint32_t available_caching;
        uint32_t default_caching;
@@@ -311,7 -299,8 +299,8 @@@ struct ttm_bo_driver 
         */
        int (*move) (struct ttm_buffer_object *bo,
                     bool evict, bool interruptible,
-                    bool no_wait, struct ttm_mem_reg *new_mem);
+                    bool no_wait_reserve, bool no_wait_gpu,
+                    struct ttm_mem_reg *new_mem);
  
        /**
         * struct ttm_bo_driver_member verify_access
                            struct ttm_mem_reg *new_mem);
        /* notify the driver we are taking a fault on this BO
         * and have reserved it */
-       void (*fault_reserve_notify)(struct ttm_buffer_object *bo);
+       int (*fault_reserve_notify)(struct ttm_buffer_object *bo);
  
        /**
         * notify the driver that we're about to swap out this bo
         */
        void (*swap_notify) (struct ttm_buffer_object *bo);
+       /**
+        * Driver callback on when mapping io memory (for bo_move_memcpy
+        * for instance). TTM will take care to call io_mem_free whenever
+        * the mapping is not use anymore. io_mem_reserve & io_mem_free
+        * are balanced.
+        */
+       int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
+       void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
  };
  
  /**
@@@ -633,7 -631,8 +631,8 @@@ extern bool ttm_mem_reg_is_pci(struct t
   * @proposed_placement: Proposed new placement for the buffer object.
   * @mem: A struct ttm_mem_reg.
   * @interruptible: Sleep interruptible when sliping.
-  * @no_wait: Don't sleep waiting for space to become available.
+  * @no_wait_reserve: Return immediately if other buffers are busy.
+  * @no_wait_gpu: Return immediately if the GPU is busy.
   *
   * Allocate memory space for the buffer object pointed to by @bo, using
   * the placement flags in @mem, potentially evicting other idle buffer objects.
  extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                                struct ttm_placement *placement,
                                struct ttm_mem_reg *mem,
-                               bool interruptible, bool no_wait);
+                               bool interruptible,
+                               bool no_wait_reserve, bool no_wait_gpu);
  /**
   * ttm_bo_wait_for_cpu
   *
@@@ -682,6 -682,11 +682,11 @@@ extern int ttm_bo_pci_offset(struct ttm
                             unsigned long *bus_offset,
                             unsigned long *bus_size);
  
+ extern int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+                               struct ttm_mem_reg *mem);
+ extern void ttm_mem_io_free(struct ttm_bo_device *bdev,
+                               struct ttm_mem_reg *mem);
  extern void ttm_bo_global_release(struct ttm_global_reference *ref);
  extern int ttm_bo_global_init(struct ttm_global_reference *ref);
  
@@@ -789,6 -794,34 +794,6 @@@ extern void ttm_bo_unreserve(struct ttm
  extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
                                  bool interruptible);
  
 -/**
 - * ttm_bo_block_reservation
 - *
 - * @bo: A pointer to a struct ttm_buffer_object.
 - * @interruptible: Use interruptible sleep when waiting.
 - * @no_wait: Don't sleep, but rather return -EBUSY.
 - *
 - * Block reservation for validation by simply reserving the buffer.
 - * This is intended for single buffer use only without eviction,
 - * and thus needs no deadlock protection.
 - *
 - * Returns:
 - * -EBUSY: If no_wait == 1 and the buffer is already reserved.
 - * -ERESTARTSYS: If interruptible == 1 and the process received a signal
 - * while sleeping.
 - */
 -extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
 -                                  bool interruptible, bool no_wait);
 -
 -/**
 - * ttm_bo_unblock_reservation
 - *
 - * @bo: A pointer to a struct ttm_buffer_object.
 - *
 - * Unblocks reservation leaving lru lists untouched.
 - */
 -extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
 -
  /*
   * ttm_bo_util.c
   */
   *
   * @bo: A pointer to a struct ttm_buffer_object.
   * @evict: 1: This is an eviction. Don't try to pipeline.
-  * @no_wait: Never sleep, but rather return with -EBUSY.
+  * @no_wait_reserve: Return immediately if other buffers are busy.
+  * @no_wait_gpu: Return immediately if the GPU is busy.
   * @new_mem: struct ttm_mem_reg indicating where to move.
   *
   * Optimized move function for a buffer object with both old and
   */
  
  extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-                          bool evict, bool no_wait,
-                          struct ttm_mem_reg *new_mem);
+                          bool evict, bool no_wait_reserve,
+                          bool no_wait_gpu, struct ttm_mem_reg *new_mem);
  
  /**
   * ttm_bo_move_memcpy
   *
   * @bo: A pointer to a struct ttm_buffer_object.
   * @evict: 1: This is an eviction. Don't try to pipeline.
-  * @no_wait: Never sleep, but rather return with -EBUSY.
+  * @no_wait_reserve: Return immediately if other buffers are busy.
+  * @no_wait_gpu: Return immediately if the GPU is busy.
   * @new_mem: struct ttm_mem_reg indicating where to move.
   *
   * Fallback move function for a mappable buffer object in mappable memory.
   */
  
  extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-                             bool evict,
-                             bool no_wait, struct ttm_mem_reg *new_mem);
+                             bool evict, bool no_wait_reserve,
+                             bool no_wait_gpu, struct ttm_mem_reg *new_mem);
  
  /**
   * ttm_bo_free_old_node
@@@ -854,7 -889,8 +861,8 @@@ extern void ttm_bo_free_old_node(struc
   * @sync_obj_arg: An argument to pass to the sync object idle / wait
   * functions.
   * @evict: This is an evict move. Don't return until the buffer is idle.
-  * @no_wait: Never sleep, but rather return with -EBUSY.
+  * @no_wait_reserve: Return immediately if other buffers are busy.
+  * @no_wait_gpu: Return immediately if the GPU is busy.
   * @new_mem: struct ttm_mem_reg indicating where to move.
   *
   * Accelerated move function to be called when an accelerated move
  extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                                     void *sync_obj,
                                     void *sync_obj_arg,
-                                    bool evict, bool no_wait,
+                                    bool evict, bool no_wait_reserve,
+                                    bool no_wait_gpu,
                                     struct ttm_mem_reg *new_mem);
  /**
   * ttm_io_prot