drm/i915: disable shared panel fitter for pipe
linux-2.6.git: drivers/gpu/drm/i915/intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/cpufreq.h>
28 #include <linux/module.h>
29 #include <linux/input.h>
30 #include <linux/i2c.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/vgaarb.h>
34 #include <drm/drm_edid.h>
35 #include "drmP.h"
36 #include "intel_drv.h"
37 #include "i915_drm.h"
38 #include "i915_drv.h"
39 #include "i915_trace.h"
40 #include "drm_dp_helper.h"
41 #include "drm_crtc_helper.h"
42 #include <linux/dma_remapping.h>
43
44 #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
45
46 bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
47 static void intel_update_watermarks(struct drm_device *dev);
48 static void intel_increase_pllclock(struct drm_crtc *crtc);
49 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
50
51 typedef struct {
52         /* given values */
53         int n;
54         int m1, m2;
55         int p1, p2;
56         /* derived values */
57         int     dot;
58         int     vco;
59         int     m;
60         int     p;
61 } intel_clock_t;
62
63 typedef struct {
64         int     min, max;
65 } intel_range_t;
66
67 typedef struct {
68         int     dot_limit;
69         int     p2_slow, p2_fast;
70 } intel_p2_t;
71
72 #define INTEL_P2_NUM                  2
73 typedef struct intel_limit intel_limit_t;
74 struct intel_limit {
75         intel_range_t   dot, vco, n, m, m1, m2, p, p1;
76         intel_p2_t          p2;
77         bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
78                         int, int, intel_clock_t *, intel_clock_t *);
79 };
80
81 /* FDI */
82 #define IRONLAKE_FDI_FREQ               2700000 /* in kHz for mode->clock */
83
84 static bool
85 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
86                     int target, int refclk, intel_clock_t *match_clock,
87                     intel_clock_t *best_clock);
88 static bool
89 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
90                         int target, int refclk, intel_clock_t *match_clock,
91                         intel_clock_t *best_clock);
92
93 static bool
94 intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
95                       int target, int refclk, intel_clock_t *match_clock,
96                       intel_clock_t *best_clock);
97 static bool
98 intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
99                            int target, int refclk, intel_clock_t *match_clock,
100                            intel_clock_t *best_clock);
101
102 static inline u32 /* units of 100MHz */
103 intel_fdi_link_freq(struct drm_device *dev)
104 {
105         if (IS_GEN5(dev)) {
106                 struct drm_i915_private *dev_priv = dev->dev_private;
107                 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
108         } else
109                 return 27;
110 }
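/*
 * Illustrative reading of the value above (example numbers, not from the
 * original source): the return value is in units of 100 MHz, so a
 * FDI_PLL_FB_CLOCK field of 25 gives 25 + 2 = 27, i.e. a 2.7 GHz FDI
 * link -- the same figure the non-Ironlake branch returns as a constant.
 */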
111
112 static const intel_limit_t intel_limits_i8xx_dvo = {
113         .dot = { .min = 25000, .max = 350000 },
114         .vco = { .min = 930000, .max = 1400000 },
115         .n = { .min = 3, .max = 16 },
116         .m = { .min = 96, .max = 140 },
117         .m1 = { .min = 18, .max = 26 },
118         .m2 = { .min = 6, .max = 16 },
119         .p = { .min = 4, .max = 128 },
120         .p1 = { .min = 2, .max = 33 },
121         .p2 = { .dot_limit = 165000,
122                 .p2_slow = 4, .p2_fast = 2 },
123         .find_pll = intel_find_best_PLL,
124 };
125
126 static const intel_limit_t intel_limits_i8xx_lvds = {
127         .dot = { .min = 25000, .max = 350000 },
128         .vco = { .min = 930000, .max = 1400000 },
129         .n = { .min = 3, .max = 16 },
130         .m = { .min = 96, .max = 140 },
131         .m1 = { .min = 18, .max = 26 },
132         .m2 = { .min = 6, .max = 16 },
133         .p = { .min = 4, .max = 128 },
134         .p1 = { .min = 1, .max = 6 },
135         .p2 = { .dot_limit = 165000,
136                 .p2_slow = 14, .p2_fast = 7 },
137         .find_pll = intel_find_best_PLL,
138 };
139
140 static const intel_limit_t intel_limits_i9xx_sdvo = {
141         .dot = { .min = 20000, .max = 400000 },
142         .vco = { .min = 1400000, .max = 2800000 },
143         .n = { .min = 1, .max = 6 },
144         .m = { .min = 70, .max = 120 },
145         .m1 = { .min = 10, .max = 22 },
146         .m2 = { .min = 5, .max = 9 },
147         .p = { .min = 5, .max = 80 },
148         .p1 = { .min = 1, .max = 8 },
149         .p2 = { .dot_limit = 200000,
150                 .p2_slow = 10, .p2_fast = 5 },
151         .find_pll = intel_find_best_PLL,
152 };
153
154 static const intel_limit_t intel_limits_i9xx_lvds = {
155         .dot = { .min = 20000, .max = 400000 },
156         .vco = { .min = 1400000, .max = 2800000 },
157         .n = { .min = 1, .max = 6 },
158         .m = { .min = 70, .max = 120 },
159         .m1 = { .min = 10, .max = 22 },
160         .m2 = { .min = 5, .max = 9 },
161         .p = { .min = 7, .max = 98 },
162         .p1 = { .min = 1, .max = 8 },
163         .p2 = { .dot_limit = 112000,
164                 .p2_slow = 14, .p2_fast = 7 },
165         .find_pll = intel_find_best_PLL,
166 };
167
168
169 static const intel_limit_t intel_limits_g4x_sdvo = {
170         .dot = { .min = 25000, .max = 270000 },
171         .vco = { .min = 1750000, .max = 3500000},
172         .n = { .min = 1, .max = 4 },
173         .m = { .min = 104, .max = 138 },
174         .m1 = { .min = 17, .max = 23 },
175         .m2 = { .min = 5, .max = 11 },
176         .p = { .min = 10, .max = 30 },
177         .p1 = { .min = 1, .max = 3},
178         .p2 = { .dot_limit = 270000,
179                 .p2_slow = 10,
180                 .p2_fast = 10
181         },
182         .find_pll = intel_g4x_find_best_PLL,
183 };
184
185 static const intel_limit_t intel_limits_g4x_hdmi = {
186         .dot = { .min = 22000, .max = 400000 },
187         .vco = { .min = 1750000, .max = 3500000},
188         .n = { .min = 1, .max = 4 },
189         .m = { .min = 104, .max = 138 },
190         .m1 = { .min = 16, .max = 23 },
191         .m2 = { .min = 5, .max = 11 },
192         .p = { .min = 5, .max = 80 },
193         .p1 = { .min = 1, .max = 8},
194         .p2 = { .dot_limit = 165000,
195                 .p2_slow = 10, .p2_fast = 5 },
196         .find_pll = intel_g4x_find_best_PLL,
197 };
198
199 static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
200         .dot = { .min = 20000, .max = 115000 },
201         .vco = { .min = 1750000, .max = 3500000 },
202         .n = { .min = 1, .max = 3 },
203         .m = { .min = 104, .max = 138 },
204         .m1 = { .min = 17, .max = 23 },
205         .m2 = { .min = 5, .max = 11 },
206         .p = { .min = 28, .max = 112 },
207         .p1 = { .min = 2, .max = 8 },
208         .p2 = { .dot_limit = 0,
209                 .p2_slow = 14, .p2_fast = 14
210         },
211         .find_pll = intel_g4x_find_best_PLL,
212 };
213
214 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
215         .dot = { .min = 80000, .max = 224000 },
216         .vco = { .min = 1750000, .max = 3500000 },
217         .n = { .min = 1, .max = 3 },
218         .m = { .min = 104, .max = 138 },
219         .m1 = { .min = 17, .max = 23 },
220         .m2 = { .min = 5, .max = 11 },
221         .p = { .min = 14, .max = 42 },
222         .p1 = { .min = 2, .max = 6 },
223         .p2 = { .dot_limit = 0,
224                 .p2_slow = 7, .p2_fast = 7
225         },
226         .find_pll = intel_g4x_find_best_PLL,
227 };
228
229 static const intel_limit_t intel_limits_g4x_display_port = {
230         .dot = { .min = 161670, .max = 227000 },
231         .vco = { .min = 1750000, .max = 3500000},
232         .n = { .min = 1, .max = 2 },
233         .m = { .min = 97, .max = 108 },
234         .m1 = { .min = 0x10, .max = 0x12 },
235         .m2 = { .min = 0x05, .max = 0x06 },
236         .p = { .min = 10, .max = 20 },
237         .p1 = { .min = 1, .max = 2},
238         .p2 = { .dot_limit = 0,
239                 .p2_slow = 10, .p2_fast = 10 },
240         .find_pll = intel_find_pll_g4x_dp,
241 };
242
243 static const intel_limit_t intel_limits_pineview_sdvo = {
244         .dot = { .min = 20000, .max = 400000},
245         .vco = { .min = 1700000, .max = 3500000 },
 246         /* Pineview's N counter is a ring counter */
247         .n = { .min = 3, .max = 6 },
248         .m = { .min = 2, .max = 256 },
249         /* Pineview only has one combined m divider, which we treat as m2. */
250         .m1 = { .min = 0, .max = 0 },
251         .m2 = { .min = 0, .max = 254 },
252         .p = { .min = 5, .max = 80 },
253         .p1 = { .min = 1, .max = 8 },
254         .p2 = { .dot_limit = 200000,
255                 .p2_slow = 10, .p2_fast = 5 },
256         .find_pll = intel_find_best_PLL,
257 };
258
259 static const intel_limit_t intel_limits_pineview_lvds = {
260         .dot = { .min = 20000, .max = 400000 },
261         .vco = { .min = 1700000, .max = 3500000 },
262         .n = { .min = 3, .max = 6 },
263         .m = { .min = 2, .max = 256 },
264         .m1 = { .min = 0, .max = 0 },
265         .m2 = { .min = 0, .max = 254 },
266         .p = { .min = 7, .max = 112 },
267         .p1 = { .min = 1, .max = 8 },
268         .p2 = { .dot_limit = 112000,
269                 .p2_slow = 14, .p2_fast = 14 },
270         .find_pll = intel_find_best_PLL,
271 };
272
273 /* Ironlake / Sandybridge
274  *
275  * We calculate clock using (register_value + 2) for N/M1/M2, so here
 276  * the range value for them is (actual_value - 2), e.g. n.min = 1 below stands for an actual N divider of 3.
277  */
278 static const intel_limit_t intel_limits_ironlake_dac = {
279         .dot = { .min = 25000, .max = 350000 },
280         .vco = { .min = 1760000, .max = 3510000 },
281         .n = { .min = 1, .max = 5 },
282         .m = { .min = 79, .max = 127 },
283         .m1 = { .min = 12, .max = 22 },
284         .m2 = { .min = 5, .max = 9 },
285         .p = { .min = 5, .max = 80 },
286         .p1 = { .min = 1, .max = 8 },
287         .p2 = { .dot_limit = 225000,
288                 .p2_slow = 10, .p2_fast = 5 },
289         .find_pll = intel_g4x_find_best_PLL,
290 };
291
292 static const intel_limit_t intel_limits_ironlake_single_lvds = {
293         .dot = { .min = 25000, .max = 350000 },
294         .vco = { .min = 1760000, .max = 3510000 },
295         .n = { .min = 1, .max = 3 },
296         .m = { .min = 79, .max = 118 },
297         .m1 = { .min = 12, .max = 22 },
298         .m2 = { .min = 5, .max = 9 },
299         .p = { .min = 28, .max = 112 },
300         .p1 = { .min = 2, .max = 8 },
301         .p2 = { .dot_limit = 225000,
302                 .p2_slow = 14, .p2_fast = 14 },
303         .find_pll = intel_g4x_find_best_PLL,
304 };
305
306 static const intel_limit_t intel_limits_ironlake_dual_lvds = {
307         .dot = { .min = 25000, .max = 350000 },
308         .vco = { .min = 1760000, .max = 3510000 },
309         .n = { .min = 1, .max = 3 },
310         .m = { .min = 79, .max = 127 },
311         .m1 = { .min = 12, .max = 22 },
312         .m2 = { .min = 5, .max = 9 },
313         .p = { .min = 14, .max = 56 },
314         .p1 = { .min = 2, .max = 8 },
315         .p2 = { .dot_limit = 225000,
316                 .p2_slow = 7, .p2_fast = 7 },
317         .find_pll = intel_g4x_find_best_PLL,
318 };
319
 320 /* LVDS 100MHz refclk limits. */
321 static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
322         .dot = { .min = 25000, .max = 350000 },
323         .vco = { .min = 1760000, .max = 3510000 },
324         .n = { .min = 1, .max = 2 },
325         .m = { .min = 79, .max = 126 },
326         .m1 = { .min = 12, .max = 22 },
327         .m2 = { .min = 5, .max = 9 },
328         .p = { .min = 28, .max = 112 },
329         .p1 = { .min = 2, .max = 8 },
330         .p2 = { .dot_limit = 225000,
331                 .p2_slow = 14, .p2_fast = 14 },
332         .find_pll = intel_g4x_find_best_PLL,
333 };
334
335 static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
336         .dot = { .min = 25000, .max = 350000 },
337         .vco = { .min = 1760000, .max = 3510000 },
338         .n = { .min = 1, .max = 3 },
339         .m = { .min = 79, .max = 126 },
340         .m1 = { .min = 12, .max = 22 },
341         .m2 = { .min = 5, .max = 9 },
342         .p = { .min = 14, .max = 42 },
343         .p1 = { .min = 2, .max = 6 },
344         .p2 = { .dot_limit = 225000,
345                 .p2_slow = 7, .p2_fast = 7 },
346         .find_pll = intel_g4x_find_best_PLL,
347 };
348
349 static const intel_limit_t intel_limits_ironlake_display_port = {
350         .dot = { .min = 25000, .max = 350000 },
351         .vco = { .min = 1760000, .max = 3510000},
352         .n = { .min = 1, .max = 2 },
353         .m = { .min = 81, .max = 90 },
354         .m1 = { .min = 12, .max = 22 },
355         .m2 = { .min = 5, .max = 9 },
356         .p = { .min = 10, .max = 20 },
357         .p1 = { .min = 1, .max = 2},
358         .p2 = { .dot_limit = 0,
359                 .p2_slow = 10, .p2_fast = 10 },
360         .find_pll = intel_find_pll_ironlake_dp,
361 };
362
363 static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
364                               unsigned int reg)
365 {
366         unsigned int val;
367
368         if (dev_priv->lvds_val)
369                 val = dev_priv->lvds_val;
370         else {
371                 /* BIOS should set the proper LVDS register value at boot, but
372                  * in reality, it doesn't set the value when the lid is closed;
373                  * we need to check "the value to be set" in VBT when LVDS
374                  * register is uninitialized.
375                  */
376                 val = I915_READ(reg);
377                 if (!(val & ~LVDS_DETECTED))
378                         val = dev_priv->bios_lvds_val;
379                 dev_priv->lvds_val = val;
380         }
381         return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
382 }
383
384 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
385                                                 int refclk)
386 {
387         struct drm_device *dev = crtc->dev;
388         struct drm_i915_private *dev_priv = dev->dev_private;
389         const intel_limit_t *limit;
390
391         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
392                 if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
393                         /* LVDS dual channel */
394                         if (refclk == 100000)
395                                 limit = &intel_limits_ironlake_dual_lvds_100m;
396                         else
397                                 limit = &intel_limits_ironlake_dual_lvds;
398                 } else {
399                         if (refclk == 100000)
400                                 limit = &intel_limits_ironlake_single_lvds_100m;
401                         else
402                                 limit = &intel_limits_ironlake_single_lvds;
403                 }
404         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
405                         HAS_eDP)
406                 limit = &intel_limits_ironlake_display_port;
407         else
408                 limit = &intel_limits_ironlake_dac;
409
410         return limit;
411 }
412
413 static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
414 {
415         struct drm_device *dev = crtc->dev;
416         struct drm_i915_private *dev_priv = dev->dev_private;
417         const intel_limit_t *limit;
418
419         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
420                 if (is_dual_link_lvds(dev_priv, LVDS))
421                         /* LVDS with dual channel */
422                         limit = &intel_limits_g4x_dual_channel_lvds;
423                 else
 424                         /* LVDS with single channel */
425                         limit = &intel_limits_g4x_single_channel_lvds;
426         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
427                    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
428                 limit = &intel_limits_g4x_hdmi;
429         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
430                 limit = &intel_limits_g4x_sdvo;
431         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
432                 limit = &intel_limits_g4x_display_port;
 433         } else /* any other output type falls back to the i9xx SDVO limits */
434                 limit = &intel_limits_i9xx_sdvo;
435
436         return limit;
437 }
438
439 static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
440 {
441         struct drm_device *dev = crtc->dev;
442         const intel_limit_t *limit;
443
444         if (HAS_PCH_SPLIT(dev))
445                 limit = intel_ironlake_limit(crtc, refclk);
446         else if (IS_G4X(dev)) {
447                 limit = intel_g4x_limit(crtc);
448         } else if (IS_PINEVIEW(dev)) {
449                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
450                         limit = &intel_limits_pineview_lvds;
451                 else
452                         limit = &intel_limits_pineview_sdvo;
453         } else if (!IS_GEN2(dev)) {
454                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
455                         limit = &intel_limits_i9xx_lvds;
456                 else
457                         limit = &intel_limits_i9xx_sdvo;
458         } else {
459                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
460                         limit = &intel_limits_i8xx_lvds;
461                 else
462                         limit = &intel_limits_i8xx_dvo;
463         }
464         return limit;
465 }
466
467 /* m1 is reserved as 0 in Pineview, n is a ring counter */
468 static void pineview_clock(int refclk, intel_clock_t *clock)
469 {
470         clock->m = clock->m2 + 2;
471         clock->p = clock->p1 * clock->p2;
472         clock->vco = refclk * clock->m / clock->n;
473         clock->dot = clock->vco / clock->p;
474 }
475
476 static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
477 {
478         if (IS_PINEVIEW(dev)) {
479                 pineview_clock(refclk, clock);
480                 return;
481         }
482         clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
483         clock->p = clock->p1 * clock->p2;
484         clock->vco = refclk * clock->m / (clock->n + 2);
485         clock->dot = clock->vco / clock->p;
486 }
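/*
 * Worked example for the formula above (illustrative divisor values,
 * picked to sit inside the i9xx SDVO limits): refclk = 96000 kHz,
 * m1 = 14, m2 = 8, n = 2, p1 = 2, p2 = 10 gives
 *
 *   m   = 5 * (14 + 2) + (8 + 2)  = 90
 *   p   = 2 * 10                  = 20
 *   vco = 96000 * 90 / (2 + 2)    = 2160000 kHz
 *   dot = 2160000 / 20            = 108000 kHz (a 108 MHz dot clock)
 */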
487
488 /**
489  * Returns whether any output on the specified pipe is of the specified type
490  */
491 bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
492 {
493         struct drm_device *dev = crtc->dev;
494         struct drm_mode_config *mode_config = &dev->mode_config;
495         struct intel_encoder *encoder;
496
497         list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
498                 if (encoder->base.crtc == crtc && encoder->type == type)
499                         return true;
500
501         return false;
502 }
503
504 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
505 /**
506  * Returns whether the given set of divisors are valid for a given refclk with
507  * the given connectors.
508  */
509
510 static bool intel_PLL_is_valid(struct drm_device *dev,
511                                const intel_limit_t *limit,
512                                const intel_clock_t *clock)
513 {
514         if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
515                 INTELPllInvalid("p1 out of range\n");
516         if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
517                 INTELPllInvalid("p out of range\n");
518         if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
519                 INTELPllInvalid("m2 out of range\n");
520         if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
521                 INTELPllInvalid("m1 out of range\n");
522         if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
523                 INTELPllInvalid("m1 <= m2\n");
524         if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
525                 INTELPllInvalid("m out of range\n");
526         if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
527                 INTELPllInvalid("n out of range\n");
528         if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
529                 INTELPllInvalid("vco out of range\n");
530         /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
531          * connector, etc., rather than just a single range.
532          */
533         if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
534                 INTELPllInvalid("dot out of range\n");
535
536         return true;
537 }
538
539 static bool
540 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
541                     int target, int refclk, intel_clock_t *match_clock,
542                     intel_clock_t *best_clock)
543
544 {
545         struct drm_device *dev = crtc->dev;
546         struct drm_i915_private *dev_priv = dev->dev_private;
547         intel_clock_t clock;
548         int err = target;
549
550         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
551             (I915_READ(LVDS)) != 0) {
552                 /*
553                  * For LVDS, if the panel is on, just rely on its current
554                  * settings for dual-channel.  We haven't figured out how to
555                  * reliably set up different single/dual channel state, if we
556                  * even can.
557                  */
558                 if (is_dual_link_lvds(dev_priv, LVDS))
559                         clock.p2 = limit->p2.p2_fast;
560                 else
561                         clock.p2 = limit->p2.p2_slow;
562         } else {
563                 if (target < limit->p2.dot_limit)
564                         clock.p2 = limit->p2.p2_slow;
565                 else
566                         clock.p2 = limit->p2.p2_fast;
567         }
568
569         memset(best_clock, 0, sizeof(*best_clock));
570
571         for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
572              clock.m1++) {
573                 for (clock.m2 = limit->m2.min;
574                      clock.m2 <= limit->m2.max; clock.m2++) {
575                         /* m1 is always 0 in Pineview */
576                         if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
577                                 break;
578                         for (clock.n = limit->n.min;
579                              clock.n <= limit->n.max; clock.n++) {
580                                 for (clock.p1 = limit->p1.min;
581                                         clock.p1 <= limit->p1.max; clock.p1++) {
582                                         int this_err;
583
584                                         intel_clock(dev, refclk, &clock);
585                                         if (!intel_PLL_is_valid(dev, limit,
586                                                                 &clock))
587                                                 continue;
588                                         if (match_clock &&
589                                             clock.p != match_clock->p)
590                                                 continue;
591
592                                         this_err = abs(clock.dot - target);
593                                         if (this_err < err) {
594                                                 *best_clock = clock;
595                                                 err = this_err;
596                                         }
597                                 }
598                         }
599                 }
600         }
601
602         return (err != target);
603 }
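/*
 * Sketch of how these finders are reached (the real call site is in the
 * mode-set path further down in this file); argument names here are only
 * illustrative:
 *
 *   limit = intel_limit(crtc, refclk);
 *   ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk,
 *                        NULL, &clock);
 */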
604
605 static bool
606 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
607                         int target, int refclk, intel_clock_t *match_clock,
608                         intel_clock_t *best_clock)
609 {
610         struct drm_device *dev = crtc->dev;
611         struct drm_i915_private *dev_priv = dev->dev_private;
612         intel_clock_t clock;
613         int max_n;
614         bool found;
 615         /* err_most = target * (1/256 + 1/512) = target * 3/512, approximately target * 0.00586 */
616         int err_most = (target >> 8) + (target >> 9);
617         found = false;
618
619         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
620                 int lvds_reg;
621
622                 if (HAS_PCH_SPLIT(dev))
623                         lvds_reg = PCH_LVDS;
624                 else
625                         lvds_reg = LVDS;
626                 if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
627                     LVDS_CLKB_POWER_UP)
628                         clock.p2 = limit->p2.p2_fast;
629                 else
630                         clock.p2 = limit->p2.p2_slow;
631         } else {
632                 if (target < limit->p2.dot_limit)
633                         clock.p2 = limit->p2.p2_slow;
634                 else
635                         clock.p2 = limit->p2.p2_fast;
636         }
637
638         memset(best_clock, 0, sizeof(*best_clock));
639         max_n = limit->n.max;
 640         /* based on hardware requirement, prefer smaller n for better precision */
641         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
 642                 /* based on hardware requirement, prefer larger m1, m2 */
643                 for (clock.m1 = limit->m1.max;
644                      clock.m1 >= limit->m1.min; clock.m1--) {
645                         for (clock.m2 = limit->m2.max;
646                              clock.m2 >= limit->m2.min; clock.m2--) {
647                                 for (clock.p1 = limit->p1.max;
648                                      clock.p1 >= limit->p1.min; clock.p1--) {
649                                         int this_err;
650
651                                         intel_clock(dev, refclk, &clock);
652                                         if (!intel_PLL_is_valid(dev, limit,
653                                                                 &clock))
654                                                 continue;
655                                         if (match_clock &&
656                                             clock.p != match_clock->p)
657                                                 continue;
658
659                                         this_err = abs(clock.dot - target);
660                                         if (this_err < err_most) {
661                                                 *best_clock = clock;
662                                                 err_most = this_err;
663                                                 max_n = clock.n;
664                                                 found = true;
665                                         }
666                                 }
667                         }
668                 }
669         }
670         return found;
671 }
672
673 static bool
674 intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
675                            int target, int refclk, intel_clock_t *match_clock,
676                            intel_clock_t *best_clock)
677 {
678         struct drm_device *dev = crtc->dev;
679         intel_clock_t clock;
680
681         if (target < 200000) {
682                 clock.n = 1;
683                 clock.p1 = 2;
684                 clock.p2 = 10;
685                 clock.m1 = 12;
686                 clock.m2 = 9;
687         } else {
688                 clock.n = 2;
689                 clock.p1 = 1;
690                 clock.p2 = 10;
691                 clock.m1 = 14;
692                 clock.m2 = 8;
693         }
694         intel_clock(dev, refclk, &clock);
695         memcpy(best_clock, &clock, sizeof(intel_clock_t));
696         return true;
697 }
698
699 /* DisplayPort has only two frequencies, 162MHz and 270MHz */
700 static bool
701 intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
702                       int target, int refclk, intel_clock_t *match_clock,
703                       intel_clock_t *best_clock)
704 {
705         intel_clock_t clock;
706         if (target < 200000) {
707                 clock.p1 = 2;
708                 clock.p2 = 10;
709                 clock.n = 2;
710                 clock.m1 = 23;
711                 clock.m2 = 8;
712         } else {
713                 clock.p1 = 1;
714                 clock.p2 = 10;
715                 clock.n = 1;
716                 clock.m1 = 14;
717                 clock.m2 = 2;
718         }
719         clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
720         clock.p = (clock.p1 * clock.p2);
721         clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
722         clock.vco = 0;
723         memcpy(best_clock, &clock, sizeof(intel_clock_t));
724         return true;
725 }
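/*
 * Sanity check of the fixed dividers above, using the 96000 kHz refclk
 * hard-coded in the dot-clock formula:
 *
 *   low  rate: m = 5 * (23 + 2) + (8 + 2) = 135, n + 2 = 4, p = 20
 *              dot = 96000 * 135 / 4 / 20 = 162000 kHz (162 MHz)
 *   high rate: m = 5 * (14 + 2) + (2 + 2) =  84, n + 2 = 3, p = 10
 *              dot = 96000 * 84 / 3 / 10  = 268800 kHz (~270 MHz)
 */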
726
727 /**
728  * intel_wait_for_vblank - wait for vblank on a given pipe
729  * @dev: drm device
730  * @pipe: pipe to wait for
731  *
732  * Wait for vblank to occur on a given pipe.  Needed for various bits of
733  * mode setting code.
734  */
735 void intel_wait_for_vblank(struct drm_device *dev, int pipe)
736 {
737         struct drm_i915_private *dev_priv = dev->dev_private;
738         int pipestat_reg = PIPESTAT(pipe);
739
740         /* Clear existing vblank status. Note this will clear any other
741          * sticky status fields as well.
742          *
743          * This races with i915_driver_irq_handler() with the result
744          * that either function could miss a vblank event.  Here it is not
745          * fatal, as we will either wait upon the next vblank interrupt or
746          * timeout.  Generally speaking intel_wait_for_vblank() is only
747          * called during modeset at which time the GPU should be idle and
748          * should *not* be performing page flips and thus not waiting on
749          * vblanks...
750          * Currently, the result of us stealing a vblank from the irq
751          * handler is that a single frame will be skipped during swapbuffers.
752          */
753         I915_WRITE(pipestat_reg,
754                    I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
755
756         /* Wait for vblank interrupt bit to set */
757         if (wait_for(I915_READ(pipestat_reg) &
758                      PIPE_VBLANK_INTERRUPT_STATUS,
759                      50))
760                 DRM_DEBUG_KMS("vblank wait timed out\n");
761 }
762
763 /*
764  * intel_wait_for_pipe_off - wait for pipe to turn off
765  * @dev: drm device
766  * @pipe: pipe to wait for
767  *
768  * After disabling a pipe, we can't wait for vblank in the usual way,
769  * spinning on the vblank interrupt status bit, since we won't actually
770  * see an interrupt when the pipe is disabled.
771  *
772  * On Gen4 and above:
773  *   wait for the pipe register state bit to turn off
774  *
775  * Otherwise:
776  *   wait for the display line value to settle (it usually
777  *   ends up stopping at the start of the next frame).
778  *
779  */
780 void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
781 {
782         struct drm_i915_private *dev_priv = dev->dev_private;
783
784         if (INTEL_INFO(dev)->gen >= 4) {
785                 int reg = PIPECONF(pipe);
786
787                 /* Wait for the Pipe State to go off */
788                 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
789                              100))
790                         DRM_DEBUG_KMS("pipe_off wait timed out\n");
791         } else {
792                 u32 last_line;
793                 int reg = PIPEDSL(pipe);
794                 unsigned long timeout = jiffies + msecs_to_jiffies(100);
795
796                 /* Wait for the display line to settle */
797                 do {
798                         last_line = I915_READ(reg) & DSL_LINEMASK;
799                         mdelay(5);
800                 } while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
801                          time_after(timeout, jiffies));
802                 if (time_after(jiffies, timeout))
803                         DRM_DEBUG_KMS("pipe_off wait timed out\n");
804         }
805 }
806
807 static const char *state_string(bool enabled)
808 {
809         return enabled ? "on" : "off";
810 }
811
812 /* Only for pre-ILK configs */
813 static void assert_pll(struct drm_i915_private *dev_priv,
814                        enum pipe pipe, bool state)
815 {
816         int reg;
817         u32 val;
818         bool cur_state;
819
820         reg = DPLL(pipe);
821         val = I915_READ(reg);
822         cur_state = !!(val & DPLL_VCO_ENABLE);
823         WARN(cur_state != state,
824              "PLL state assertion failure (expected %s, current %s)\n",
825              state_string(state), state_string(cur_state));
826 }
827 #define assert_pll_enabled(d, p) assert_pll(d, p, true)
828 #define assert_pll_disabled(d, p) assert_pll(d, p, false)
829
830 /* For ILK+ */
831 static void assert_pch_pll(struct drm_i915_private *dev_priv,
832                            enum pipe pipe, bool state)
833 {
834         int reg;
835         u32 val;
836         bool cur_state;
837
838         if (HAS_PCH_CPT(dev_priv->dev)) {
839                 u32 pch_dpll;
840
841                 pch_dpll = I915_READ(PCH_DPLL_SEL);
842
 843                 /* Make sure the selected PLL is enabled for the transcoder */
844                 WARN(!((pch_dpll >> (4 * pipe)) & 8),
845                      "transcoder %d PLL not enabled\n", pipe);
846
847                 /* Convert the transcoder pipe number to a pll pipe number */
848                 pipe = (pch_dpll >> (4 * pipe)) & 1;
849         }
850
851         reg = PCH_DPLL(pipe);
852         val = I915_READ(reg);
853         cur_state = !!(val & DPLL_VCO_ENABLE);
854         WARN(cur_state != state,
855              "PCH PLL state assertion failure (expected %s, current %s)\n",
856              state_string(state), state_string(cur_state));
857 }
858 #define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
859 #define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
860
861 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
862                           enum pipe pipe, bool state)
863 {
864         int reg;
865         u32 val;
866         bool cur_state;
867
868         reg = FDI_TX_CTL(pipe);
869         val = I915_READ(reg);
870         cur_state = !!(val & FDI_TX_ENABLE);
871         WARN(cur_state != state,
872              "FDI TX state assertion failure (expected %s, current %s)\n",
873              state_string(state), state_string(cur_state));
874 }
875 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
876 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
877
878 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
879                           enum pipe pipe, bool state)
880 {
881         int reg;
882         u32 val;
883         bool cur_state;
884
885         reg = FDI_RX_CTL(pipe);
886         val = I915_READ(reg);
887         cur_state = !!(val & FDI_RX_ENABLE);
888         WARN(cur_state != state,
889              "FDI RX state assertion failure (expected %s, current %s)\n",
890              state_string(state), state_string(cur_state));
891 }
892 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
893 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
894
895 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
896                                       enum pipe pipe)
897 {
898         int reg;
899         u32 val;
900
901         /* ILK FDI PLL is always enabled */
902         if (dev_priv->info->gen == 5)
903                 return;
904
905         reg = FDI_TX_CTL(pipe);
906         val = I915_READ(reg);
907         WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
908 }
909
910 static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
911                                       enum pipe pipe)
912 {
913         int reg;
914         u32 val;
915
916         reg = FDI_RX_CTL(pipe);
917         val = I915_READ(reg);
918         WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
919 }
920
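/*
 * The DPLL registers can be write protected while the panel power
 * sequencer is locked; warn if the panel driven by @pipe is powered on
 * with its power-sequencer registers still locked.  Callers such as
 * intel_enable_pll() below rely on this before touching the PLL.
 */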
921 static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
922                                   enum pipe pipe)
923 {
924         int pp_reg, lvds_reg;
925         u32 val;
926         enum pipe panel_pipe = PIPE_A;
927         bool locked = true;
928
929         if (HAS_PCH_SPLIT(dev_priv->dev)) {
930                 pp_reg = PCH_PP_CONTROL;
931                 lvds_reg = PCH_LVDS;
932         } else {
933                 pp_reg = PP_CONTROL;
934                 lvds_reg = LVDS;
935         }
936
937         val = I915_READ(pp_reg);
938         if (!(val & PANEL_POWER_ON) ||
939             ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
940                 locked = false;
941
942         if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
943                 panel_pipe = PIPE_B;
944
945         WARN(panel_pipe == pipe && locked,
946              "panel assertion failure, pipe %c regs locked\n",
947              pipe_name(pipe));
948 }
949
950 void assert_pipe(struct drm_i915_private *dev_priv,
951                  enum pipe pipe, bool state)
952 {
953         int reg;
954         u32 val;
955         bool cur_state;
956
 957         /* if we need the pipe A quirk, it must always be on */
958         if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
959                 state = true;
960
961         reg = PIPECONF(pipe);
962         val = I915_READ(reg);
963         cur_state = !!(val & PIPECONF_ENABLE);
964         WARN(cur_state != state,
965              "pipe %c assertion failure (expected %s, current %s)\n",
966              pipe_name(pipe), state_string(state), state_string(cur_state));
967 }
968
969 static void assert_plane(struct drm_i915_private *dev_priv,
970                          enum plane plane, bool state)
971 {
972         int reg;
973         u32 val;
974         bool cur_state;
975
976         reg = DSPCNTR(plane);
977         val = I915_READ(reg);
978         cur_state = !!(val & DISPLAY_PLANE_ENABLE);
979         WARN(cur_state != state,
980              "plane %c assertion failure (expected %s, current %s)\n",
981              plane_name(plane), state_string(state), state_string(cur_state));
982 }
983
984 #define assert_plane_enabled(d, p) assert_plane(d, p, true)
985 #define assert_plane_disabled(d, p) assert_plane(d, p, false)
986
987 static void assert_planes_disabled(struct drm_i915_private *dev_priv,
988                                    enum pipe pipe)
989 {
990         int reg, i;
991         u32 val;
992         int cur_pipe;
993
994         /* Planes are fixed to pipes on ILK+ */
995         if (HAS_PCH_SPLIT(dev_priv->dev)) {
996                 reg = DSPCNTR(pipe);
997                 val = I915_READ(reg);
998                 WARN((val & DISPLAY_PLANE_ENABLE),
 999                      "plane %c assertion failure, should be disabled but is still enabled\n",
1000                      plane_name(pipe));
1001                 return;
1002         }
1003
1004         /* Need to check both planes against the pipe */
1005         for (i = 0; i < 2; i++) {
1006                 reg = DSPCNTR(i);
1007                 val = I915_READ(reg);
1008                 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1009                         DISPPLANE_SEL_PIPE_SHIFT;
1010                 WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1011                      "plane %c assertion failure, should be off on pipe %c but is still active\n",
1012                      plane_name(i), pipe_name(pipe));
1013         }
1014 }
1015
1016 static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1017 {
1018         u32 val;
1019         bool enabled;
1020
1021         val = I915_READ(PCH_DREF_CONTROL);
1022         enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1023                             DREF_SUPERSPREAD_SOURCE_MASK));
1024         WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1025 }
1026
1027 static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
1028                                        enum pipe pipe)
1029 {
1030         int reg;
1031         u32 val;
1032         bool enabled;
1033
1034         reg = TRANSCONF(pipe);
1035         val = I915_READ(reg);
1036         enabled = !!(val & TRANS_ENABLE);
1037         WARN(enabled,
1038              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1039              pipe_name(pipe));
1040 }
1041
1042 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1043                             enum pipe pipe, u32 port_sel, u32 val)
1044 {
1045         if ((val & DP_PORT_EN) == 0)
1046                 return false;
1047
1048         if (HAS_PCH_CPT(dev_priv->dev)) {
1049                 u32     trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1050                 u32     trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1051                 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1052                         return false;
1053         } else {
1054                 if ((val & DP_PIPE_MASK) != (pipe << 30))
1055                         return false;
1056         }
1057         return true;
1058 }
1059
1060 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1061                               enum pipe pipe, u32 val)
1062 {
1063         if ((val & PORT_ENABLE) == 0)
1064                 return false;
1065
1066         if (HAS_PCH_CPT(dev_priv->dev)) {
1067                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1068                         return false;
1069         } else {
1070                 if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
1071                         return false;
1072         }
1073         return true;
1074 }
1075
1076 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1077                               enum pipe pipe, u32 val)
1078 {
1079         if ((val & LVDS_PORT_EN) == 0)
1080                 return false;
1081
1082         if (HAS_PCH_CPT(dev_priv->dev)) {
1083                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1084                         return false;
1085         } else {
1086                 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1087                         return false;
1088         }
1089         return true;
1090 }
1091
1092 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1093                               enum pipe pipe, u32 val)
1094 {
1095         if ((val & ADPA_DAC_ENABLE) == 0)
1096                 return false;
1097         if (HAS_PCH_CPT(dev_priv->dev)) {
1098                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1099                         return false;
1100         } else {
1101                 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1102                         return false;
1103         }
1104         return true;
1105 }
1106
1107 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1108                                    enum pipe pipe, int reg, u32 port_sel)
1109 {
1110         u32 val = I915_READ(reg);
1111         WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1112              "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1113              reg, pipe_name(pipe));
1114 }
1115
1116 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1117                                      enum pipe pipe, int reg)
1118 {
1119         u32 val = I915_READ(reg);
1120         WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1121              "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1122              reg, pipe_name(pipe));
1123 }
1124
1125 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1126                                       enum pipe pipe)
1127 {
1128         int reg;
1129         u32 val;
1130
1131         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1132         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1133         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1134
1135         reg = PCH_ADPA;
1136         val = I915_READ(reg);
1137         WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1138              "PCH VGA enabled on transcoder %c, should be disabled\n",
1139              pipe_name(pipe));
1140
1141         reg = PCH_LVDS;
1142         val = I915_READ(reg);
1143         WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1144              "PCH LVDS enabled on transcoder %c, should be disabled\n",
1145              pipe_name(pipe));
1146
1147         assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
1148         assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
1149         assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
1150 }
1151
1152 /**
1153  * intel_enable_pll - enable a PLL
1154  * @dev_priv: i915 private structure
1155  * @pipe: pipe PLL to enable
1156  *
1157  * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
1158  * make sure the PLL reg is writable first though, since the panel write
1159  * protect mechanism may be enabled.
1160  *
1161  * Note!  This is for pre-ILK only.
1162  */
1163 static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1164 {
1165         int reg;
1166         u32 val;
1167
1168         /* No really, not for ILK+ */
1169         BUG_ON(dev_priv->info->gen >= 5);
1170
1171         /* PLL is protected by panel, make sure we can write it */
1172         if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
1173                 assert_panel_unlocked(dev_priv, pipe);
1174
1175         reg = DPLL(pipe);
1176         val = I915_READ(reg);
1177         val |= DPLL_VCO_ENABLE;
1178
1179         /* We do this three times for luck */
1180         I915_WRITE(reg, val);
1181         POSTING_READ(reg);
1182         udelay(150); /* wait for warmup */
1183         I915_WRITE(reg, val);
1184         POSTING_READ(reg);
1185         udelay(150); /* wait for warmup */
1186         I915_WRITE(reg, val);
1187         POSTING_READ(reg);
1188         udelay(150); /* wait for warmup */
1189 }
1190
1191 /**
1192  * intel_disable_pll - disable a PLL
1193  * @dev_priv: i915 private structure
1194  * @pipe: pipe PLL to disable
1195  *
1196  * Disable the PLL for @pipe, making sure the pipe is off first.
1197  *
1198  * Note!  This is for pre-ILK only.
1199  */
1200 static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1201 {
1202         int reg;
1203         u32 val;
1204
 1205         /* Don't disable the pipe A PLL if the pipe A force quirk requires it to stay on */
1206         if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1207                 return;
1208
1209         /* Make sure the pipe isn't still relying on us */
1210         assert_pipe_disabled(dev_priv, pipe);
1211
1212         reg = DPLL(pipe);
1213         val = I915_READ(reg);
1214         val &= ~DPLL_VCO_ENABLE;
1215         I915_WRITE(reg, val);
1216         POSTING_READ(reg);
1217 }
1218
1219 /**
1220  * intel_enable_pch_pll - enable PCH PLL
1221  * @dev_priv: i915 private structure
1222  * @pipe: pipe PLL to enable
1223  *
1224  * The PCH PLL needs to be enabled before the PCH transcoder, since it
1225  * drives the transcoder clock.
1226  */
1227 static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
1228                                  enum pipe pipe)
1229 {
1230         int reg;
1231         u32 val;
1232
1233         if (pipe > 1)
1234                 return;
1235
1236         /* PCH only available on ILK+ */
1237         BUG_ON(dev_priv->info->gen < 5);
1238
1239         /* PCH refclock must be enabled first */
1240         assert_pch_refclk_enabled(dev_priv);
1241
1242         reg = PCH_DPLL(pipe);
1243         val = I915_READ(reg);
1244         val |= DPLL_VCO_ENABLE;
1245         I915_WRITE(reg, val);
1246         POSTING_READ(reg);
1247         udelay(200);
1248 }
1249
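/*
 * Disable the PCH PLL for @pipe.  The transcoder must already be off;
 * if PCH_DPLL_SEL shows transcoder C still selecting this PLL (the
 * shared-PLL case), the disable is skipped.
 */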
1250 static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
1251                                   enum pipe pipe)
1252 {
1253         int reg;
1254         u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
1255                 pll_sel = TRANSC_DPLL_ENABLE;
1256
1257         if (pipe > 1)
1258                 return;
1259
1260         /* PCH only available on ILK+ */
1261         BUG_ON(dev_priv->info->gen < 5);
1262
1263         /* Make sure transcoder isn't still depending on us */
1264         assert_transcoder_disabled(dev_priv, pipe);
1265
1266         if (pipe == 0)
1267                 pll_sel |= TRANSC_DPLLA_SEL;
1268         else if (pipe == 1)
1269                 pll_sel |= TRANSC_DPLLB_SEL;
1270
1271
1272         if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
1273                 return;
1274
1275         reg = PCH_DPLL(pipe);
1276         val = I915_READ(reg);
1277         val &= ~DPLL_VCO_ENABLE;
1278         I915_WRITE(reg, val);
1279         POSTING_READ(reg);
1280         udelay(200);
1281 }
1282
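/*
 * Enable the PCH transcoder feeding @pipe.  The PCH PLL and the FDI
 * transmitter/receiver for this pipe must already be running; the
 * asserts below check those preconditions.
 */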
1283 static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1284                                     enum pipe pipe)
1285 {
1286         int reg;
1287         u32 val, pipeconf_val;
1288         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1289
1290         /* PCH only available on ILK+ */
1291         BUG_ON(dev_priv->info->gen < 5);
1292
1293         /* Make sure PCH DPLL is enabled */
1294         assert_pch_pll_enabled(dev_priv, pipe);
1295
1296         /* FDI must be feeding us bits for PCH ports */
1297         assert_fdi_tx_enabled(dev_priv, pipe);
1298         assert_fdi_rx_enabled(dev_priv, pipe);
1299
1300         reg = TRANSCONF(pipe);
1301         val = I915_READ(reg);
1302         pipeconf_val = I915_READ(PIPECONF(pipe));
1303
1304         if (HAS_PCH_IBX(dev_priv->dev)) {
1305                 /*
1306                  * make the BPC in transcoder be consistent with
1307                  * that in pipeconf reg.
1308                  */
1309                 val &= ~PIPE_BPC_MASK;
1310                 val |= pipeconf_val & PIPE_BPC_MASK;
1311         }
1312
1313         val &= ~TRANS_INTERLACE_MASK;
 1314         if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
 1315                 if (HAS_PCH_IBX(dev_priv->dev) &&
 1316                     intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
 1317                         val |= TRANS_LEGACY_INTERLACED_ILK;
 1318                 else
 1319                         val |= TRANS_INTERLACED;
 1320         } else
 1321                 val |= TRANS_PROGRESSIVE;
1322
1323         I915_WRITE(reg, val | TRANS_ENABLE);
1324         if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1325                 DRM_ERROR("failed to enable transcoder %d\n", pipe);
1326 }
1327
1328 static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
1329                                      enum pipe pipe)
1330 {
1331         int reg;
1332         u32 val;
1333
1334         /* FDI relies on the transcoder */
1335         assert_fdi_tx_disabled(dev_priv, pipe);
1336         assert_fdi_rx_disabled(dev_priv, pipe);
1337
1338         /* Ports must be off as well */
1339         assert_pch_ports_disabled(dev_priv, pipe);
1340
1341         reg = TRANSCONF(pipe);
1342         val = I915_READ(reg);
1343         val &= ~TRANS_ENABLE;
1344         I915_WRITE(reg, val);
1345         /* wait for PCH transcoder off, transcoder state */
1346         if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1347                 DRM_ERROR("failed to disable transcoder %d\n", pipe);
1348 }
1349
1350 /**
1351  * intel_enable_pipe - enable a pipe, asserting requirements
1352  * @dev_priv: i915 private structure
1353  * @pipe: pipe to enable
1354  * @pch_port: on ILK+, is this pipe driving a PCH port or not
1355  *
1356  * Enable @pipe, making sure that various hardware specific requirements
1357  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1358  *
1359  * @pipe should be %PIPE_A or %PIPE_B.
1360  *
1361  * Will wait until the pipe is actually running (i.e. first vblank) before
1362  * returning.
1363  */
1364 static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1365                               bool pch_port)
1366 {
1367         int reg;
1368         u32 val;
1369
1370         /*
1371          * A pipe without a PLL won't actually be able to drive bits from
1372          * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1373          * need the check.
1374          */
1375         if (!HAS_PCH_SPLIT(dev_priv->dev))
1376                 assert_pll_enabled(dev_priv, pipe);
1377         else {
1378                 if (pch_port) {
1379                         /* if driving the PCH, we need FDI enabled */
1380                         assert_fdi_rx_pll_enabled(dev_priv, pipe);
1381                         assert_fdi_tx_pll_enabled(dev_priv, pipe);
1382                 }
1383                 /* FIXME: assert CPU port conditions for SNB+ */
1384         }
1385
1386         reg = PIPECONF(pipe);
1387         val = I915_READ(reg);
1388         if (val & PIPECONF_ENABLE)
1389                 return;
1390
1391         I915_WRITE(reg, val | PIPECONF_ENABLE);
1392         intel_wait_for_vblank(dev_priv->dev, pipe);
1393 }
1394
1395 /**
1396  * intel_disable_pipe - disable a pipe, asserting requirements
1397  * @dev_priv: i915 private structure
1398  * @pipe: pipe to disable
1399  *
1400  * Disable @pipe, making sure that various hardware specific requirements
1401  * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1402  *
1403  * @pipe should be %PIPE_A or %PIPE_B.
1404  *
1405  * Will wait until the pipe has shut down before returning.
1406  */
1407 static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1408                                enum pipe pipe)
1409 {
1410         int reg;
1411         u32 val;
1412
1413         /*
1414          * Make sure planes won't keep trying to pump pixels to us,
1415          * or we might hang the display.
1416          */
1417         assert_planes_disabled(dev_priv, pipe);
1418
 1419         /* Don't disable pipe A if the pipe A force quirk requires it to stay on */
1420         if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1421                 return;
1422
1423         reg = PIPECONF(pipe);
1424         val = I915_READ(reg);
1425         if ((val & PIPECONF_ENABLE) == 0)
1426                 return;
1427
1428         I915_WRITE(reg, val & ~PIPECONF_ENABLE);
1429         intel_wait_for_pipe_off(dev_priv->dev, pipe);
1430 }
1431
1432 /*
1433  * Plane regs are double buffered, going from enabled->disabled needs a
1434  * trigger in order to latch.  The display address reg provides this.
1435  */
1436 static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
1437                                       enum plane plane)
1438 {
1439         I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1440         I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1441 }
1442
1443 /**
1444  * intel_enable_plane - enable a display plane on a given pipe
1445  * @dev_priv: i915 private structure
1446  * @plane: plane to enable
1447  * @pipe: pipe being fed
1448  *
1449  * Enable @plane on @pipe, making sure that @pipe is running first.
1450  */
1451 static void intel_enable_plane(struct drm_i915_private *dev_priv,
1452                                enum plane plane, enum pipe pipe)
1453 {
1454         int reg;
1455         u32 val;
1456
1457         /* If the pipe isn't enabled, we can't pump pixels and may hang */
1458         assert_pipe_enabled(dev_priv, pipe);
1459
1460         reg = DSPCNTR(plane);
1461         val = I915_READ(reg);
1462         if (val & DISPLAY_PLANE_ENABLE)
1463                 return;
1464
1465         I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
1466         intel_flush_display_plane(dev_priv, plane);
1467         intel_wait_for_vblank(dev_priv->dev, pipe);
1468 }
1469
1470 /**
1471  * intel_disable_plane - disable a display plane
1472  * @dev_priv: i915 private structure
1473  * @plane: plane to disable
1474  * @pipe: pipe consuming the data
1475  *
1476  * Disable @plane; should be an independent operation.
1477  */
1478 static void intel_disable_plane(struct drm_i915_private *dev_priv,
1479                                 enum plane plane, enum pipe pipe)
1480 {
1481         int reg;
1482         u32 val;
1483
1484         reg = DSPCNTR(plane);
1485         val = I915_READ(reg);
1486         if ((val & DISPLAY_PLANE_ENABLE) == 0)
1487                 return;
1488
1489         I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
1490         intel_flush_display_plane(dev_priv, plane);
1491         intel_wait_for_vblank(dev_priv->dev, pipe);
1492 }
1493
1494 static void disable_pch_dp(struct drm_i915_private *dev_priv,
1495                            enum pipe pipe, int reg, u32 port_sel)
1496 {
1497         u32 val = I915_READ(reg);
1498         if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
1499                 DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
1500                 I915_WRITE(reg, val & ~DP_PORT_EN);
1501         }
1502 }
1503
1504 static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1505                              enum pipe pipe, int reg)
1506 {
1507         u32 val = I915_READ(reg);
1508         if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
1509                 DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
1510                               reg, pipe);
1511                 I915_WRITE(reg, val & ~PORT_ENABLE);
1512         }
1513 }
1514
1515 /* Disable any ports connected to this transcoder */
1516 static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1517                                     enum pipe pipe)
1518 {
1519         u32 reg, val;
1520
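             /* Unlock the panel registers before disabling the ports below */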
1521         val = I915_READ(PCH_PP_CONTROL);
1522         I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
1523
1524         disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1525         disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1526         disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1527
1528         reg = PCH_ADPA;
1529         val = I915_READ(reg);
1530         if (adpa_pipe_enabled(dev_priv, pipe, val))
1531                 I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
1532
1533         reg = PCH_LVDS;
1534         val = I915_READ(reg);
1535         if (lvds_pipe_enabled(dev_priv, pipe, val)) {
1536                 DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
1537                 I915_WRITE(reg, val & ~LVDS_PORT_EN);
1538                 POSTING_READ(reg);
1539                 udelay(100);
1540         }
1541
1542         disable_pch_hdmi(dev_priv, pipe, HDMIB);
1543         disable_pch_hdmi(dev_priv, pipe, HDMIC);
1544         disable_pch_hdmi(dev_priv, pipe, HDMID);
1545 }
1546
1547 static void i8xx_disable_fbc(struct drm_device *dev)
1548 {
1549         struct drm_i915_private *dev_priv = dev->dev_private;
1550         u32 fbc_ctl;
1551
1552         /* Disable compression */
1553         fbc_ctl = I915_READ(FBC_CONTROL);
1554         if ((fbc_ctl & FBC_CTL_EN) == 0)
1555                 return;
1556
1557         fbc_ctl &= ~FBC_CTL_EN;
1558         I915_WRITE(FBC_CONTROL, fbc_ctl);
1559
1560         /* Wait for compressing bit to clear */
1561         if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
1562                 DRM_DEBUG_KMS("FBC idle timed out\n");
1563                 return;
1564         }
1565
1566         DRM_DEBUG_KMS("disabled FBC\n");
1567 }
1568
1569 static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1570 {
1571         struct drm_device *dev = crtc->dev;
1572         struct drm_i915_private *dev_priv = dev->dev_private;
1573         struct drm_framebuffer *fb = crtc->fb;
1574         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1575         struct drm_i915_gem_object *obj = intel_fb->obj;
1576         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1577         int cfb_pitch;
1578         int plane, i;
1579         u32 fbc_ctl, fbc_ctl2;
1580
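             /* Use at most the framebuffer pitch for the compressed buffer pitch */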
1581         cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
1582         if (fb->pitches[0] < cfb_pitch)
1583                 cfb_pitch = fb->pitches[0];
1584
1585         /* FBC_CTL wants 64B units */
1586         cfb_pitch = (cfb_pitch / 64) - 1;
1587         plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
1588
1589         /* Clear old tags */
1590         for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
1591                 I915_WRITE(FBC_TAG + (i * 4), 0);
1592
1593         /* Set it up... */
1594         fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
1595         fbc_ctl2 |= plane;
1596         I915_WRITE(FBC_CONTROL2, fbc_ctl2);
1597         I915_WRITE(FBC_FENCE_OFF, crtc->y);
1598
1599         /* enable it... */
1600         fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
1601         if (IS_I945GM(dev))
1602                 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
1603         fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
1604         fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
1605         fbc_ctl |= obj->fence_reg;
1606         I915_WRITE(FBC_CONTROL, fbc_ctl);
1607
1608         DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
1609                       cfb_pitch, crtc->y, intel_crtc->plane);
1610 }
1611
1612 static bool i8xx_fbc_enabled(struct drm_device *dev)
1613 {
1614         struct drm_i915_private *dev_priv = dev->dev_private;
1615
1616         return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
1617 }
1618
1619 static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1620 {
1621         struct drm_device *dev = crtc->dev;
1622         struct drm_i915_private *dev_priv = dev->dev_private;
1623         struct drm_framebuffer *fb = crtc->fb;
1624         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1625         struct drm_i915_gem_object *obj = intel_fb->obj;
1626         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1627         int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1628         unsigned long stall_watermark = 200;
1629         u32 dpfc_ctl;
1630
1631         dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
1632         dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
1633         I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
1634
1635         I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1636                    (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1637                    (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1638         I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
1639
1640         /* enable it... */
1641         I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
1642
1643         DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1644 }
1645
1646 static void g4x_disable_fbc(struct drm_device *dev)
1647 {
1648         struct drm_i915_private *dev_priv = dev->dev_private;
1649         u32 dpfc_ctl;
1650
1651         /* Disable compression */
1652         dpfc_ctl = I915_READ(DPFC_CONTROL);
1653         if (dpfc_ctl & DPFC_CTL_EN) {
1654                 dpfc_ctl &= ~DPFC_CTL_EN;
1655                 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
1656
1657                 DRM_DEBUG_KMS("disabled FBC\n");
1658         }
1659 }
1660
1661 static bool g4x_fbc_enabled(struct drm_device *dev)
1662 {
1663         struct drm_i915_private *dev_priv = dev->dev_private;
1664
1665         return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
1666 }
1667
1668 static void sandybridge_blit_fbc_update(struct drm_device *dev)
1669 {
1670         struct drm_i915_private *dev_priv = dev->dev_private;
1671         u32 blt_ecoskpd;
1672
1673         /* Make sure blitter notifies FBC of writes */
1674         gen6_gt_force_wake_get(dev_priv);
1675         blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
1676         blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
1677                 GEN6_BLITTER_LOCK_SHIFT;
1678         I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1679         blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
1680         I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1681         blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
1682                          GEN6_BLITTER_LOCK_SHIFT);
1683         I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1684         POSTING_READ(GEN6_BLITTER_ECOSKPD);
1685         gen6_gt_force_wake_put(dev_priv);
1686 }
1687
1688 static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1689 {
1690         struct drm_device *dev = crtc->dev;
1691         struct drm_i915_private *dev_priv = dev->dev_private;
1692         struct drm_framebuffer *fb = crtc->fb;
1693         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1694         struct drm_i915_gem_object *obj = intel_fb->obj;
1695         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1696         int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1697         unsigned long stall_watermark = 200;
1698         u32 dpfc_ctl;
1699
1700         dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1701         dpfc_ctl &= DPFC_RESERVED;
1702         dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
1703         /* Set persistent mode for front-buffer rendering, ala X. */
1704         dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
1705         dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
1706         I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
1707
1708         I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1709                    (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1710                    (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1711         I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
1712         I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
1713         /* enable it... */
1714         I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
1715
1716         if (IS_GEN6(dev)) {
1717                 I915_WRITE(SNB_DPFC_CTL_SA,
1718                            SNB_CPU_FENCE_ENABLE | obj->fence_reg);
1719                 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
1720                 sandybridge_blit_fbc_update(dev);
1721         }
1722
1723         DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1724 }
1725
1726 static void ironlake_disable_fbc(struct drm_device *dev)
1727 {
1728         struct drm_i915_private *dev_priv = dev->dev_private;
1729         u32 dpfc_ctl;
1730
1731         /* Disable compression */
1732         dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1733         if (dpfc_ctl & DPFC_CTL_EN) {
1734                 dpfc_ctl &= ~DPFC_CTL_EN;
1735                 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
1736
1737                 DRM_DEBUG_KMS("disabled FBC\n");
1738         }
1739 }
1740
1741 static bool ironlake_fbc_enabled(struct drm_device *dev)
1742 {
1743         struct drm_i915_private *dev_priv = dev->dev_private;
1744
1745         return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
1746 }
1747
1748 bool intel_fbc_enabled(struct drm_device *dev)
1749 {
1750         struct drm_i915_private *dev_priv = dev->dev_private;
1751
1752         if (!dev_priv->display.fbc_enabled)
1753                 return false;
1754
1755         return dev_priv->display.fbc_enabled(dev);
1756 }
1757
1758 static void intel_fbc_work_fn(struct work_struct *__work)
1759 {
1760         struct intel_fbc_work *work =
1761                 container_of(to_delayed_work(__work),
1762                              struct intel_fbc_work, work);
1763         struct drm_device *dev = work->crtc->dev;
1764         struct drm_i915_private *dev_priv = dev->dev_private;
1765
1766         mutex_lock(&dev->struct_mutex);
1767         if (work == dev_priv->fbc_work) {
1768                 /* Double check that we haven't switched fb without cancelling
1769                  * the prior work.
1770                  */
1771                 if (work->crtc->fb == work->fb) {
1772                         dev_priv->display.enable_fbc(work->crtc,
1773                                                      work->interval);
1774
1775                         dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
1776                         dev_priv->cfb_fb = work->crtc->fb->base.id;
1777                         dev_priv->cfb_y = work->crtc->y;
1778                 }
1779
1780                 dev_priv->fbc_work = NULL;
1781         }
1782         mutex_unlock(&dev->struct_mutex);
1783
1784         kfree(work);
1785 }
1786
1787 static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
1788 {
1789         if (dev_priv->fbc_work == NULL)
1790                 return;
1791
1792         DRM_DEBUG_KMS("cancelling pending FBC enable\n");
1793
1794         /* Synchronisation is provided by struct_mutex and checking of
1795          * dev_priv->fbc_work, so we can perform the cancellation
1796          * entirely asynchronously.
1797          */
1798         if (cancel_delayed_work(&dev_priv->fbc_work->work))
1799                 /* the work was cancelled before it ran, clean up */
1800                 kfree(dev_priv->fbc_work);
1801
1802         /* Mark the work as no longer wanted so that if it does
1803          * wake-up (because the work was already running and waiting
1804          * for our mutex), it will discover that it is no longer
1805          * necessary to run.
1806          */
1807         dev_priv->fbc_work = NULL;
1808 }
1809
1810 static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1811 {
1812         struct intel_fbc_work *work;
1813         struct drm_device *dev = crtc->dev;
1814         struct drm_i915_private *dev_priv = dev->dev_private;
1815
1816         if (!dev_priv->display.enable_fbc)
1817                 return;
1818
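             /* Cancel any FBC enable that is still pending from a previous request */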
1819         intel_cancel_fbc_work(dev_priv);
1820
1821         work = kzalloc(sizeof *work, GFP_KERNEL);
1822         if (work == NULL) {
1823                 dev_priv->display.enable_fbc(crtc, interval);
1824                 return;
1825         }
1826
1827         work->crtc = crtc;
1828         work->fb = crtc->fb;
1829         work->interval = interval;
1830         INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
1831
1832         dev_priv->fbc_work = work;
1833
1834         DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
1835
1836         /* Delay the actual enabling to let pageflipping cease and the
1837          * display to settle before starting the compression. Note that
1838          * this delay also serves a second purpose: it allows for a
1839          * vblank to pass after disabling the FBC before we attempt
1840          * to modify the control registers.
1841          *
1842          * A more complicated solution would involve tracking vblanks
1843          * following the termination of the page-flipping sequence
1844          * and indeed performing the enable as a co-routine and not
1845          * waiting synchronously upon the vblank.
1846          */
1847         schedule_delayed_work(&work->work, msecs_to_jiffies(50));
1848 }
1849
1850 void intel_disable_fbc(struct drm_device *dev)
1851 {
1852         struct drm_i915_private *dev_priv = dev->dev_private;
1853
1854         intel_cancel_fbc_work(dev_priv);
1855
1856         if (!dev_priv->display.disable_fbc)
1857                 return;
1858
1859         dev_priv->display.disable_fbc(dev);
1860         dev_priv->cfb_plane = -1;
1861 }
1862
1863 /**
1864  * intel_update_fbc - enable/disable FBC as needed
1865  * @dev: the drm_device
1866  *
1867  * Set up the framebuffer compression hardware at mode set time.  We
1868  * enable it if possible:
1869  *   - plane A only (on pre-965)
1870  *   - no pixel multiply/line duplication
1871  *   - no alpha buffer discard
1872  *   - no dual wide
1873  *   - framebuffer <= 2048 in width, 1536 in height
1874  *
1875  * We can't assume that any compression will take place (worst case),
1876  * so the compressed buffer has to be the same size as the uncompressed
1877  * one.  It also must reside (along with the line length buffer) in
1878  * stolen memory.
1879  *
1880  * We need to enable/disable FBC on a global basis.
1881  */
1882 static void intel_update_fbc(struct drm_device *dev)
1883 {
1884         struct drm_i915_private *dev_priv = dev->dev_private;
1885         struct drm_crtc *crtc = NULL, *tmp_crtc;
1886         struct intel_crtc *intel_crtc;
1887         struct drm_framebuffer *fb;
1888         struct intel_framebuffer *intel_fb;
1889         struct drm_i915_gem_object *obj;
1890         int enable_fbc;
1891
1892         DRM_DEBUG_KMS("\n");
1893
1894         if (!i915_powersave)
1895                 return;
1896
1897         if (!I915_HAS_FBC(dev))
1898                 return;
1899
1900         /*
1901          * If FBC is already on, we just have to verify that we can
1902          * keep it that way...
1903          * Need to disable if:
1904          *   - more than one pipe is active
1905          *   - changing FBC params (stride, fence, mode)
1906          *   - new fb is too large to fit in compressed buffer
1907          *   - going to an unsupported config (interlace, pixel multiply, etc.)
1908          */
1909         list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
1910                 if (tmp_crtc->enabled && tmp_crtc->fb) {
1911                         if (crtc) {
1912                                 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
1913                                 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
1914                                 goto out_disable;
1915                         }
1916                         crtc = tmp_crtc;
1917                 }
1918         }
1919
1920         if (!crtc || crtc->fb == NULL) {
1921                 DRM_DEBUG_KMS("no output, disabling\n");
1922                 dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
1923                 goto out_disable;
1924         }
1925
1926         intel_crtc = to_intel_crtc(crtc);
1927         fb = crtc->fb;
1928         intel_fb = to_intel_framebuffer(fb);
1929         obj = intel_fb->obj;
1930
1931         enable_fbc = i915_enable_fbc;
1932         if (enable_fbc < 0) {
1933                 DRM_DEBUG_KMS("fbc set to per-chip default\n");
1934                 enable_fbc = 1;
1935                 if (INTEL_INFO(dev)->gen <= 6)
1936                         enable_fbc = 0;
1937         }
1938         if (!enable_fbc) {
1939                 DRM_DEBUG_KMS("fbc disabled per module param\n");
1940                 dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
1941                 goto out_disable;
1942         }
1943         if (intel_fb->obj->base.size > dev_priv->cfb_size) {
1944                 DRM_DEBUG_KMS("framebuffer too large, disabling "
1945                               "compression\n");
1946                 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1947                 goto out_disable;
1948         }
1949         if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
1950             (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
1951                 DRM_DEBUG_KMS("mode incompatible with compression, "
1952                               "disabling\n");
1953                 dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
1954                 goto out_disable;
1955         }
1956         if ((crtc->mode.hdisplay > 2048) ||
1957             (crtc->mode.vdisplay > 1536)) {
1958                 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
1959                 dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
1960                 goto out_disable;
1961         }
1962         if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
1963                 DRM_DEBUG_KMS("plane not 0, disabling compression\n");
1964                 dev_priv->no_fbc_reason = FBC_BAD_PLANE;
1965                 goto out_disable;
1966         }
1967
1968         /* The use of a CPU fence is mandatory in order to detect writes
1969          * by the CPU to the scanout and trigger updates to the FBC.
1970          */
1971         if (obj->tiling_mode != I915_TILING_X ||
1972             obj->fence_reg == I915_FENCE_REG_NONE) {
1973                 DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
1974                 dev_priv->no_fbc_reason = FBC_NOT_TILED;
1975                 goto out_disable;
1976         }
1977
1978         /* If the kernel debugger is active, always disable compression */
1979         if (in_dbg_master())
1980                 goto out_disable;
1981
1982         /* If the scanout has not changed, don't modify the FBC settings.
1983          * Note that we make the fundamental assumption that the fb->obj
1984          * cannot be unpinned (and have its GTT offset and fence revoked)
1985          * without first being decoupled from the scanout and FBC disabled.
1986          */
1987         if (dev_priv->cfb_plane == intel_crtc->plane &&
1988             dev_priv->cfb_fb == fb->base.id &&
1989             dev_priv->cfb_y == crtc->y)
1990                 return;
1991
1992         if (intel_fbc_enabled(dev)) {
1993                 /* We update FBC along two paths, after changing fb/crtc
1994                  * configuration (modeswitching) and after page-flipping
1995                  * finishes. For the latter, we know that not only did
1996                  * we disable the FBC at the start of the page-flip
1997                  * sequence, but also more than one vblank has passed.
1998                  *
1999                  * For the former case of modeswitching, it is possible
2000                  * to switch between two FBC valid configurations
2001                  * instantaneously so we do need to disable the FBC
2002                  * before we can modify its control registers. We also
2003                  * have to wait for the next vblank for that to take
2004                  * effect. However, since we delay enabling FBC we can
2005                  * assume that a vblank has passed since disabling and
2006                  * that we can safely alter the registers in the deferred
2007                  * callback.
2008                  *
2009                  * In the scenario that we go from a valid to invalid
2010                  * and then back to valid FBC configuration we have
2011                  * no strict enforcement that a vblank occurred since
2012                  * disabling the FBC. However, along all current pipe
2013                  * disabling paths we do need to wait for a vblank at
2014                  * some point. And we wait before enabling FBC anyway.
2015                  */
2016                 DRM_DEBUG_KMS("disabling active FBC for update\n");
2017                 intel_disable_fbc(dev);
2018         }
2019
2020         intel_enable_fbc(crtc, 500);
2021         return;
2022
2023 out_disable:
2024         /* Multiple disables should be harmless */
2025         if (intel_fbc_enabled(dev)) {
2026                 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
2027                 intel_disable_fbc(dev);
2028         }
2029 }
2030
2031 int
2032 intel_pin_and_fence_fb_obj(struct drm_device *dev,
2033                            struct drm_i915_gem_object *obj,
2034                            struct intel_ring_buffer *pipelined)
2035 {
2036         struct drm_i915_private *dev_priv = dev->dev_private;
2037         u32 alignment;
2038         int ret;
2039
2040         switch (obj->tiling_mode) {
2041         case I915_TILING_NONE:
2042                 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
2043                         alignment = 128 * 1024;
2044                 else if (INTEL_INFO(dev)->gen >= 4)
2045                         alignment = 4 * 1024;
2046                 else
2047                         alignment = 64 * 1024;
2048                 break;
2049         case I915_TILING_X:
2050                 /* pin() will align the object as required by fence */
2051                 alignment = 0;
2052                 break;
2053         case I915_TILING_Y:
2054                 /* FIXME: Is this true? */
2055                 DRM_ERROR("Y tiled not allowed for scan out buffers\n");
2056                 return -EINVAL;
2057         default:
2058                 BUG();
2059         }
2060
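             /* Disable interruptible waits while pinning for scanout; restored on exit */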
2061         dev_priv->mm.interruptible = false;
2062         ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
2063         if (ret)
2064                 goto err_interruptible;
2065
2066         /* Install a fence for tiled scan-out. Pre-i965 always needs a
2067          * fence, whereas 965+ only requires a fence if using
2068          * framebuffer compression.  For simplicity, we always install
2069          * a fence as the cost is not that onerous.
2070          */
2071         if (obj->tiling_mode != I915_TILING_NONE) {
2072                 ret = i915_gem_object_get_fence(obj, pipelined);
2073                 if (ret)
2074                         goto err_unpin;
2075
2076                 i915_gem_object_pin_fence(obj);
2077         }
2078
2079         dev_priv->mm.interruptible = true;
2080         return 0;
2081
2082 err_unpin:
2083         i915_gem_object_unpin(obj);
2084 err_interruptible:
2085         dev_priv->mm.interruptible = true;
2086         return ret;
2087 }
2088
2089 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
2090 {
2091         i915_gem_object_unpin_fence(obj);
2092         i915_gem_object_unpin(obj);
2093 }
2094
2095 static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2096                              int x, int y)
2097 {
2098         struct drm_device *dev = crtc->dev;
2099         struct drm_i915_private *dev_priv = dev->dev_private;
2100         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2101         struct intel_framebuffer *intel_fb;
2102         struct drm_i915_gem_object *obj;
2103         int plane = intel_crtc->plane;
2104         unsigned long Start, Offset;
2105         u32 dspcntr;
2106         u32 reg;
2107
2108         switch (plane) {
2109         case 0:
2110         case 1:
2111                 break;
2112         default:
2113                 DRM_ERROR("Can't update plane %d in SAREA\n", plane);
2114                 return -EINVAL;
2115         }
2116
2117         intel_fb = to_intel_framebuffer(fb);
2118         obj = intel_fb->obj;
2119
2120         reg = DSPCNTR(plane);
2121         dspcntr = I915_READ(reg);
2122         /* Mask out pixel format bits in case we change it */
2123         dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2124         switch (fb->bits_per_pixel) {
2125         case 8:
2126                 dspcntr |= DISPPLANE_8BPP;
2127                 break;
2128         case 16:
2129                 if (fb->depth == 15)
2130                         dspcntr |= DISPPLANE_15_16BPP;
2131                 else
2132                         dspcntr |= DISPPLANE_16BPP;
2133                 break;
2134         case 24:
2135         case 32:
2136                 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
2137                 break;
2138         default:
2139                 DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
2140                 return -EINVAL;
2141         }
2142         if (INTEL_INFO(dev)->gen >= 4) {
2143                 if (obj->tiling_mode != I915_TILING_NONE)
2144                         dspcntr |= DISPPLANE_TILED;
2145                 else
2146                         dspcntr &= ~DISPPLANE_TILED;
2147         }
2148
2149         I915_WRITE(reg, dspcntr);
2150
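             /* Compute the surface base and the byte offset of (x, y) within the fb */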
2151         Start = obj->gtt_offset;
2152         Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2153
2154         DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2155                       Start, Offset, x, y, fb->pitches[0]);
2156         I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2157         if (INTEL_INFO(dev)->gen >= 4) {
2158                 I915_WRITE(DSPSURF(plane), Start);
2159                 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2160                 I915_WRITE(DSPADDR(plane), Offset);
2161         } else
2162                 I915_WRITE(DSPADDR(plane), Start + Offset);
2163         POSTING_READ(reg);
2164
2165         return 0;
2166 }
2167
2168 static int ironlake_update_plane(struct drm_crtc *crtc,
2169                                  struct drm_framebuffer *fb, int x, int y)
2170 {
2171         struct drm_device *dev = crtc->dev;
2172         struct drm_i915_private *dev_priv = dev->dev_private;
2173         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2174         struct intel_framebuffer *intel_fb;
2175         struct drm_i915_gem_object *obj;
2176         int plane = intel_crtc->plane;
2177         unsigned long Start, Offset;
2178         u32 dspcntr;
2179         u32 reg;
2180
2181         switch (plane) {
2182         case 0:
2183         case 1:
2184         case 2:
2185                 break;
2186         default:
2187                 DRM_ERROR("Can't update plane %d in SAREA\n", plane);
2188                 return -EINVAL;
2189         }
2190
2191         intel_fb = to_intel_framebuffer(fb);
2192         obj = intel_fb->obj;
2193
2194         reg = DSPCNTR(plane);
2195         dspcntr = I915_READ(reg);
2196         /* Mask out pixel format bits in case we change it */
2197         dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2198         switch (fb->bits_per_pixel) {
2199         case 8:
2200                 dspcntr |= DISPPLANE_8BPP;
2201                 break;
2202         case 16:
2203                 if (fb->depth != 16)
2204                         return -EINVAL;
2205
2206                 dspcntr |= DISPPLANE_16BPP;
2207                 break;
2208         case 24:
2209         case 32:
2210                 if (fb->depth == 24)
2211                         dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
2212                 else if (fb->depth == 30)
2213                         dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
2214                 else
2215                         return -EINVAL;
2216                 break;
2217         default:
2218                 DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
2219                 return -EINVAL;
2220         }
2221
2222         if (obj->tiling_mode != I915_TILING_NONE)
2223                 dspcntr |= DISPPLANE_TILED;
2224         else
2225                 dspcntr &= ~DISPPLANE_TILED;
2226
2227         /* trickle feed must be disabled on ILK+ */
2228         dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2229
2230         I915_WRITE(reg, dspcntr);
2231
2232         Start = obj->gtt_offset;
2233         Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2234
2235         DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2236                       Start, Offset, x, y, fb->pitches[0]);
2237         I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2238         I915_WRITE(DSPSURF(plane), Start);
2239         I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2240         I915_WRITE(DSPADDR(plane), Offset);
2241         POSTING_READ(reg);
2242
2243         return 0;
2244 }
2245
2246 /* Assume fb object is pinned & idle & fenced and just update base pointers */
2247 static int
2248 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2249                            int x, int y, enum mode_set_atomic state)
2250 {
2251         struct drm_device *dev = crtc->dev;
2252         struct drm_i915_private *dev_priv = dev->dev_private;
2253         int ret;
2254
2255         ret = dev_priv->display.update_plane(crtc, fb, x, y);
2256         if (ret)
2257                 return ret;
2258
2259         intel_update_fbc(dev);
2260         intel_increase_pllclock(crtc);
2261
2262         return 0;
2263 }
2264
2265 static int
2266 intel_finish_fb(struct drm_framebuffer *old_fb)
2267 {
2268         struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
2269         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2270         bool was_interruptible = dev_priv->mm.interruptible;
2271         int ret;
2272
2273         wait_event(dev_priv->pending_flip_queue,
2274                    atomic_read(&dev_priv->mm.wedged) ||
2275                    atomic_read(&obj->pending_flip) == 0);
2276
2277         /* Big Hammer, we also need to ensure that any pending
2278          * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2279          * current scanout is retired before unpinning the old
2280          * framebuffer.
2281          *
2282          * This should only fail upon a hung GPU, in which case we
2283          * can safely continue.
2284          */
2285         dev_priv->mm.interruptible = false;
2286         ret = i915_gem_object_finish_gpu(obj);
2287         dev_priv->mm.interruptible = was_interruptible;
2288
2289         return ret;
2290 }
2291
2292 static int
2293 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2294                     struct drm_framebuffer *old_fb)
2295 {
2296         struct drm_device *dev = crtc->dev;
2297         struct drm_i915_master_private *master_priv;
2298         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2299         int ret;
2300
2301         /* no fb bound */
2302         if (!crtc->fb) {
2303                 DRM_ERROR("No FB bound\n");
2304                 return 0;
2305         }
2306
2307         switch (intel_crtc->plane) {
2308         case 0:
2309         case 1:
2310                 break;
2311         case 2:
2312                 if (IS_IVYBRIDGE(dev))
2313                         break;
2314                 /* fall through otherwise */
2315         default:
2316                 DRM_ERROR("no plane for crtc\n");
2317                 return -EINVAL;
2318         }
2319
2320         mutex_lock(&dev->struct_mutex);
2321         ret = intel_pin_and_fence_fb_obj(dev,
2322                                          to_intel_framebuffer(crtc->fb)->obj,
2323                                          NULL);
2324         if (ret != 0) {
2325                 mutex_unlock(&dev->struct_mutex);
2326                 DRM_ERROR("pin & fence failed\n");
2327                 return ret;
2328         }
2329
2330         if (old_fb)
2331                 intel_finish_fb(old_fb);
2332
2333         ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
2334                                          LEAVE_ATOMIC_MODE_SET);
2335         if (ret) {
2336                 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
2337                 mutex_unlock(&dev->struct_mutex);
2338                 DRM_ERROR("failed to update base address\n");
2339                 return ret;
2340         }
2341
2342         if (old_fb) {
2343                 intel_wait_for_vblank(dev, intel_crtc->pipe);
2344                 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
2345         }
2346
2347         mutex_unlock(&dev->struct_mutex);
2348
2349         if (!dev->primary->master)
2350                 return 0;
2351
2352         master_priv = dev->primary->master->driver_priv;
2353         if (!master_priv->sarea_priv)
2354                 return 0;
2355
2356         if (intel_crtc->pipe) {
2357                 master_priv->sarea_priv->pipeB_x = x;
2358                 master_priv->sarea_priv->pipeB_y = y;
2359         } else {
2360                 master_priv->sarea_priv->pipeA_x = x;
2361                 master_priv->sarea_priv->pipeA_y = y;
2362         }
2363
2364         return 0;
2365 }
2366
2367 static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
2368 {
2369         struct drm_device *dev = crtc->dev;
2370         struct drm_i915_private *dev_priv = dev->dev_private;
2371         u32 dpa_ctl;
2372
2373         DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
2374         dpa_ctl = I915_READ(DP_A);
2375         dpa_ctl &= ~DP_PLL_FREQ_MASK;
2376
2377         if (clock < 200000) {
2378                 u32 temp;
2379                 dpa_ctl |= DP_PLL_FREQ_160MHZ;
2380                 /* workaround for 160MHz:
2381                    1) program 0x4600c bits 15:0 = 0x8124
2382                    2) program 0x46010 bit 0 = 1
2383                    3) program 0x46034 bit 24 = 1
2384                    4) program 0x64000 bit 14 = 1
2385                    */
2386                 temp = I915_READ(0x4600c);
2387                 temp &= 0xffff0000;
2388                 I915_WRITE(0x4600c, temp | 0x8124);
2389
2390                 temp = I915_READ(0x46010);
2391                 I915_WRITE(0x46010, temp | 1);
2392
2393                 temp = I915_READ(0x46034);
2394                 I915_WRITE(0x46034, temp | (1 << 24));
2395         } else {
2396                 dpa_ctl |= DP_PLL_FREQ_270MHZ;
2397         }
2398         I915_WRITE(DP_A, dpa_ctl);
2399
2400         POSTING_READ(DP_A);
2401         udelay(500);
2402 }
2403
2404 static void intel_fdi_normal_train(struct drm_crtc *crtc)
2405 {
2406         struct drm_device *dev = crtc->dev;
2407         struct drm_i915_private *dev_priv = dev->dev_private;
2408         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2409         int pipe = intel_crtc->pipe;
2410         u32 reg, temp;
2411
2412         /* enable normal train */
2413         reg = FDI_TX_CTL(pipe);
2414         temp = I915_READ(reg);
2415         if (IS_IVYBRIDGE(dev)) {
2416                 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2417                 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
2418         } else {
2419                 temp &= ~FDI_LINK_TRAIN_NONE;
2420                 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
2421         }
2422         I915_WRITE(reg, temp);
2423
2424         reg = FDI_RX_CTL(pipe);
2425         temp = I915_READ(reg);
2426         if (HAS_PCH_CPT(dev)) {
2427                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2428                 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2429         } else {
2430                 temp &= ~FDI_LINK_TRAIN_NONE;
2431                 temp |= FDI_LINK_TRAIN_NONE;
2432         }
2433         I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2434
2435         /* wait one idle pattern time */
2436         POSTING_READ(reg);
2437         udelay(1000);
2438
2439         /* IVB wants error correction enabled */
2440         if (IS_IVYBRIDGE(dev))
2441                 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2442                            FDI_FE_ERRC_ENABLE);
2443 }
2444
2445 /* The FDI link training functions for ILK/Ibexpeak. */
2446 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2447 {
2448         struct drm_device *dev = crtc->dev;
2449         struct drm_i915_private *dev_priv = dev->dev_private;
2450         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2451         int pipe = intel_crtc->pipe;
2452         int plane = intel_crtc->plane;
2453         u32 reg, temp, tries;
2454
2455         /* FDI needs bits from pipe & plane first */
2456         assert_pipe_enabled(dev_priv, pipe);
2457         assert_plane_enabled(dev_priv, plane);
2458
2459         /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2460            for train result */
2461         reg = FDI_RX_IMR(pipe);
2462         temp = I915_READ(reg);
2463         temp &= ~FDI_RX_SYMBOL_LOCK;
2464         temp &= ~FDI_RX_BIT_LOCK;
2465         I915_WRITE(reg, temp);
2466         I915_READ(reg);
2467         udelay(150);
2468
2469         /* enable CPU FDI TX and PCH FDI RX */
2470         reg = FDI_TX_CTL(pipe);
2471         temp = I915_READ(reg);
2472         temp &= ~(7 << 19);
2473         temp |= (intel_crtc->fdi_lanes - 1) << 19;
2474         temp &= ~FDI_LINK_TRAIN_NONE;
2475         temp |= FDI_LINK_TRAIN_PATTERN_1;
2476         I915_WRITE(reg, temp | FDI_TX_ENABLE);
2477
2478         reg = FDI_RX_CTL(pipe);
2479         temp = I915_READ(reg);
2480         temp &= ~FDI_LINK_TRAIN_NONE;
2481         temp |= FDI_LINK_TRAIN_PATTERN_1;
2482         I915_WRITE(reg, temp | FDI_RX_ENABLE);
2483
2484         POSTING_READ(reg);
2485         udelay(150);
2486
2487         /* Ironlake workaround, enable clock pointer after FDI enable */
2488         if (HAS_PCH_IBX(dev)) {
2489                 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2490                 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2491                            FDI_RX_PHASE_SYNC_POINTER_EN);
2492         }
2493
2494         reg = FDI_RX_IIR(pipe);
2495         for (tries = 0; tries < 5; tries++) {
2496                 temp = I915_READ(reg);
2497                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2498
2499                 if ((temp & FDI_RX_BIT_LOCK)) {
2500                         DRM_DEBUG_KMS("FDI train 1 done.\n");
2501                         I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2502                         break;
2503                 }
2504         }
2505         if (tries == 5)
2506                 DRM_ERROR("FDI train 1 fail!\n");
2507
2508         /* Train 2 */
2509         reg = FDI_TX_CTL(pipe);
2510         temp = I915_READ(reg);
2511         temp &= ~FDI_LINK_TRAIN_NONE;
2512         temp |= FDI_LINK_TRAIN_PATTERN_2;
2513         I915_WRITE(reg, temp);
2514
2515         reg = FDI_RX_CTL(pipe);
2516         temp = I915_READ(reg);
2517         temp &= ~FDI_LINK_TRAIN_NONE;
2518         temp |= FDI_LINK_TRAIN_PATTERN_2;
2519         I915_WRITE(reg, temp);
2520
2521         POSTING_READ(reg);
2522         udelay(150);
2523
2524         reg = FDI_RX_IIR(pipe);
2525         for (tries = 0; tries < 5; tries++) {
2526                 temp = I915_READ(reg);
2527                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2528
2529                 if (temp & FDI_RX_SYMBOL_LOCK) {
2530                         I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2531                         DRM_DEBUG_KMS("FDI train 2 done.\n");
2532                         break;
2533                 }
2534         }
2535         if (tries == 5)
2536                 DRM_ERROR("FDI train 2 fail!\n");
2537
2538         DRM_DEBUG_KMS("FDI train done.\n");
2539
2540 }
2541
2542 static const int snb_b_fdi_train_param[] = {
2543         FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2544         FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2545         FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2546         FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2547 };
2548
2549 /* The FDI link training functions for SNB/Cougarpoint. */
2550 static void gen6_fdi_link_train(struct drm_crtc *crtc)
2551 {
2552         struct drm_device *dev = crtc->dev;
2553         struct drm_i915_private *dev_priv = dev->dev_private;
2554         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2555         int pipe = intel_crtc->pipe;
2556         u32 reg, temp, i;
2557
2558         /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2559            for train result */
2560         reg = FDI_RX_IMR(pipe);
2561         temp = I915_READ(reg);
2562         temp &= ~FDI_RX_SYMBOL_LOCK;
2563         temp &= ~FDI_RX_BIT_LOCK;
2564         I915_WRITE(reg, temp);
2565
2566         POSTING_READ(reg);
2567         udelay(150);
2568
2569         /* enable CPU FDI TX and PCH FDI RX */
2570         reg = FDI_TX_CTL(pipe);
2571         temp = I915_READ(reg);
2572         temp &= ~(7 << 19);
2573         temp |= (intel_crtc->fdi_lanes - 1) << 19;
2574         temp &= ~FDI_LINK_TRAIN_NONE;
2575         temp |= FDI_LINK_TRAIN_PATTERN_1;
2576         temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2577         /* SNB-B */
2578         temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2579         I915_WRITE(reg, temp | FDI_TX_ENABLE);
2580
2581         reg = FDI_RX_CTL(pipe);
2582         temp = I915_READ(reg);
2583         if (HAS_PCH_CPT(dev)) {
2584                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2585                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2586         } else {
2587                 temp &= ~FDI_LINK_TRAIN_NONE;
2588                 temp |= FDI_LINK_TRAIN_PATTERN_1;
2589         }
2590         I915_WRITE(reg, temp | FDI_RX_ENABLE);
2591
2592         POSTING_READ(reg);
2593         udelay(150);
2594
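             /* Try each voltage swing / pre-emphasis setting until the RX reports bit lock */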
2595         for (i = 0; i < 4; i++) {
2596                 reg = FDI_TX_CTL(pipe);
2597                 temp = I915_READ(reg);
2598                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2599                 temp |= snb_b_fdi_train_param[i];
2600                 I915_WRITE(reg, temp);
2601
2602                 POSTING_READ(reg);
2603                 udelay(500);
2604
2605                 reg = FDI_RX_IIR(pipe);
2606                 temp = I915_READ(reg);
2607                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2608
2609                 if (temp & FDI_RX_BIT_LOCK) {
2610                         I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2611                         DRM_DEBUG_KMS("FDI train 1 done.\n");
2612                         break;
2613                 }
2614         }
2615         if (i == 4)
2616                 DRM_ERROR("FDI train 1 fail!\n");
2617
2618         /* Train 2 */
2619         reg = FDI_TX_CTL(pipe);
2620         temp = I915_READ(reg);
2621         temp &= ~FDI_LINK_TRAIN_NONE;
2622         temp |= FDI_LINK_TRAIN_PATTERN_2;
2623         if (IS_GEN6(dev)) {
2624                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2625                 /* SNB-B */
2626                 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2627         }
2628         I915_WRITE(reg, temp);
2629
2630         reg = FDI_RX_CTL(pipe);
2631         temp = I915_READ(reg);
2632         if (HAS_PCH_CPT(dev)) {
2633                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2634                 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2635         } else {
2636                 temp &= ~FDI_LINK_TRAIN_NONE;
2637                 temp |= FDI_LINK_TRAIN_PATTERN_2;
2638         }
2639         I915_WRITE(reg, temp);
2640
2641         POSTING_READ(reg);
2642         udelay(150);
2643
2644         for (i = 0; i < 4; i++) {
2645                 reg = FDI_TX_CTL(pipe);
2646                 temp = I915_READ(reg);
2647                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2648                 temp |= snb_b_fdi_train_param[i];
2649                 I915_WRITE(reg, temp);
2650
2651                 POSTING_READ(reg);
2652                 udelay(500);
2653
2654                 reg = FDI_RX_IIR(pipe);
2655                 temp = I915_READ(reg);
2656                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2657
2658                 if (temp & FDI_RX_SYMBOL_LOCK) {
2659                         I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2660                         DRM_DEBUG_KMS("FDI train 2 done.\n");
2661                         break;
2662                 }
2663         }
2664         if (i == 4)
2665                 DRM_ERROR("FDI train 2 fail!\n");
2666
2667         DRM_DEBUG_KMS("FDI train done.\n");
2668 }
2669
2670 /* Manual link training for Ivy Bridge A0 parts */
2671 static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2672 {
2673         struct drm_device *dev = crtc->dev;
2674         struct drm_i915_private *dev_priv = dev->dev_private;
2675         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2676         int pipe = intel_crtc->pipe;
2677         u32 reg, temp, i;
2678
2679         /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2680            for train result */
2681         reg = FDI_RX_IMR(pipe);
2682         temp = I915_READ(reg);
2683         temp &= ~FDI_RX_SYMBOL_LOCK;
2684         temp &= ~FDI_RX_BIT_LOCK;
2685         I915_WRITE(reg, temp);
2686
2687         POSTING_READ(reg);
2688         udelay(150);
2689
2690         /* enable CPU FDI TX and PCH FDI RX */
2691         reg = FDI_TX_CTL(pipe);
2692         temp = I915_READ(reg);
2693         temp &= ~(7 << 19);
2694         temp |= (intel_crtc->fdi_lanes - 1) << 19;
2695         temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
2696         temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
2697         temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2698         temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2699         temp |= FDI_COMPOSITE_SYNC;
2700         I915_WRITE(reg, temp | FDI_TX_ENABLE);
2701
2702         reg = FDI_RX_CTL(pipe);
2703         temp = I915_READ(reg);
2704         temp &= ~FDI_LINK_TRAIN_AUTO;
2705         temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2706         temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2707         temp |= FDI_COMPOSITE_SYNC;
2708         I915_WRITE(reg, temp | FDI_RX_ENABLE);
2709
2710         POSTING_READ(reg);
2711         udelay(150);
2712
2713         for (i = 0; i < 4; i++) {
2714                 reg = FDI_TX_CTL(pipe);
2715                 temp = I915_READ(reg);
2716                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2717                 temp |= snb_b_fdi_train_param[i];
2718                 I915_WRITE(reg, temp);
2719
2720                 POSTING_READ(reg);
2721                 udelay(500);
2722
2723                 reg = FDI_RX_IIR(pipe);
2724                 temp = I915_READ(reg);
2725                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2726
2727                 if (temp & FDI_RX_BIT_LOCK ||
2728                     (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2729                         I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2730                         DRM_DEBUG_KMS("FDI train 1 done.\n");
2731                         break;
2732                 }
2733         }
2734         if (i == 4)
2735                 DRM_ERROR("FDI train 1 fail!\n");
2736
2737         /* Train 2 */
2738         reg = FDI_TX_CTL(pipe);
2739         temp = I915_READ(reg);
2740         temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2741         temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
2742         temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2743         temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2744         I915_WRITE(reg, temp);
2745
2746         reg = FDI_RX_CTL(pipe);
2747         temp = I915_READ(reg);
2748         temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2749         temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2750         I915_WRITE(reg, temp);
2751
2752         POSTING_READ(reg);
2753         udelay(150);
2754
2755         for (i = 0; i < 4; i++) {
2756                 reg = FDI_TX_CTL(pipe);
2757                 temp = I915_READ(reg);
2758                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2759                 temp |= snb_b_fdi_train_param[i];
2760                 I915_WRITE(reg, temp);
2761
2762                 POSTING_READ(reg);
2763                 udelay(500);
2764
2765                 reg = FDI_RX_IIR(pipe);
2766                 temp = I915_READ(reg);
2767                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2768
2769                 if (temp & FDI_RX_SYMBOL_LOCK) {
2770                         I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2771                         DRM_DEBUG_KMS("FDI train 2 done.\n");
2772                         break;
2773                 }
2774         }
2775         if (i == 4)
2776                 DRM_ERROR("FDI train 2 fail!\n");
2777
2778         DRM_DEBUG_KMS("FDI train done.\n");
2779 }
2780
2781 static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
2782 {
2783         struct drm_device *dev = crtc->dev;
2784         struct drm_i915_private *dev_priv = dev->dev_private;
2785         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2786         int pipe = intel_crtc->pipe;
2787         u32 reg, temp;
2788
2789         /* Write the TU size bits so error detection works */
2790         I915_WRITE(FDI_RX_TUSIZE1(pipe),
2791                    I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
2792
2793         /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
2794         reg = FDI_RX_CTL(pipe);
2795         temp = I915_READ(reg);
2796         temp &= ~((0x7 << 19) | (0x7 << 16));
2797         temp |= (intel_crtc->fdi_lanes - 1) << 19;
2798         temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2799         I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
2800
2801         POSTING_READ(reg);
2802         udelay(200);
2803
2804         /* Switch from Rawclk to PCDclk */
2805         temp = I915_READ(reg);
2806         I915_WRITE(reg, temp | FDI_PCDCLK);
2807
2808         POSTING_READ(reg);
2809         udelay(200);
2810
2811         /* Enable CPU FDI TX PLL, always on for Ironlake */
2812         reg = FDI_TX_CTL(pipe);
2813         temp = I915_READ(reg);
2814         if ((temp & FDI_TX_PLL_ENABLE) == 0) {
2815                 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
2816
2817                 POSTING_READ(reg);
2818                 udelay(100);
2819         }
2820 }
2821
2822 static void ironlake_fdi_disable(struct drm_crtc *crtc)
2823 {
2824         struct drm_device *dev = crtc->dev;
2825         struct drm_i915_private *dev_priv = dev->dev_private;
2826         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2827         int pipe = intel_crtc->pipe;
2828         u32 reg, temp;
2829
2830         /* disable CPU FDI tx and PCH FDI rx */
2831         reg = FDI_TX_CTL(pipe);
2832         temp = I915_READ(reg);
2833         I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2834         POSTING_READ(reg);
2835
2836         reg = FDI_RX_CTL(pipe);
2837         temp = I915_READ(reg);
2838         temp &= ~(0x7 << 16);
2839         temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2840         I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2841
2842         POSTING_READ(reg);
2843         udelay(100);
2844
2845         /* Ironlake workaround, disable clock pointer after disabling FDI */
2846         if (HAS_PCH_IBX(dev)) {
2847                 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2848                 I915_WRITE(FDI_RX_CHICKEN(pipe),
2849                            I915_READ(FDI_RX_CHICKEN(pipe)) &
2850                                      ~FDI_RX_PHASE_SYNC_POINTER_EN);
2851         }
2852
2853         /* still set train pattern 1 */
2854         reg = FDI_TX_CTL(pipe);
2855         temp = I915_READ(reg);
2856         temp &= ~FDI_LINK_TRAIN_NONE;
2857         temp |= FDI_LINK_TRAIN_PATTERN_1;
2858         I915_WRITE(reg, temp);
2859
2860         reg = FDI_RX_CTL(pipe);
2861         temp = I915_READ(reg);
2862         if (HAS_PCH_CPT(dev)) {
2863                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2864                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2865         } else {
2866                 temp &= ~FDI_LINK_TRAIN_NONE;
2867                 temp |= FDI_LINK_TRAIN_PATTERN_1;
2868         }
2869         /* BPC in FDI rx is consistent with that in PIPECONF */
2870         temp &= ~(0x07 << 16);
2871         temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2872         I915_WRITE(reg, temp);
2873
2874         POSTING_READ(reg);
2875         udelay(100);
2876 }
2877
2878 /*
2879  * When we disable a pipe, we need to clear any pending scanline wait events
2880  * to avoid hanging the ring, which may be stuck waiting on them.
2881  */
2882 static void intel_clear_scanline_wait(struct drm_device *dev)
2883 {
2884         struct drm_i915_private *dev_priv = dev->dev_private;
2885         struct intel_ring_buffer *ring;
2886         u32 tmp;
2887
2888         if (IS_GEN2(dev))
2889                 /* Can't break the hang on i8xx */
2890                 return;
2891
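             /* Kick the ring out of its wait by rewriting the control register */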
2892         ring = LP_RING(dev_priv);
2893         tmp = I915_READ_CTL(ring);
2894         if (tmp & RING_WAIT)
2895                 I915_WRITE_CTL(ring, tmp);
2896 }
2897
2898 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2899 {
2900         struct drm_device *dev = crtc->dev;
2901         struct drm_i915_private *dev_priv = dev->dev_private;
2902         unsigned long flags;
2903         bool pending;
2904
2905         if (atomic_read(&dev_priv->mm.wedged))
2906                 return false;
2907
2908         spin_lock_irqsave(&dev->event_lock, flags);
2909         pending = to_intel_crtc(crtc)->unpin_work != NULL;
2910         spin_unlock_irqrestore(&dev->event_lock, flags);
2911
2912         return pending;
2913 }
2914
2915 static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2916 {
2917         struct drm_device *dev = crtc->dev;
2918         struct drm_i915_private *dev_priv = dev->dev_private;
2919
2920         if (crtc->fb == NULL)
2921                 return;
2922
2923         wait_event(dev_priv->pending_flip_queue,
2924                    !intel_crtc_has_pending_flip(crtc));
2925
2926         mutex_lock(&dev->struct_mutex);
2927         intel_finish_fb(crtc->fb);
2928         mutex_unlock(&dev->struct_mutex);
2929 }
2930
2931 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2932 {
2933         struct drm_device *dev = crtc->dev;
2934         struct drm_mode_config *mode_config = &dev->mode_config;
2935         struct intel_encoder *encoder;
2936
2937         /*
2938          * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2939          * must be driven by its own crtc; no sharing is possible.
2940          */
2941         list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
2942                 if (encoder->base.crtc != crtc)
2943                         continue;
2944
2945                 switch (encoder->type) {
2946                 case INTEL_OUTPUT_EDP:
2947                         if (!intel_encoder_is_pch_edp(&encoder->base))
2948                                 return false;
2949                         continue;
2950                 }
2951         }
2952
2953         return true;
2954 }
2955
2956 /*
2957  * Enable PCH resources required for PCH ports:
2958  *   - PCH PLLs
2959  *   - FDI training & RX/TX
2960  *   - update transcoder timings
2961  *   - DP transcoding bits
2962  *   - transcoder
2963  */
2964 static void ironlake_pch_enable(struct drm_crtc *crtc)
2965 {
2966         struct drm_device *dev = crtc->dev;
2967         struct drm_i915_private *dev_priv = dev->dev_private;
2968         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2969         int pipe = intel_crtc->pipe;
2970         u32 reg, temp, transc_sel;
2971
2972         /* For PCH output, training FDI link */
2973         dev_priv->display.fdi_link_train(crtc);
2974
2975         intel_enable_pch_pll(dev_priv, pipe);
2976
2977         if (HAS_PCH_CPT(dev)) {
2978                 transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL :
2979                         TRANSC_DPLLB_SEL;
2980
2981                 /* Be sure PCH DPLL SEL is set */
2982                 temp = I915_READ(PCH_DPLL_SEL);
2983                 if (pipe == 0) {
2984                         temp &= ~(TRANSA_DPLLB_SEL);
2985                         temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
2986                 } else if (pipe == 1) {
2987                         temp &= ~(TRANSB_DPLLB_SEL);
2988                         temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
2989                 } else if (pipe == 2) {
2990                         temp &= ~(TRANSC_DPLLB_SEL);
2991                         temp |= (TRANSC_DPLL_ENABLE | transc_sel);
2992                 }
2993                 I915_WRITE(PCH_DPLL_SEL, temp);
2994         }
2995
2996         /* set transcoder timing, panel must allow it */
2997         assert_panel_unlocked(dev_priv, pipe);
2998         I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
2999         I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
3000         I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));
3001
3002         I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
3003         I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
3004         I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
3005         I915_WRITE(TRANS_VSYNCSHIFT(pipe),  I915_READ(VSYNCSHIFT(pipe)));
3006
3007         intel_fdi_normal_train(crtc);
3008
3009         /* For PCH DP, enable TRANS_DP_CTL */
3010         if (HAS_PCH_CPT(dev) &&
3011             (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
3012              intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
3013                 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
3014                 reg = TRANS_DP_CTL(pipe);
3015                 temp = I915_READ(reg);
3016                 temp &= ~(TRANS_DP_PORT_SEL_MASK |
3017                           TRANS_DP_SYNC_MASK |
3018                           TRANS_DP_BPC_MASK);
3019                 temp |= (TRANS_DP_OUTPUT_ENABLE |
3020                          TRANS_DP_ENH_FRAMING);
3021                 temp |= bpc << 9; /* same format but at 11:9 */
3022
3023                 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
3024                         temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
3025                 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
3026                         temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
3027
3028                 switch (intel_trans_dp_port_sel(crtc)) {
3029                 case PCH_DP_B:
3030                         temp |= TRANS_DP_PORT_SEL_B;
3031                         break;
3032                 case PCH_DP_C:
3033                         temp |= TRANS_DP_PORT_SEL_C;
3034                         break;
3035                 case PCH_DP_D:
3036                         temp |= TRANS_DP_PORT_SEL_D;
3037                         break;
3038                 default:
3039                         DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
3040                         temp |= TRANS_DP_PORT_SEL_B;
3041                         break;
3042                 }
3043
3044                 I915_WRITE(reg, temp);
3045         }
3046
3047         intel_enable_transcoder(dev_priv, pipe);
3048 }
3049
3050 void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
3051 {
3052         struct drm_i915_private *dev_priv = dev->dev_private;
3053         int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
3054         u32 temp;
3055
3056         temp = I915_READ(dslreg);
3057         udelay(500);
3058         if (wait_for(I915_READ(dslreg) != temp, 5)) {
3059                 /* Without this, mode sets may fail silently on FDI */
3060                 I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
3061                 udelay(250);
3062                 I915_WRITE(tc2reg, 0);
3063                 if (wait_for(I915_READ(dslreg) != temp, 5))
3064                         DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
3065         }
3066 }
3067
3068 static void ironlake_crtc_enable(struct drm_crtc *crtc)
3069 {
3070         struct drm_device *dev = crtc->dev;
3071         struct drm_i915_private *dev_priv = dev->dev_private;
3072         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3073         int pipe = intel_crtc->pipe;
3074         int plane = intel_crtc->plane;
3075         u32 temp;
3076         bool is_pch_port;
3077
3078         if (intel_crtc->active)
3079                 return;
3080
3081         intel_crtc->active = true;
3082         intel_update_watermarks(dev);
3083
3084         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
3085                 temp = I915_READ(PCH_LVDS);
3086                 if ((temp & LVDS_PORT_EN) == 0)
3087                         I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
3088         }
3089
3090         is_pch_port = intel_crtc_driving_pch(crtc);
3091
3092         if (is_pch_port)
3093                 ironlake_fdi_pll_enable(crtc);
3094         else
3095                 ironlake_fdi_disable(crtc);
3096
3097         /* Enable panel fitting for LVDS and eDP */
3098         if (dev_priv->pch_pf_size &&
3099             (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
3100                 /* Force use of hard-coded filter coefficients
3101                  * as some pre-programmed values are broken,
3102                  * e.g. x201.
3103                  */
3104                 if (IS_IVYBRIDGE(dev))
3105                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
3106                                                  PF_PIPE_SEL_IVB(pipe));
3107                 else
3108                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
3109                 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
3110                 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
3111         }
3112
3113         /*
3114          * On ILK+ LUT must be loaded before the pipe is running but with
3115          * clocks enabled
3116          */
3117         intel_crtc_load_lut(crtc);
3118
3119         intel_enable_pipe(dev_priv, pipe, is_pch_port);
3120         intel_enable_plane(dev_priv, plane, pipe);
3121
3122         if (is_pch_port)
3123                 ironlake_pch_enable(crtc);
3124
3125         mutex_lock(&dev->struct_mutex);
3126         intel_update_fbc(dev);
3127         mutex_unlock(&dev->struct_mutex);
3128
3129         intel_crtc_update_cursor(crtc, true);
3130 }
3131
3132 static void ironlake_crtc_disable(struct drm_crtc *crtc)
3133 {
3134         struct drm_device *dev = crtc->dev;
3135         struct drm_i915_private *dev_priv = dev->dev_private;
3136         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3137         int pipe = intel_crtc->pipe;
3138         int plane = intel_crtc->plane;
3139         u32 reg, temp;
3140
3141         if (!intel_crtc->active)
3142                 return;
3143
3144         intel_crtc_wait_for_pending_flips(crtc);
3145         drm_vblank_off(dev, pipe);
3146         intel_crtc_update_cursor(crtc, false);
3147
3148         intel_disable_plane(dev_priv, plane, pipe);
3149
3150         if (dev_priv->cfb_plane == plane)
3151                 intel_disable_fbc(dev);
3152
3153         intel_disable_pipe(dev_priv, pipe);
3154
3155         /* Disable PF */
3156         I915_WRITE(PF_CTL(pipe), 0);
3157         I915_WRITE(PF_WIN_SZ(pipe), 0);
3158
3159         ironlake_fdi_disable(crtc);
3160
3161         /* This is a horrible layering violation; we should be doing this in
3162          * the connector/encoder ->prepare instead, but we don't always have
3163          * enough information there about the config to know whether it will
3164          * actually be necessary or just cause undesired flicker.
3165          */
3166         intel_disable_pch_ports(dev_priv, pipe);
3167
3168         intel_disable_transcoder(dev_priv, pipe);
3169
3170         if (HAS_PCH_CPT(dev)) {
3171                 /* disable TRANS_DP_CTL */
3172                 reg = TRANS_DP_CTL(pipe);
3173                 temp = I915_READ(reg);
3174                 temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
3175                 temp |= TRANS_DP_PORT_SEL_NONE;
3176                 I915_WRITE(reg, temp);
3177
3178                 /* disable DPLL_SEL */
3179                 temp = I915_READ(PCH_DPLL_SEL);
3180                 switch (pipe) {
3181                 case 0:
3182                         temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
3183                         break;
3184                 case 1:
3185                         temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
3186                         break;
3187                 case 2:
3188                         /* C shares PLL A or B */
3189                         temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
3190                         break;
3191                 default:
3192                         BUG(); /* wtf */
3193                 }
3194                 I915_WRITE(PCH_DPLL_SEL, temp);
3195         }
3196
3197         /* disable PCH DPLL */
3198         if (!intel_crtc->no_pll)
3199                 intel_disable_pch_pll(dev_priv, pipe);
3200
3201         /* Switch from PCDclk to Rawclk */
3202         reg = FDI_RX_CTL(pipe);
3203         temp = I915_READ(reg);
3204         I915_WRITE(reg, temp & ~FDI_PCDCLK);
3205
3206         /* Disable CPU FDI TX PLL */
3207         reg = FDI_TX_CTL(pipe);
3208         temp = I915_READ(reg);
3209         I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3210
3211         POSTING_READ(reg);
3212         udelay(100);
3213
3214         reg = FDI_RX_CTL(pipe);
3215         temp = I915_READ(reg);
3216         I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3217
3218         /* Wait for the clocks to turn off. */
3219         POSTING_READ(reg);
3220         udelay(100);
3221
3222         intel_crtc->active = false;
3223         intel_update_watermarks(dev);
3224
3225         mutex_lock(&dev->struct_mutex);
3226         intel_update_fbc(dev);
3227         intel_clear_scanline_wait(dev);
3228         mutex_unlock(&dev->struct_mutex);
3229 }
3230
3231 static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
3232 {
3233         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3234         int pipe = intel_crtc->pipe;
3235         int plane = intel_crtc->plane;
3236
3237         /* XXX: When our outputs are all unaware of DPMS modes other than off
3238          * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3239          */
3240         switch (mode) {
3241         case DRM_MODE_DPMS_ON:
3242         case DRM_MODE_DPMS_STANDBY:
3243         case DRM_MODE_DPMS_SUSPEND:
3244                 DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
3245                 ironlake_crtc_enable(crtc);
3246                 break;
3247
3248         case DRM_MODE_DPMS_OFF:
3249                 DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
3250                 ironlake_crtc_disable(crtc);
3251                 break;
3252         }
3253 }
3254
3255 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3256 {
3257         if (!enable && intel_crtc->overlay) {
3258                 struct drm_device *dev = intel_crtc->base.dev;
3259                 struct drm_i915_private *dev_priv = dev->dev_private;
3260
3261                 mutex_lock(&dev->struct_mutex);
3262                 dev_priv->mm.interruptible = false;
3263                 (void) intel_overlay_switch_off(intel_crtc->overlay);
3264                 dev_priv->mm.interruptible = true;
3265                 mutex_unlock(&dev->struct_mutex);
3266         }
3267
3268         /* Let userspace switch the overlay on again. In most cases userspace
3269          * has to recompute where to put it anyway.
3270          */
3271 }
3272
3273 static void i9xx_crtc_enable(struct drm_crtc *crtc)
3274 {
3275         struct drm_device *dev = crtc->dev;
3276         struct drm_i915_private *dev_priv = dev->dev_private;
3277         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3278         int pipe = intel_crtc->pipe;
3279         int plane = intel_crtc->plane;
3280
3281         if (intel_crtc->active)
3282                 return;
3283
3284         intel_crtc->active = true;
3285         intel_update_watermarks(dev);
3286
3287         intel_enable_pll(dev_priv, pipe);
3288         intel_enable_pipe(dev_priv, pipe, false);
3289         intel_enable_plane(dev_priv, plane, pipe);
3290
3291         intel_crtc_load_lut(crtc);
3292         intel_update_fbc(dev);
3293
3294         /* Give the overlay scaler a chance to enable if it's on this pipe */
3295         intel_crtc_dpms_overlay(intel_crtc, true);
3296         intel_crtc_update_cursor(crtc, true);
3297 }
3298
3299 static void i9xx_crtc_disable(struct drm_crtc *crtc)
3300 {
3301         struct drm_device *dev = crtc->dev;
3302         struct drm_i915_private *dev_priv = dev->dev_private;
3303         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3304         int pipe = intel_crtc->pipe;
3305         int plane = intel_crtc->plane;
3306         u32 pctl;
3307
3308         if (!intel_crtc->active)
3309                 return;
3310
3311         /* Give the overlay scaler a chance to disable if it's on this pipe */
3312         intel_crtc_wait_for_pending_flips(crtc);
3313         drm_vblank_off(dev, pipe);
3314         intel_crtc_dpms_overlay(intel_crtc, false);
3315         intel_crtc_update_cursor(crtc, false);
3316
3317         if (dev_priv->cfb_plane == plane)
3318                 intel_disable_fbc(dev);
3319
3320         intel_disable_plane(dev_priv, plane, pipe);
3321         intel_disable_pipe(dev_priv, pipe);
3322
3323         /* Disable panel fitter if it is on this pipe. */
3324         pctl = I915_READ(PFIT_CONTROL);
3325         if ((pctl & PFIT_ENABLE) &&
3326             ((pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT) == pipe)
3327                 I915_WRITE(PFIT_CONTROL, 0);
3328
3329         intel_disable_pll(dev_priv, pipe);
3330
3331         intel_crtc->active = false;
3332         intel_update_fbc(dev);
3333         intel_update_watermarks(dev);
3334         intel_clear_scanline_wait(dev);
3335 }
3336
3337 static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
3338 {
3339         /* XXX: When our outputs are all unaware of DPMS modes other than off
3340          * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3341          */
3342         switch (mode) {
3343         case DRM_MODE_DPMS_ON:
3344         case DRM_MODE_DPMS_STANDBY:
3345         case DRM_MODE_DPMS_SUSPEND:
3346                 i9xx_crtc_enable(crtc);
3347                 break;
3348         case DRM_MODE_DPMS_OFF:
3349                 i9xx_crtc_disable(crtc);
3350                 break;
3351         }
3352 }
3353
3354 /**
3355  * Sets the power management mode of the pipe and plane.
3356  */
3357 static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
3358 {
3359         struct drm_device *dev = crtc->dev;
3360         struct drm_i915_private *dev_priv = dev->dev_private;
3361         struct drm_i915_master_private *master_priv;
3362         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3363         int pipe = intel_crtc->pipe;
3364         bool enabled;
3365
3366         if (intel_crtc->dpms_mode == mode)
3367                 return;
3368
3369         intel_crtc->dpms_mode = mode;
3370
3371         dev_priv->display.dpms(crtc, mode);
3372
3373         if (!dev->primary->master)
3374                 return;
3375
3376         master_priv = dev->primary->master->driver_priv;
3377         if (!master_priv->sarea_priv)
3378                 return;
3379
3380         enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
3381
3382         switch (pipe) {
3383         case 0:
3384                 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
3385                 master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
3386                 break;
3387         case 1:
3388                 master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
3389                 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
3390                 break;
3391         default:
3392                 DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
3393                 break;
3394         }
3395 }
3396
3397 static void intel_crtc_disable(struct drm_crtc *crtc)
3398 {
3399         struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
3400         struct drm_device *dev = crtc->dev;
3401
3402         crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
3403         assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
3404         assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
3405
3406         if (crtc->fb) {
3407                 mutex_lock(&dev->struct_mutex);
3408                 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
3409                 mutex_unlock(&dev->struct_mutex);
3410         }
3411 }
3412
3413 /* Prepare for a mode set.
3414  *
3415  * Note we could be a lot smarter here.  We need to figure out which outputs
3416  * will be enabled, which disabled (in short, how the config will change)
3417  * and perform the minimum necessary steps to accomplish that, e.g. updating
3418  * watermarks, FBC configuration, making sure PLLs are programmed correctly,
3419  * panel fitting is in the proper state, etc.
3420  */
3421 static void i9xx_crtc_prepare(struct drm_crtc *crtc)
3422 {
3423         i9xx_crtc_disable(crtc);
3424 }
3425
3426 static void i9xx_crtc_commit(struct drm_crtc *crtc)
3427 {
3428         i9xx_crtc_enable(crtc);
3429 }
3430
3431 static void ironlake_crtc_prepare(struct drm_crtc *crtc)
3432 {
3433         ironlake_crtc_disable(crtc);
3434 }
3435
3436 static void ironlake_crtc_commit(struct drm_crtc *crtc)
3437 {
3438         ironlake_crtc_enable(crtc);
3439 }
3440
3441 void intel_encoder_prepare(struct drm_encoder *encoder)
3442 {
3443         struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3444         /* lvds has its own version of prepare see intel_lvds_prepare */
3445         encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
3446 }
3447
3448 void intel_encoder_commit(struct drm_encoder *encoder)
3449 {
3450         struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3451         struct drm_device *dev = encoder->dev;
3452         struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3453         struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
3454
3455         /* lvds has its own version of commit see intel_lvds_commit */
3456         encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
3457
3458         if (HAS_PCH_CPT(dev))
3459                 intel_cpt_verify_modeset(dev, intel_crtc->pipe);
3460 }
3461
3462 void intel_encoder_destroy(struct drm_encoder *encoder)
3463 {
3464         struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3465
3466         drm_encoder_cleanup(encoder);
3467         kfree(intel_encoder);
3468 }
3469
3470 static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3471                                   struct drm_display_mode *mode,
3472                                   struct drm_display_mode *adjusted_mode)
3473 {
3474         struct drm_device *dev = crtc->dev;
3475
3476         if (HAS_PCH_SPLIT(dev)) {
3477                 /* FDI link clock is fixed at 2.7G */
3478                 if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
3479                         return false;
3480         }
3481
3482         /* All interlaced capable intel hw wants timings in frames. Note though
3483          * that intel_lvds_mode_fixup does some funny tricks with the crtc
3484  * timings, so we need to be careful not to clobber these. */
3485         if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
3486                 drm_mode_set_crtcinfo(adjusted_mode, 0);
3487
3488         return true;
3489 }
3490
3491 static int i945_get_display_clock_speed(struct drm_device *dev)
3492 {
3493         return 400000;
3494 }
3495
3496 static int i915_get_display_clock_speed(struct drm_device *dev)
3497 {
3498         return 333000;
3499 }
3500
3501 static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
3502 {
3503         return 200000;
3504 }
3505
3506 static int i915gm_get_display_clock_speed(struct drm_device *dev)
3507 {
3508         u16 gcfgc = 0;
3509
3510         pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3511
3512         if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
3513                 return 133000;
3514         else {
3515                 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
3516                 case GC_DISPLAY_CLOCK_333_MHZ:
3517                         return 333000;
3518                 default:
3519                 case GC_DISPLAY_CLOCK_190_200_MHZ:
3520                         return 190000;
3521                 }
3522         }
3523 }
3524
3525 static int i865_get_display_clock_speed(struct drm_device *dev)
3526 {
3527         return 266000;
3528 }
3529
3530 static int i855_get_display_clock_speed(struct drm_device *dev)
3531 {
3532         u16 hpllcc = 0;
3533         /* Assume that the hardware is in the high speed state.  This
3534          * should be the default.
3535          */
3536         switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
3537         case GC_CLOCK_133_200:
3538         case GC_CLOCK_100_200:
3539                 return 200000;
3540         case GC_CLOCK_166_250:
3541                 return 250000;
3542         case GC_CLOCK_100_133:
3543                 return 133000;
3544         }
3545
3546         /* Shouldn't happen */
3547         return 0;
3548 }
3549
3550 static int i830_get_display_clock_speed(struct drm_device *dev)
3551 {
3552         return 133000;
3553 }
3554
3555 struct fdi_m_n {
3556         u32        tu;
3557         u32        gmch_m;
3558         u32        gmch_n;
3559         u32        link_m;
3560         u32        link_n;
3561 };
3562
3563 static void
3564 fdi_reduce_ratio(u32 *num, u32 *den)
3565 {
3566         while (*num > 0xffffff || *den > 0xffffff) {
3567                 *num >>= 1;
3568                 *den >>= 1;
3569         }
3570 }
3571
3572 static void
3573 ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
3574                      int link_clock, struct fdi_m_n *m_n)
3575 {
3576         m_n->tu = 64; /* default size */
3577
3578         /* BUG_ON(pixel_clock > INT_MAX / 36); */
3579         m_n->gmch_m = bits_per_pixel * pixel_clock;
3580         m_n->gmch_n = link_clock * nlanes * 8;
3581         fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
3582
3583         m_n->link_m = pixel_clock;
3584         m_n->link_n = link_clock;
3585         fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
3586 }
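/*
 * Worked example of the M/N computation above, with illustrative numbers
 * (not taken from any particular mode): a 24 bpp pipe, a 108000 kHz pixel
 * clock, 2 FDI lanes and a 270000 kHz link clock give
 *
 *   gmch_m = 24 * 108000    = 2592000
 *   gmch_n = 270000 * 2 * 8 = 4320000
 *   link_m = 108000
 *   link_n = 270000
 *
 * All four values already fit in 24 bits, so fdi_reduce_ratio() leaves them
 * untouched; it only right-shifts both terms when either exceeds 0xffffff,
 * trading precision for range while preserving the ratio.
 */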
3587
3588
3589 struct intel_watermark_params {
3590         unsigned long fifo_size;
3591         unsigned long max_wm;
3592         unsigned long default_wm;
3593         unsigned long guard_size;
3594         unsigned long cacheline_size;
3595 };
3596
3597 /* Pineview has different values for various configs */
3598 static const struct intel_watermark_params pineview_display_wm = {
3599         PINEVIEW_DISPLAY_FIFO,
3600         PINEVIEW_MAX_WM,
3601         PINEVIEW_DFT_WM,
3602         PINEVIEW_GUARD_WM,
3603         PINEVIEW_FIFO_LINE_SIZE
3604 };
3605 static const struct intel_watermark_params pineview_display_hplloff_wm = {
3606         PINEVIEW_DISPLAY_FIFO,
3607         PINEVIEW_MAX_WM,
3608         PINEVIEW_DFT_HPLLOFF_WM,
3609         PINEVIEW_GUARD_WM,
3610         PINEVIEW_FIFO_LINE_SIZE
3611 };
3612 static const struct intel_watermark_params pineview_cursor_wm = {
3613         PINEVIEW_CURSOR_FIFO,
3614         PINEVIEW_CURSOR_MAX_WM,
3615         PINEVIEW_CURSOR_DFT_WM,
3616         PINEVIEW_CURSOR_GUARD_WM,
3617         PINEVIEW_FIFO_LINE_SIZE,
3618 };
3619 static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
3620         PINEVIEW_CURSOR_FIFO,
3621         PINEVIEW_CURSOR_MAX_WM,
3622         PINEVIEW_CURSOR_DFT_WM,
3623         PINEVIEW_CURSOR_GUARD_WM,
3624         PINEVIEW_FIFO_LINE_SIZE
3625 };
3626 static const struct intel_watermark_params g4x_wm_info = {
3627         G4X_FIFO_SIZE,
3628         G4X_MAX_WM,
3629         G4X_MAX_WM,
3630         2,
3631         G4X_FIFO_LINE_SIZE,
3632 };
3633 static const struct intel_watermark_params g4x_cursor_wm_info = {
3634         I965_CURSOR_FIFO,
3635         I965_CURSOR_MAX_WM,
3636         I965_CURSOR_DFT_WM,
3637         2,
3638         G4X_FIFO_LINE_SIZE,
3639 };
3640 static const struct intel_watermark_params i965_cursor_wm_info = {
3641         I965_CURSOR_FIFO,
3642         I965_CURSOR_MAX_WM,
3643         I965_CURSOR_DFT_WM,
3644         2,
3645         I915_FIFO_LINE_SIZE,
3646 };
3647 static const struct intel_watermark_params i945_wm_info = {
3648         I945_FIFO_SIZE,
3649         I915_MAX_WM,
3650         1,
3651         2,
3652         I915_FIFO_LINE_SIZE
3653 };
3654 static const struct intel_watermark_params i915_wm_info = {
3655         I915_FIFO_SIZE,
3656         I915_MAX_WM,
3657         1,
3658         2,
3659         I915_FIFO_LINE_SIZE
3660 };
3661 static const struct intel_watermark_params i855_wm_info = {
3662         I855GM_FIFO_SIZE,
3663         I915_MAX_WM,
3664         1,
3665         2,
3666         I830_FIFO_LINE_SIZE
3667 };
3668 static const struct intel_watermark_params i830_wm_info = {
3669         I830_FIFO_SIZE,
3670         I915_MAX_WM,
3671         1,
3672         2,
3673         I830_FIFO_LINE_SIZE
3674 };
3675
3676 static const struct intel_watermark_params ironlake_display_wm_info = {
3677         ILK_DISPLAY_FIFO,
3678         ILK_DISPLAY_MAXWM,
3679         ILK_DISPLAY_DFTWM,
3680         2,
3681         ILK_FIFO_LINE_SIZE
3682 };
3683 static const struct intel_watermark_params ironlake_cursor_wm_info = {
3684         ILK_CURSOR_FIFO,
3685         ILK_CURSOR_MAXWM,
3686         ILK_CURSOR_DFTWM,
3687         2,
3688         ILK_FIFO_LINE_SIZE
3689 };
3690 static const struct intel_watermark_params ironlake_display_srwm_info = {
3691         ILK_DISPLAY_SR_FIFO,
3692         ILK_DISPLAY_MAX_SRWM,
3693         ILK_DISPLAY_DFT_SRWM,
3694         2,
3695         ILK_FIFO_LINE_SIZE
3696 };
3697 static const struct intel_watermark_params ironlake_cursor_srwm_info = {
3698         ILK_CURSOR_SR_FIFO,
3699         ILK_CURSOR_MAX_SRWM,
3700         ILK_CURSOR_DFT_SRWM,
3701         2,
3702         ILK_FIFO_LINE_SIZE
3703 };
3704
3705 static const struct intel_watermark_params sandybridge_display_wm_info = {
3706         SNB_DISPLAY_FIFO,
3707         SNB_DISPLAY_MAXWM,
3708         SNB_DISPLAY_DFTWM,
3709         2,
3710         SNB_FIFO_LINE_SIZE
3711 };
3712 static const struct intel_watermark_params sandybridge_cursor_wm_info = {
3713         SNB_CURSOR_FIFO,
3714         SNB_CURSOR_MAXWM,
3715         SNB_CURSOR_DFTWM,
3716         2,
3717         SNB_FIFO_LINE_SIZE
3718 };
3719 static const struct intel_watermark_params sandybridge_display_srwm_info = {
3720         SNB_DISPLAY_SR_FIFO,
3721         SNB_DISPLAY_MAX_SRWM,
3722         SNB_DISPLAY_DFT_SRWM,
3723         2,
3724         SNB_FIFO_LINE_SIZE
3725 };
3726 static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
3727         SNB_CURSOR_SR_FIFO,
3728         SNB_CURSOR_MAX_SRWM,
3729         SNB_CURSOR_DFT_SRWM,
3730         2,
3731         SNB_FIFO_LINE_SIZE
3732 };
3733
3734
3735 /**
3736  * intel_calculate_wm - calculate watermark level
3737  * @clock_in_khz: pixel clock
3738  * @wm: chip FIFO params
3739  * @pixel_size: display pixel size
3740  * @latency_ns: memory latency for the platform
3741  *
3742  * Calculate the watermark level (the level at which the display plane will
3743  * start fetching from memory again).  Each chip has a different display
3744  * FIFO size and allocation, so the caller needs to figure that out and pass
3745  * in the correct intel_watermark_params structure.
3746  *
3747  * As the pixel clock runs, the FIFO will be drained at a rate that depends
3748  * on the pixel size.  When it reaches the watermark level, it'll start
3749  * fetching FIFO-line-sized chunks from memory until the FIFO fills
3750  * past the watermark point.  If the FIFO drains completely, a FIFO underrun
3751  * will occur, and a display engine hang could result.
3752  */
3753 static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
3754                                         const struct intel_watermark_params *wm,
3755                                         int fifo_size,
3756                                         int pixel_size,
3757                                         unsigned long latency_ns)
3758 {
3759         long entries_required, wm_size;
3760
3761         /*
3762          * Note: we need to make sure we don't overflow for various clock &
3763          * latency values.
3764          * clocks go from a few thousand to several hundred thousand.
3765          * latency is usually a few thousand
3766          */
3767         entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
3768                 1000;
3769         entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
3770
3771         DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
3772
3773         wm_size = fifo_size - (entries_required + wm->guard_size);
3774
3775         DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
3776
3777         /* Don't promote wm_size to unsigned... */
3778         if (wm_size > (long)wm->max_wm)
3779                 wm_size = wm->max_wm;
3780         if (wm_size <= 0)
3781                 wm_size = wm->default_wm;
3782         return wm_size;
3783 }
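/*
 * Worked example of the calculation above, using illustrative (hypothetical)
 * parameters rather than any real platform's values: clock_in_khz = 100000,
 * pixel_size = 4 bytes, latency_ns = 5000, a 64-byte cacheline, fifo_size = 96
 * and guard_size = 2:
 *
 *   entries_required = (100000 / 1000) * 4 * 5000 / 1000 = 2000 bytes
 *                      DIV_ROUND_UP(2000, 64)            = 32 cachelines
 *   wm_size          = 96 - (32 + 2)                     = 62
 *
 * i.e. the FIFO may drain down to 62 lines before the plane must start
 * fetching again, given 5us of worst-case memory latency.
 */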
3784
3785 struct cxsr_latency {
3786         int is_desktop;
3787         int is_ddr3;
3788         unsigned long fsb_freq;
3789         unsigned long mem_freq;
3790         unsigned long display_sr;
3791         unsigned long display_hpll_disable;
3792         unsigned long cursor_sr;
3793         unsigned long cursor_hpll_disable;
3794 };
3795
3796 static const struct cxsr_latency cxsr_latency_table[] = {
3797         {1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
3798         {1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
3799         {1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
3800         {1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
3801         {1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */
3802
3803         {1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
3804         {1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
3805         {1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
3806         {1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
3807         {1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */
3808
3809         {1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
3810         {1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
3811         {1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
3812         {1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
3813         {1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */
3814
3815         {0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
3816         {0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
3817         {0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
3818         {0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
3819         {0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */
3820
3821         {0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
3822         {0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
3823         {0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
3824         {0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
3825         {0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */
3826
3827         {0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
3828         {0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
3829         {0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
3830         {0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
3831         {0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
3832 };
3833
3834 static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
3835                                                          int is_ddr3,
3836                                                          int fsb,
3837                                                          int mem)
3838 {
3839         const struct cxsr_latency *latency;
3840         int i;
3841
3842         if (fsb == 0 || mem == 0)
3843                 return NULL;
3844
3845         for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
3846                 latency = &cxsr_latency_table[i];
3847                 if (is_desktop == latency->is_desktop &&
3848                     is_ddr3 == latency->is_ddr3 &&
3849                     fsb == latency->fsb_freq && mem == latency->mem_freq)
3850                         return latency;
3851         }
3852
3853         DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
3854
3855         return NULL;
3856 }
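/*
 * Example lookup against the table above: a desktop (is_desktop = 1),
 * non-DDR3 system reporting an 800 MHz FSB and 667 MHz memory matches the
 * {1, 0, 800, 667, ...} row, so CxSR uses display_sr = 3354 ns and
 * cursor_sr = 3807 ns (33354/33807 ns with the HPLL disabled).  Any
 * FSB/memory combination not in the table returns NULL and self-refresh
 * stays disabled.
 */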
3857
3858 static void pineview_disable_cxsr(struct drm_device *dev)
3859 {
3860         struct drm_i915_private *dev_priv = dev->dev_private;
3861
3862         /* deactivate cxsr */
3863         I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
3864 }
3865
3866 /*
3867  * Latency for FIFO fetches is dependent on several factors:
3868  *   - memory configuration (speed, channels)
3869  *   - chipset
3870  *   - current MCH state
3871  * It can be fairly high in some situations, so here we assume a fairly
3872  * pessimal value.  It's a tradeoff between extra memory fetches (if we
3873  * set this value too high, the FIFO will fetch frequently to stay full)
3874  * and power consumption (set it too low to save power and we might see
3875  * FIFO underruns and display "flicker").
3876  *
3877  * A value of 5us seems to be a good balance; safe for very low end
3878  * platforms but not overly aggressive on lower latency configs.
3879  */
3880 static const int latency_ns = 5000;
3881
3882 static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
3883 {
3884         struct drm_i915_private *dev_priv = dev->dev_private;
3885         uint32_t dsparb = I915_READ(DSPARB);
3886         int size;
3887
3888         size = dsparb & 0x7f;
3889         if (plane)
3890                 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
3891
3892         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3893                       plane ? "B" : "A", size);
3894
3895         return size;
3896 }
3897
3898 static int i85x_get_fifo_size(struct drm_device *dev, int plane)
3899 {
3900         struct drm_i915_private *dev_priv = dev->dev_private;
3901         uint32_t dsparb = I915_READ(DSPARB);
3902         int size;
3903
3904         size = dsparb & 0x1ff;
3905         if (plane)
3906                 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
3907         size >>= 1; /* Convert to cachelines */
3908
3909         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3910                       plane ? "B" : "A", size);
3911
3912         return size;
3913 }
3914
3915 static int i845_get_fifo_size(struct drm_device *dev, int plane)
3916 {
3917         struct drm_i915_private *dev_priv = dev->dev_private;
3918         uint32_t dsparb = I915_READ(DSPARB);
3919         int size;
3920
3921         size = dsparb & 0x7f;
3922         size >>= 2; /* Convert to cachelines */
3923
3924         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3925                       plane ? "B" : "A",
3926                       size);
3927
3928         return size;
3929 }
3930
3931 static int i830_get_fifo_size(struct drm_device *dev, int plane)
3932 {
3933         struct drm_i915_private *dev_priv = dev->dev_private;
3934         uint32_t dsparb = I915_READ(DSPARB);
3935         int size;
3936
3937         size = dsparb & 0x7f;
3938         size >>= 1; /* Convert to cachelines */
3939
3940         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3941                       plane ? "B" : "A", size);
3942
3943         return size;
3944 }
3945
3946 static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
3947 {
3948         struct drm_crtc *crtc, *enabled = NULL;
3949
3950         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3951                 if (crtc->enabled && crtc->fb) {
3952                         if (enabled)
3953                                 return NULL;
3954                         enabled = crtc;
3955                 }
3956         }
3957
3958         return enabled;
3959 }
3960
3961 static void pineview_update_wm(struct drm_device *dev)
3962 {
3963         struct drm_i915_private *dev_priv = dev->dev_private;
3964         struct drm_crtc *crtc;
3965         const struct cxsr_latency *latency;
3966         u32 reg;
3967         unsigned long wm;
3968
3969         latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
3970                                          dev_priv->fsb_freq, dev_priv->mem_freq);
3971         if (!latency) {
3972                 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
3973                 pineview_disable_cxsr(dev);
3974                 return;
3975         }
3976
3977         crtc = single_enabled_crtc(dev);
3978         if (crtc) {
3979                 int clock = crtc->mode.clock;
3980                 int pixel_size = crtc->fb->bits_per_pixel / 8;
3981
3982                 /* Display SR */
3983                 wm = intel_calculate_wm(clock, &pineview_display_wm,
3984                                         pineview_display_wm.fifo_size,
3985                                         pixel_size, latency->display_sr);
3986                 reg = I915_READ(DSPFW1);
3987                 reg &= ~DSPFW_SR_MASK;
3988                 reg |= wm << DSPFW_SR_SHIFT;
3989                 I915_WRITE(DSPFW1, reg);
3990                 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
3991
3992                 /* cursor SR */
3993                 wm = intel_calculate_wm(clock, &pineview_cursor_wm,
3994                                         pineview_display_wm.fifo_size,
3995                                         pixel_size, latency->cursor_sr);
3996                 reg = I915_READ(DSPFW3);
3997                 reg &= ~DSPFW_CURSOR_SR_MASK;
3998                 reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
3999                 I915_WRITE(DSPFW3, reg);
4000
4001                 /* Display HPLL off SR */
4002                 wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
4003                                         pineview_display_hplloff_wm.fifo_size,
4004                                         pixel_size, latency->display_hpll_disable);
4005                 reg = I915_READ(DSPFW3);
4006                 reg &= ~DSPFW_HPLL_SR_MASK;
4007                 reg |= wm & DSPFW_HPLL_SR_MASK;
4008                 I915_WRITE(DSPFW3, reg);
4009
4010                 /* cursor HPLL off SR */
4011                 wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
4012                                         pineview_display_hplloff_wm.fifo_size,
4013                                         pixel_size, latency->cursor_hpll_disable);
4014                 reg = I915_READ(DSPFW3);
4015                 reg &= ~DSPFW_HPLL_CURSOR_MASK;
4016                 reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
4017                 I915_WRITE(DSPFW3, reg);
4018                 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
4019
4020                 /* activate cxsr */
4021                 I915_WRITE(DSPFW3,
4022                            I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
4023                 DRM_DEBUG_KMS("Self-refresh is enabled\n");
4024         } else {
4025                 pineview_disable_cxsr(dev);
4026                 DRM_DEBUG_KMS("Self-refresh is disabled\n");
4027         }
4028 }
4029
4030 static bool g4x_compute_wm0(struct drm_device *dev,
4031                             int plane,
4032                             const struct intel_watermark_params *display,
4033                             int display_latency_ns,
4034                             const struct intel_watermark_params *cursor,
4035                             int cursor_latency_ns,
4036                             int *plane_wm,
4037                             int *cursor_wm)
4038 {
4039         struct drm_crtc *crtc;
4040         int htotal, hdisplay, clock, pixel_size;
4041         int line_time_us, line_count;
4042         int entries, tlb_miss;
4043
4044         crtc = intel_get_crtc_for_plane(dev, plane);
4045         if (crtc->fb == NULL || !crtc->enabled) {
4046                 *cursor_wm = cursor->guard_size;
4047                 *plane_wm = display->guard_size;
4048                 return false;
4049         }
4050
4051         htotal = crtc->mode.htotal;
4052         hdisplay = crtc->mode.hdisplay;
4053         clock = crtc->mode.clock;
4054         pixel_size = crtc->fb->bits_per_pixel / 8;
4055
4056         /* Use the small buffer method to calculate plane watermark */
4057         entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
4058         tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
4059         if (tlb_miss > 0)
4060                 entries += tlb_miss;
4061         entries = DIV_ROUND_UP(entries, display->cacheline_size);
4062         *plane_wm = entries + display->guard_size;
4063         if (*plane_wm > (int)display->max_wm)
4064                 *plane_wm = display->max_wm;
4065
4066         /* Use the large buffer method to calculate cursor watermark */
4067         line_time_us = ((htotal * 1000) / clock);
4068         line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
4069         entries = line_count * 64 * pixel_size;
4070         tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
4071         if (tlb_miss > 0)
4072                 entries += tlb_miss;
4073         entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4074         *cursor_wm = entries + cursor->guard_size;
4075         if (*cursor_wm > (int)cursor->max_wm)
4076                 *cursor_wm = (int)cursor->max_wm;
4077
4078         return true;
4079 }
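/*
 * Sketch of the two methods above with illustrative numbers (not from any
 * particular mode): clock = 100000 kHz, pixel_size = 4, hdisplay = 1024,
 * htotal = 1344, 5000 ns latency for both plane and cursor, 64-byte
 * cachelines:
 *
 *   small buffer (plane):  (100000 * 4 / 1000) * 5000 / 1000 = 2000 bytes
 *                          DIV_ROUND_UP(2000, 64) = 32 entries + guard
 *   large buffer (cursor): line_time = 1344 * 1000 / 100000 = 13 us
 *                          line_count = (5000 / 13 + 1000) / 1000 = 1
 *                          entries = 1 * 64 * 4 = 256 bytes -> 4 lines + guard
 *
 * tlb_miss = fifo_size * cacheline_size - hdisplay * 8 is only added to the
 * entry count when it is positive, i.e. when the FIFO byte size exceeds
 * hdisplay * 8.
 */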
4080
4081 /*
4082  * Check the wm result.
4083  *
4084  * If any calculated watermark values is larger than the maximum value that
4085  * can be programmed into the associated watermark register, that watermark
4086  * must be disabled.
4087  */
4088 static bool g4x_check_srwm(struct drm_device *dev,
4089                            int display_wm, int cursor_wm,
4090                            const struct intel_watermark_params *display,
4091                            const struct intel_watermark_params *cursor)
4092 {
4093         DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
4094                       display_wm, cursor_wm);
4095
4096         if (display_wm > display->max_wm) {
4097                 DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
4098                               display_wm, display->max_wm);
4099                 return false;
4100         }
4101
4102         if (cursor_wm > cursor->max_wm) {
4103                 DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
4104                               cursor_wm, cursor->max_wm);
4105                 return false;
4106         }
4107
4108         if (!(display_wm || cursor_wm)) {
4109                 DRM_DEBUG_KMS("SR latency is 0, disabling\n");
4110                 return false;
4111         }
4112
4113         return true;
4114 }
4115
4116 static bool g4x_compute_srwm(struct drm_device *dev,
4117                              int plane,
4118                              int latency_ns,
4119                              const struct intel_watermark_params *display,
4120                              const struct intel_watermark_params *cursor,
4121                              int *display_wm, int *cursor_wm)
4122 {
4123         struct drm_crtc *crtc;
4124         int hdisplay, htotal, pixel_size, clock;
4125         unsigned long line_time_us;
4126         int line_count, line_size;
4127         int small, large;
4128         int entries;
4129
4130         if (!latency_ns) {
4131                 *display_wm = *cursor_wm = 0;
4132                 return false;
4133         }
4134
4135         crtc = intel_get_crtc_for_plane(dev, plane);
4136         hdisplay = crtc->mode.hdisplay;
4137         htotal = crtc->mode.htotal;
4138         clock = crtc->mode.clock;
4139         pixel_size = crtc->fb->bits_per_pixel / 8;
4140
4141         line_time_us = (htotal * 1000) / clock;
4142         line_count = (latency_ns / line_time_us + 1000) / 1000;
4143         line_size = hdisplay * pixel_size;
4144
4145         /* Use the minimum of the small and large buffer method for primary */
4146         small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4147         large = line_count * line_size;
4148
4149         entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
4150         *display_wm = entries + display->guard_size;
4151
4152         /* calculate the self-refresh watermark for display cursor */
4153         entries = line_count * pixel_size * 64;
4154         entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4155         *cursor_wm = entries + cursor->guard_size;
4156
4157         return g4x_check_srwm(dev,
4158                               *display_wm, *cursor_wm,
4159                               display, cursor);
4160 }
4161
4162 #define single_plane_enabled(mask) is_power_of_2(mask)
4163
4164 static void g4x_update_wm(struct drm_device *dev)
4165 {
4166         static const int sr_latency_ns = 12000;
4167         struct drm_i915_private *dev_priv = dev->dev_private;
4168         int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
4169         int plane_sr, cursor_sr;
4170         unsigned int enabled = 0;
4171
4172         if (g4x_compute_wm0(dev, 0,
4173                             &g4x_wm_info, latency_ns,
4174                             &g4x_cursor_wm_info, latency_ns,
4175                             &planea_wm, &cursora_wm))
4176                 enabled |= 1;
4177
4178         if (g4x_compute_wm0(dev, 1,
4179                             &g4x_wm_info, latency_ns,
4180                             &g4x_cursor_wm_info, latency_ns,
4181                             &planeb_wm, &cursorb_wm))
4182                 enabled |= 2;
4183
4184         plane_sr = cursor_sr = 0;
4185         if (single_plane_enabled(enabled) &&
4186             g4x_compute_srwm(dev, ffs(enabled) - 1,
4187                              sr_latency_ns,
4188                              &g4x_wm_info,
4189                              &g4x_cursor_wm_info,
4190                              &plane_sr, &cursor_sr))
4191                 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
4192         else
4193                 I915_WRITE(FW_BLC_SELF,
4194                            I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
4195
4196         DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
4197                       planea_wm, cursora_wm,
4198                       planeb_wm, cursorb_wm,
4199                       plane_sr, cursor_sr);
4200
4201         I915_WRITE(DSPFW1,
4202                    (plane_sr << DSPFW_SR_SHIFT) |
4203                    (cursorb_wm << DSPFW_CURSORB_SHIFT) |
4204                    (planeb_wm << DSPFW_PLANEB_SHIFT) |
4205                    planea_wm);
4206         I915_WRITE(DSPFW2,
4207                    (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
4208                    (cursora_wm << DSPFW_CURSORA_SHIFT));
4209         /* HPLL off in SR has some issues on G4x... disable it */
4210         I915_WRITE(DSPFW3,
4211                    (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
4212                    (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
4213 }
4214
4215 static void i965_update_wm(struct drm_device *dev)
4216 {
4217         struct drm_i915_private *dev_priv = dev->dev_private;
4218         struct drm_crtc *crtc;
4219         int srwm = 1;
4220         int cursor_sr = 16;
4221
4222         /* Calc sr entries for one plane configs */
4223         crtc = single_enabled_crtc(dev);
4224         if (crtc) {
4225                 /* self-refresh has much higher latency */
4226                 static const int sr_latency_ns = 12000;
4227                 int clock = crtc->mode.clock;
4228                 int htotal = crtc->mode.htotal;
4229                 int hdisplay = crtc->mode.hdisplay;
4230                 int pixel_size = crtc->fb->bits_per_pixel / 8;
4231                 unsigned long line_time_us;
4232                 int entries;
4233
4234                 line_time_us = ((htotal * 1000) / clock);
4235
4236                 /* Use ns/us then divide to preserve precision */
4237                 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
4238                         pixel_size * hdisplay;
4239                 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
4240                 srwm = I965_FIFO_SIZE - entries;
4241                 if (srwm < 0)
4242                         srwm = 1;
4243                 srwm &= 0x1ff;
4244                 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
4245                               entries, srwm);
4246
4247                 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
4248                         pixel_size * 64;
4249                 entries = DIV_ROUND_UP(entries,
4250                                           i965_cursor_wm_info.cacheline_size);
4251                 cursor_sr = i965_cursor_wm_info.fifo_size -
4252                         (entries + i965_cursor_wm_info.guard_size);
4253
4254                 if (cursor_sr > i965_cursor_wm_info.max_wm)
4255                         cursor_sr = i965_cursor_wm_info.max_wm;
4256
4257                 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
4258                               "cursor %d\n", srwm, cursor_sr);
4259
4260                 if (IS_CRESTLINE(dev))
4261                         I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
4262         } else {
4263                 /* Turn off self refresh if both pipes are enabled */
4264                 if (IS_CRESTLINE(dev))
4265                         I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
4266                                    & ~FW_BLC_SELF_EN);
4267         }
4268
4269         DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
4270                       srwm);
4271
4272         /* 965 has limitations... */
4273         I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
4274                    (8 << 16) | (8 << 8) | (8 << 0));
4275         I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
4276         /* update cursor SR watermark */
4277         I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
4278 }
4279
4280 static void i9xx_update_wm(struct drm_device *dev)
4281 {
4282         struct drm_i915_private *dev_priv = dev->dev_private;
4283         const struct intel_watermark_params *wm_info;
4284         uint32_t fwater_lo;
4285         uint32_t fwater_hi;
4286         int cwm, srwm = 1;
4287         int fifo_size;
4288         int planea_wm, planeb_wm;
4289         struct drm_crtc *crtc, *enabled = NULL;
4290
4291         if (IS_I945GM(dev))
4292                 wm_info = &i945_wm_info;
4293         else if (!IS_GEN2(dev))
4294                 wm_info = &i915_wm_info;
4295         else
4296                 wm_info = &i855_wm_info;
4297
4298         fifo_size = dev_priv->display.get_fifo_size(dev, 0);
4299         crtc = intel_get_crtc_for_plane(dev, 0);
4300         if (crtc->enabled && crtc->fb) {
4301                 planea_wm = intel_calculate_wm(crtc->mode.clock,
4302                                                wm_info, fifo_size,
4303                                                crtc->fb->bits_per_pixel / 8,
4304                                                latency_ns);
4305                 enabled = crtc;
4306         } else
4307                 planea_wm = fifo_size - wm_info->guard_size;
4308
4309         fifo_size = dev_priv->display.get_fifo_size(dev, 1);
4310         crtc = intel_get_crtc_for_plane(dev, 1);
4311         if (crtc->enabled && crtc->fb) {
4312                 planeb_wm = intel_calculate_wm(crtc->mode.clock,
4313                                                wm_info, fifo_size,
4314                                                crtc->fb->bits_per_pixel / 8,
4315                                                latency_ns);
4316                 if (enabled == NULL)
4317                         enabled = crtc;
4318                 else
4319                         enabled = NULL;
4320         } else
4321                 planeb_wm = fifo_size - wm_info->guard_size;
4322
4323         DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
4324
4325         /*
4326          * Overlay gets an aggressive default since video jitter is bad.
4327          */
4328         cwm = 2;
4329
4330         /* Play safe and disable self-refresh before adjusting watermarks. */
4331         if (IS_I945G(dev) || IS_I945GM(dev))
4332                 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
4333         else if (IS_I915GM(dev))
4334                 I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
4335
4336         /* Calc sr entries for one plane configs */
4337         if (HAS_FW_BLC(dev) && enabled) {
4338                 /* self-refresh has much higher latency */
4339                 static const int sr_latency_ns = 6000;
4340                 int clock = enabled->mode.clock;
4341                 int htotal = enabled->mode.htotal;
4342                 int hdisplay = enabled->mode.hdisplay;
4343                 int pixel_size = enabled->fb->bits_per_pixel / 8;
4344                 unsigned long line_time_us;
4345                 int entries;
4346
4347                 line_time_us = (htotal * 1000) / clock;
4348
4349                 /* Use ns/us then divide to preserve precision */
4350                 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
4351                         pixel_size * hdisplay;
4352                 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
4353                 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
4354                 srwm = wm_info->fifo_size - entries;
4355                 if (srwm < 0)
4356                         srwm = 1;
4357
4358                 if (IS_I945G(dev) || IS_I945GM(dev))
4359                         I915_WRITE(FW_BLC_SELF,
4360                                    FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
4361                 else if (IS_I915GM(dev))
4362                         I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
4363         }
4364
4365         DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
4366                       planea_wm, planeb_wm, cwm, srwm);
4367
4368         fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
4369         fwater_hi = (cwm & 0x1f);
4370
4371         /* Set request length to 8 cachelines per fetch */
4372         fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
4373         fwater_hi = fwater_hi | (1 << 8);
4374
4375         I915_WRITE(FW_BLC, fwater_lo);
4376         I915_WRITE(FW_BLC2, fwater_hi);
4377
4378         if (HAS_FW_BLC(dev)) {
4379                 if (enabled) {
4380                         if (IS_I945G(dev) || IS_I945GM(dev))
4381                                 I915_WRITE(FW_BLC_SELF,
4382                                            FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
4383                         else if (IS_I915GM(dev))
4384                                 I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
4385                         DRM_DEBUG_KMS("memory self refresh enabled\n");
4386                 } else
4387                         DRM_DEBUG_KMS("memory self refresh disabled\n");
4388         }
4389 }
4390
4391 static void i830_update_wm(struct drm_device *dev)
4392 {
4393         struct drm_i915_private *dev_priv = dev->dev_private;
4394         struct drm_crtc *crtc;
4395         uint32_t fwater_lo;
4396         int planea_wm;
4397
4398         crtc = single_enabled_crtc(dev);
4399         if (crtc == NULL)
4400                 return;
4401
4402         planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
4403                                        dev_priv->display.get_fifo_size(dev, 0),
4404                                        crtc->fb->bits_per_pixel / 8,
4405                                        latency_ns);
4406         fwater_lo = I915_READ(FW_BLC) & ~0xfff;
4407         fwater_lo |= (3<<8) | planea_wm;
4408
4409         DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
4410
4411         I915_WRITE(FW_BLC, fwater_lo);
4412 }
4413
4414 #define ILK_LP0_PLANE_LATENCY           700
4415 #define ILK_LP0_CURSOR_LATENCY          1300
4416
4417 /*
4418  * Check the wm result.
4419  *
4420  * If any calculated watermark value is larger than the maximum value that
4421  * can be programmed into the associated watermark register, that watermark
4422  * must be disabled.
4423  */
4424 static bool ironlake_check_srwm(struct drm_device *dev, int level,
4425                                 int fbc_wm, int display_wm, int cursor_wm,
4426                                 const struct intel_watermark_params *display,
4427                                 const struct intel_watermark_params *cursor)
4428 {
4429         struct drm_i915_private *dev_priv = dev->dev_private;
4430
4431         DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
4432                       " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
4433
4434         if (fbc_wm > SNB_FBC_MAX_SRWM) {
4435                 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
4436                               fbc_wm, SNB_FBC_MAX_SRWM, level);
4437
4438                 /* FBC has its own way to disable the FBC WM */
4439                 I915_WRITE(DISP_ARB_CTL,
4440                            I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
4441                 return false;
4442         }
4443
4444         if (display_wm > display->max_wm) {
4445                 DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
4446                               display_wm, (int)display->max_wm, level);
4447                 return false;
4448         }
4449
4450         if (cursor_wm > cursor->max_wm) {
4451                 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
4452                               cursor_wm, (int)cursor->max_wm, level);
4453                 return false;
4454         }
4455
4456         if (!(fbc_wm || display_wm || cursor_wm)) {
4457                 DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
4458                 return false;
4459         }
4460
4461         return true;
4462 }
4463
4464 /*
4465  * Compute watermark values for WM[1-3].
4466  */
4467 static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
4468                                   int latency_ns,
4469                                   const struct intel_watermark_params *display,
4470                                   const struct intel_watermark_params *cursor,
4471                                   int *fbc_wm, int *display_wm, int *cursor_wm)
4472 {
4473         struct drm_crtc *crtc;
4474         unsigned long line_time_us;
4475         int hdisplay, htotal, pixel_size, clock;
4476         int line_count, line_size;
4477         int small, large;
4478         int entries;
4479
4480         if (!latency_ns) {
4481                 *fbc_wm = *display_wm = *cursor_wm = 0;
4482                 return false;
4483         }
4484
4485         crtc = intel_get_crtc_for_plane(dev, plane);
4486         hdisplay = crtc->mode.hdisplay;
4487         htotal = crtc->mode.htotal;
4488         clock = crtc->mode.clock;
4489         pixel_size = crtc->fb->bits_per_pixel / 8;
4490
4491         line_time_us = (htotal * 1000) / clock;
4492         line_count = (latency_ns / line_time_us + 1000) / 1000;
4493         line_size = hdisplay * pixel_size;
4494
4495         /* Use the minimum of the small and large buffer method for primary */
4496         small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4497         large = line_count * line_size;
4498
4499         entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
4500         *display_wm = entries + display->guard_size;
4501
4502         /*
4503          * Spec says:
4504          * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
4505          */
4506         *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
4507
4508         /* calculate the self-refresh watermark for display cursor */
4509         entries = line_count * pixel_size * 64;
4510         entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4511         *cursor_wm = entries + cursor->guard_size;
4512
4513         return ironlake_check_srwm(dev, level,
4514                                    *fbc_wm, *display_wm, *cursor_wm,
4515                                    display, cursor);
4516 }
4517
4518 static void ironlake_update_wm(struct drm_device *dev)
4519 {
4520         struct drm_i915_private *dev_priv = dev->dev_private;
4521         int fbc_wm, plane_wm, cursor_wm;
4522         unsigned int enabled;
4523
4524         enabled = 0;
4525         if (g4x_compute_wm0(dev, 0,
4526                             &ironlake_display_wm_info,
4527                             ILK_LP0_PLANE_LATENCY,
4528                             &ironlake_cursor_wm_info,
4529                             ILK_LP0_CURSOR_LATENCY,
4530                             &plane_wm, &cursor_wm)) {
4531                 I915_WRITE(WM0_PIPEA_ILK,
4532                            (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4533                 DRM_DEBUG_KMS("FIFO watermarks for pipe A -"
4534                               " plane %d, cursor: %d\n",
4535                               plane_wm, cursor_wm);
4536                 enabled |= 1;
4537         }
4538
4539         if (g4x_compute_wm0(dev, 1,
4540                             &ironlake_display_wm_info,
4541                             ILK_LP0_PLANE_LATENCY,
4542                             &ironlake_cursor_wm_info,
4543                             ILK_LP0_CURSOR_LATENCY,
4544                             &plane_wm, &cursor_wm)) {
4545                 I915_WRITE(WM0_PIPEB_ILK,
4546                            (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4547                 DRM_DEBUG_KMS("FIFO watermarks for pipe B -"
4548                               " plane %d, cursor: %d\n",
4549                               plane_wm, cursor_wm);
4550                 enabled |= 2;
4551         }
4552
4553         /*
4554          * Calculate and update the self-refresh watermark only when one
4555          * display plane is used.
4556          */
4557         I915_WRITE(WM3_LP_ILK, 0);
4558         I915_WRITE(WM2_LP_ILK, 0);
4559         I915_WRITE(WM1_LP_ILK, 0);
4560
4561         if (!single_plane_enabled(enabled))
4562                 return;
4563         enabled = ffs(enabled) - 1;
4564
4565         /* WM1 */
4566         if (!ironlake_compute_srwm(dev, 1, enabled,
4567                                    ILK_READ_WM1_LATENCY() * 500,
4568                                    &ironlake_display_srwm_info,
4569                                    &ironlake_cursor_srwm_info,
4570                                    &fbc_wm, &plane_wm, &cursor_wm))
4571                 return;
4572
4573         I915_WRITE(WM1_LP_ILK,
4574                    WM1_LP_SR_EN |
4575                    (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4576                    (fbc_wm << WM1_LP_FBC_SHIFT) |
4577                    (plane_wm << WM1_LP_SR_SHIFT) |
4578                    cursor_wm);
4579
4580         /* WM2 */
4581         if (!ironlake_compute_srwm(dev, 2, enabled,
4582                                    ILK_READ_WM2_LATENCY() * 500,
4583                                    &ironlake_display_srwm_info,
4584                                    &ironlake_cursor_srwm_info,
4585                                    &fbc_wm, &plane_wm, &cursor_wm))
4586                 return;
4587
4588         I915_WRITE(WM2_LP_ILK,
4589                    WM2_LP_EN |
4590                    (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4591                    (fbc_wm << WM1_LP_FBC_SHIFT) |
4592                    (plane_wm << WM1_LP_SR_SHIFT) |
4593                    cursor_wm);
4594
4595         /*
4596          * WM3 is unsupported on ILK, probably because we don't have latency
4597          * data for that power state
4598          */
4599 }
4600
4601 void sandybridge_update_wm(struct drm_device *dev)
4602 {
4603         struct drm_i915_private *dev_priv = dev->dev_private;
4604         int latency = SNB_READ_WM0_LATENCY() * 100;     /* 0.1us units, converted to ns */
4605         u32 val;
4606         int fbc_wm, plane_wm, cursor_wm;
4607         unsigned int enabled;
4608
4609         enabled = 0;
4610         if (g4x_compute_wm0(dev, 0,
4611                             &sandybridge_display_wm_info, latency,
4612                             &sandybridge_cursor_wm_info, latency,
4613                             &plane_wm, &cursor_wm)) {
4614                 val = I915_READ(WM0_PIPEA_ILK);
4615                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4616                 I915_WRITE(WM0_PIPEA_ILK, val |
4617                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4618                 DRM_DEBUG_KMS("FIFO watermarks for pipe A -"
4619                               " plane %d, cursor: %d\n",
4620                               plane_wm, cursor_wm);
4621                 enabled |= 1;
4622         }
4623
4624         if (g4x_compute_wm0(dev, 1,
4625                             &sandybridge_display_wm_info, latency,
4626                             &sandybridge_cursor_wm_info, latency,
4627                             &plane_wm, &cursor_wm)) {
4628                 val = I915_READ(WM0_PIPEB_ILK);
4629                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4630                 I915_WRITE(WM0_PIPEB_ILK, val |
4631                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4632                 DRM_DEBUG_KMS("FIFO watermarks for pipe B -"
4633                               " plane %d, cursor: %d\n",
4634                               plane_wm, cursor_wm);
4635                 enabled |= 2;
4636         }
4637
4638         /* IVB has 3 pipes */
4639         if (IS_IVYBRIDGE(dev) &&
4640             g4x_compute_wm0(dev, 2,
4641                             &sandybridge_display_wm_info, latency,
4642                             &sandybridge_cursor_wm_info, latency,
4643                             &plane_wm, &cursor_wm)) {
4644                 val = I915_READ(WM0_PIPEC_IVB);
4645                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4646                 I915_WRITE(WM0_PIPEC_IVB, val |
4647                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4648                 DRM_DEBUG_KMS("FIFO watermarks for pipe C -"
4649                               " plane %d, cursor: %d\n",
4650                               plane_wm, cursor_wm);
4651                 enabled |= 1 << 2;
4652         }
4653
4654         /*
4655          * Calculate and update the self-refresh watermark only when one
4656          * display plane is used.
4657          *
4658          * SNB supports 3 levels of watermarks.
4659          *
4660          * WM1/WM2/WM3 watermarks have to be enabled in ascending order
4661          * and disabled in descending order.
4662          *
4663          */
4664         I915_WRITE(WM3_LP_ILK, 0);
4665         I915_WRITE(WM2_LP_ILK, 0);
4666         I915_WRITE(WM1_LP_ILK, 0);
4667
4668         if (!single_plane_enabled(enabled) ||
4669             dev_priv->sprite_scaling_enabled)
4670                 return;
4671         enabled = ffs(enabled) - 1;
4672
4673         /* WM1 */
4674         if (!ironlake_compute_srwm(dev, 1, enabled,
4675                                    SNB_READ_WM1_LATENCY() * 500,
4676                                    &sandybridge_display_srwm_info,
4677                                    &sandybridge_cursor_srwm_info,
4678                                    &fbc_wm, &plane_wm, &cursor_wm))
4679                 return;
4680
4681         I915_WRITE(WM1_LP_ILK,
4682                    WM1_LP_SR_EN |
4683                    (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4684                    (fbc_wm << WM1_LP_FBC_SHIFT) |
4685                    (plane_wm << WM1_LP_SR_SHIFT) |
4686                    cursor_wm);
4687
4688         /* WM2 */
4689         if (!ironlake_compute_srwm(dev, 2, enabled,
4690                                    SNB_READ_WM2_LATENCY() * 500,
4691                                    &sandybridge_display_srwm_info,
4692                                    &sandybridge_cursor_srwm_info,
4693                                    &fbc_wm, &plane_wm, &cursor_wm))
4694                 return;
4695
4696         I915_WRITE(WM2_LP_ILK,
4697                    WM2_LP_EN |
4698                    (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4699                    (fbc_wm << WM1_LP_FBC_SHIFT) |
4700                    (plane_wm << WM1_LP_SR_SHIFT) |
4701                    cursor_wm);
4702
4703         /* WM3 */
4704         if (!ironlake_compute_srwm(dev, 3, enabled,
4705                                    SNB_READ_WM3_LATENCY() * 500,
4706                                    &sandybridge_display_srwm_info,
4707                                    &sandybridge_cursor_srwm_info,
4708                                    &fbc_wm, &plane_wm, &cursor_wm))
4709                 return;
4710
4711         I915_WRITE(WM3_LP_ILK,
4712                    WM3_LP_EN |
4713                    (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4714                    (fbc_wm << WM1_LP_FBC_SHIFT) |
4715                    (plane_wm << WM1_LP_SR_SHIFT) |
4716                    cursor_wm);
4717 }
4718
4719 static bool
4720 sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
4721                               uint32_t sprite_width, int pixel_size,
4722                               const struct intel_watermark_params *display,
4723                               int display_latency_ns, int *sprite_wm)
4724 {
4725         struct drm_crtc *crtc;
4726         int clock;
4727         int entries, tlb_miss;
4728
4729         crtc = intel_get_crtc_for_plane(dev, plane);
4730         if (crtc->fb == NULL || !crtc->enabled) {
4731                 *sprite_wm = display->guard_size;
4732                 return false;
4733         }
4734
4735         clock = crtc->mode.clock;
4736
4737         /* Use the small buffer method to calculate the sprite watermark */
4738         entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
4739         tlb_miss = display->fifo_size*display->cacheline_size -
4740                 sprite_width * 8;
4741         if (tlb_miss > 0)
4742                 entries += tlb_miss;
4743         entries = DIV_ROUND_UP(entries, display->cacheline_size);
4744         *sprite_wm = entries + display->guard_size;
4745         if (*sprite_wm > (int)display->max_wm)
4746                 *sprite_wm = display->max_wm;
4747
4748         return true;
4749 }
4750
4751 static bool
4752 sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
4753                                 uint32_t sprite_width, int pixel_size,
4754                                 const struct intel_watermark_params *display,
4755                                 int latency_ns, int *sprite_wm)
4756 {
4757         struct drm_crtc *crtc;
4758         unsigned long line_time_us;
4759         int clock;
4760         int line_count, line_size;
4761         int small, large;
4762         int entries;
4763
4764         if (!latency_ns) {
4765                 *sprite_wm = 0;
4766                 return false;
4767         }
4768
4769         crtc = intel_get_crtc_for_plane(dev, plane);
4770         clock = crtc->mode.clock;
4771         if (!clock) {
4772                 *sprite_wm = 0;
4773                 return false;
4774         }
4775
4776         line_time_us = (sprite_width * 1000) / clock;
4777         if (!line_time_us) {
4778                 *sprite_wm = 0;
4779                 return false;
4780         }
4781
4782         line_count = (latency_ns / line_time_us + 1000) / 1000;
4783         line_size = sprite_width * pixel_size;
4784
4785         /* Use the minimum of the small and large buffer method for the sprite */
4786         small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4787         large = line_count * line_size;
4788
4789         entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
4790         *sprite_wm = entries + display->guard_size;
4791
4792         return *sprite_wm > 0x3ff ? false : true;
4793 }
4794
4795 static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
4796                                          uint32_t sprite_width, int pixel_size)
4797 {
4798         struct drm_i915_private *dev_priv = dev->dev_private;
4799         int latency = SNB_READ_WM0_LATENCY() * 100;     /* 0.1us units, converted to ns */
4800         u32 val;
4801         int sprite_wm, reg;
4802         int ret;
4803
4804         switch (pipe) {
4805         case 0:
4806                 reg = WM0_PIPEA_ILK;
4807                 break;
4808         case 1:
4809                 reg = WM0_PIPEB_ILK;
4810                 break;
4811         case 2:
4812                 reg = WM0_PIPEC_IVB;
4813                 break;
4814         default:
4815                 return; /* bad pipe */
4816         }
4817
4818         ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
4819                                             &sandybridge_display_wm_info,
4820                                             latency, &sprite_wm);
4821         if (!ret) {
4822                 DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
4823                               pipe);
4824                 return;
4825         }
4826
4827         val = I915_READ(reg);
4828         val &= ~WM0_PIPE_SPRITE_MASK;
4829         I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
4830         DRM_DEBUG_KMS("sprite watermarks for pipe %d - %d\n", pipe, sprite_wm);
4831
4832
4833         ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
4834                                               pixel_size,
4835                                               &sandybridge_display_srwm_info,
4836                                               SNB_READ_WM1_LATENCY() * 500,
4837                                               &sprite_wm);
4838         if (!ret) {
4839                 DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
4840                               pipe);
4841                 return;
4842         }
4843         I915_WRITE(WM1S_LP_ILK, sprite_wm);
4844
4845         /* Only IVB has two more LP watermarks for sprite */
4846         if (!IS_IVYBRIDGE(dev))
4847                 return;
4848
4849         ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
4850                                               pixel_size,
4851                                               &sandybridge_display_srwm_info,
4852                                               SNB_READ_WM2_LATENCY() * 500,
4853                                               &sprite_wm);
4854         if (!ret) {
4855                 DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
4856                               pipe);
4857                 return;
4858         }
4859         I915_WRITE(WM2S_LP_IVB, sprite_wm);
4860
4861         ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
4862                                               pixel_size,
4863                                               &sandybridge_display_srwm_info,
4864                                               SNB_READ_WM3_LATENCY() * 500,
4865                                               &sprite_wm);
4866         if (!ret) {
4867                 DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
4868                               pipe);
4869                 return;
4870         }
4871         I915_WRITE(WM3S_LP_IVB, sprite_wm);
4872 }
4873
4874 /**
4875  * intel_update_watermarks - update FIFO watermark values based on current modes
4876  *
4877  * Calculate watermark values for the various WM regs based on current mode
4878  * and plane configuration.
4879  *
4880  * There are several cases to deal with here:
4881  *   - normal (i.e. non-self-refresh)
4882  *   - self-refresh (SR) mode
4883  *   - lines are large relative to FIFO size (buffer can hold up to 2)
4884  *   - lines are small relative to FIFO size (buffer can hold more than 2
4885  *     lines), so need to account for TLB latency
4886  *
4887  *   The normal calculation is:
4888  *     watermark = dotclock * bytes per pixel * latency
4889  *   where latency is platform & configuration dependent (we assume pessimal
4890  *   values here).
4891  *
4892  *   The SR calculation is:
4893  *     watermark = (trunc(latency/line time)+1) * surface width *
4894  *       bytes per pixel
4895  *   where
4896  *     line time = htotal / dotclock
4897  *     surface width = hdisplay for normal plane and 64 for cursor
4898  *   and latency is assumed to be high, as above.
4899  *
4900  * The final value programmed to the register should always be rounded up,
4901  * and include an extra 2 entries to account for clock crossings.
4902  *
4903  * We don't use the sprite, so we can ignore that.  And on Crestline we have
4904  * to set the non-SR watermarks to 8.
4905  */
4906 static void intel_update_watermarks(struct drm_device *dev)
4907 {
4908         struct drm_i915_private *dev_priv = dev->dev_private;
4909
4910         if (dev_priv->display.update_wm)
4911                 dev_priv->display.update_wm(dev);
4912 }
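
/*
 * Illustrative sketch only (not part of the driver): a self-contained version
 * of the "normal" watermark formula documented above.  The FIFO size and
 * cacheline size parameters are hypothetical stand-ins for the values the
 * real code reads from the per-platform intel_watermark_params tables.
 */
static inline int example_normal_watermark(int dotclock_khz, int pixel_bytes,
					   int latency_ns, int fifo_entries,
					   int cacheline_bytes)
{
	/* bytes fetched while covered by memory latency:
	 * dotclock * bytes per pixel * latency, as in the comment above */
	int bytes = ((dotclock_khz * pixel_bytes / 1000) * latency_ns) / 1000;
	/* round up to whole cachelines and add 2 entries for clock crossings */
	int entries = DIV_ROUND_UP(bytes, cacheline_bytes) + 2;

	/* the watermark is whatever FIFO space is left over */
	return fifo_entries - entries;
}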
4913
4914 void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
4915                                     uint32_t sprite_width, int pixel_size)
4916 {
4917         struct drm_i915_private *dev_priv = dev->dev_private;
4918
4919         if (dev_priv->display.update_sprite_wm)
4920                 dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
4921                                                    pixel_size);
4922 }
4923
4924 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4925 {
4926         if (i915_panel_use_ssc >= 0)
4927                 return i915_panel_use_ssc != 0;
4928         return dev_priv->lvds_use_ssc
4929                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4930 }
4931
4932 /**
4933  * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
4934  * @crtc: CRTC structure
4935  * @mode: requested mode
4936  *
4937  * A pipe may be connected to one or more outputs.  Based on the depth of the
4938  * attached framebuffer, choose a good color depth to use on the pipe.
4939  *
4940  * If possible, match the pipe depth to the fb depth.  In some cases, this
4941  * isn't ideal, because the connected output supports a lesser or restricted
4942  * set of depths.  Resolve that here:
4943  *    LVDS typically supports only 6bpc, so clamp down in that case
4944  *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
4945  *    Displays may support a restricted set as well, check EDID and clamp as
4946  *      appropriate.
4947  *    DP may want to dither down to 6bpc to fit larger modes
4948  *
4949  * RETURNS:
4950  * Dithering requirement (i.e. false if display bpc and pipe bpc match,
4951  * true if they don't match).
4952  */
4953 static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4954                                          unsigned int *pipe_bpp,
4955                                          struct drm_display_mode *mode)
4956 {
4957         struct drm_device *dev = crtc->dev;
4958         struct drm_i915_private *dev_priv = dev->dev_private;
4959         struct drm_encoder *encoder;
4960         struct drm_connector *connector;
4961         unsigned int display_bpc = UINT_MAX, bpc;
4962
4963         /* Walk the encoders & connectors on this crtc, get min bpc */
4964         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
4965                 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
4966
4967                 if (encoder->crtc != crtc)
4968                         continue;
4969
4970                 if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
4971                         unsigned int lvds_bpc;
4972
4973                         if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
4974                             LVDS_A3_POWER_UP)
4975                                 lvds_bpc = 8;
4976                         else
4977                                 lvds_bpc = 6;
4978
4979                         if (lvds_bpc < display_bpc) {
4980                                 DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
4981                                 display_bpc = lvds_bpc;
4982                         }
4983                         continue;
4984                 }
4985
4986                 /* Not one of the known troublemakers, check the EDID */
4987                 list_for_each_entry(connector, &dev->mode_config.connector_list,
4988                                     head) {
4989                         if (connector->encoder != encoder)
4990                                 continue;
4991
4992                         /* Don't use an invalid EDID bpc value */
4993                         if (connector->display_info.bpc &&
4994                             connector->display_info.bpc < display_bpc) {
4995                                 DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
4996                                 display_bpc = connector->display_info.bpc;
4997                         }
4998                 }
4999
5000                 if (intel_encoder->type == INTEL_OUTPUT_EDP) {
5001                         /* Use VBT settings if we have an eDP panel */
5002                         unsigned int edp_bpc = dev_priv->edp.bpp / 3;
5003
5004                         if (edp_bpc && edp_bpc < display_bpc) {
5005                                 DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
5006                                 display_bpc = edp_bpc;
5007                         }
5008                         continue;
5009                 }
5010
5011                 /*
5012                  * HDMI is either 12 or 8, so if the display lets 10bpc sneak
5013                  * through, clamp it down.  (Note: >12bpc will be caught below.)
5014                  */
5015                 if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
5016                         if (display_bpc > 8 && display_bpc < 12) {
5017                                 DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
5018                                 display_bpc = 12;
5019                         } else {
5020                                 DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
5021                                 display_bpc = 8;
5022                         }
5023                 }
5024         }
5025
5026         if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
5027                 DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
5028                 display_bpc = 6;
5029         }
5030
5031         /*
5032          * We could just drive the pipe at the highest bpc all the time and
5033          * enable dithering as needed, but that costs bandwidth.  So choose
5034          * the minimum value that expresses the full color range of the fb but
5035          * also stays within the max display bpc discovered above.
5036          */
5037
5038         switch (crtc->fb->depth) {
5039         case 8:
5040                 bpc = 8; /* since we go through a colormap */
5041                 break;
5042         case 15:
5043         case 16:
5044                 bpc = 6; /* min is 18bpp */
5045                 break;
5046         case 24:
5047                 bpc = 8;
5048                 break;
5049         case 30:
5050                 bpc = 10;
5051                 break;
5052         case 48:
5053                 bpc = 12;
5054                 break;
5055         default:
5056                 DRM_DEBUG("unsupported depth, assuming 24 bits\n");
5057                 bpc = min((unsigned int)8, display_bpc);
5058                 break;
5059         }
5060
5061         display_bpc = min(display_bpc, bpc);
5062
5063         DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
5064                       bpc, display_bpc);
5065
5066         *pipe_bpp = display_bpc * 3;
5067
5068         return display_bpc != bpc;
5069 }
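
/*
 * Illustrative usage sketch (hypothetical helper, not the driver's actual
 * call site): a mode-set path would pick the pipe depth and the dithering
 * decision together from the return value documented above.
 */
static inline void example_pick_pipe_depth(struct drm_crtc *crtc,
					   struct drm_display_mode *mode)
{
	unsigned int pipe_bpp;
	bool dither;

	/* dither is true when the outputs force a lower bpc than the fb wants */
	dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
	DRM_DEBUG_KMS("example: pipe bpp %u, dithering %s\n",
		      pipe_bpp, dither ? "required" : "not required");
}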
5070
5071 static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
5072 {
5073         struct drm_device *dev = crtc->dev;
5074         struct drm_i915_private *dev_priv = dev->dev_private;
5075         int refclk;
5076
5077         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
5078             intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5079                 refclk = dev_priv->lvds_ssc_freq * 1000;
5080                 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
5081                               refclk / 1000);
5082         } else if (!IS_GEN2(dev)) {
5083                 refclk = 96000;
5084         } else {
5085                 refclk = 48000;
5086         }
5087
5088         return refclk;
5089 }
5090
5091 static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
5092                                       intel_clock_t *clock)
5093 {
5094         /* SDVO TV has fixed PLL values that depend on its clock range;
5095            this mirrors the VBIOS setting. */
5096         if (adjusted_mode->clock >= 100000
5097             && adjusted_mode->clock < 140500) {
5098                 clock->p1 = 2;
5099                 clock->p2 = 10;
5100                 clock->n = 3;
5101                 clock->m1 = 16;
5102                 clock->m2 = 8;
5103         } else if (adjusted_mode->clock >= 140500
5104                    && adjusted_mode->clock <= 200000) {
5105                 clock->p1 = 1;
5106                 clock->p2 = 10;
5107                 clock->n = 6;
5108                 clock->m1 = 12;
5109                 clock->m2 = 8;
5110         }
5111 }
5112
5113 static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
5114                                      intel_clock_t *clock,
5115                                      intel_clock_t *reduced_clock)
5116 {
5117         struct drm_device *dev = crtc->dev;
5118         struct drm_i915_private *dev_priv = dev->dev_private;
5119         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5120         int pipe = intel_crtc->pipe;
5121         u32 fp, fp2 = 0;
5122
5123         if (IS_PINEVIEW(dev)) {
5124                 fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
5125                 if (reduced_clock)
5126                         fp2 = (1 << reduced_clock->n) << 16 |
5127                                 reduced_clock->m1 << 8 | reduced_clock->m2;
5128         } else {
5129                 fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
5130                 if (reduced_clock)
5131                         fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
5132                                 reduced_clock->m2;
5133         }
5134
5135         I915_WRITE(FP0(pipe), fp);
5136
5137         intel_crtc->lowfreq_avail = false;
5138         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
5139             reduced_clock && i915_powersave) {
5140                 I915_WRITE(FP1(pipe), fp2);
5141                 intel_crtc->lowfreq_avail = true;
5142         } else {
5143                 I915_WRITE(FP1(pipe), fp);
5144         }
5145 }
5146
5147 static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5148                               struct drm_display_mode *mode,
5149                               struct drm_display_mode *adjusted_mode,
5150                               int x, int y,
5151                               struct drm_framebuffer *old_fb)
5152 {
5153         struct drm_device *dev = crtc->dev;
5154         struct drm_i915_private *dev_priv = dev->dev_private;
5155         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5156         int pipe = intel_crtc->pipe;
5157         int plane = intel_crtc->plane;
5158         int refclk, num_connectors = 0;
5159         intel_clock_t clock, reduced_clock;
5160         u32 dpll, dspcntr, pipeconf, vsyncshift;
5161         bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
5162         bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
5163         struct drm_mode_config *mode_config = &dev->mode_config;
5164         struct intel_encoder *encoder;
5165         const intel_limit_t *limit;
5166         int ret;
5167         u32 temp;
5168         u32 lvds_sync = 0;
5169
5170         list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5171                 if (encoder->base.crtc != crtc)
5172                         continue;
5173
5174                 switch (encoder->type) {
5175                 case INTEL_OUTPUT_LVDS:
5176                         is_lvds = true;
5177                         break;
5178                 case INTEL_OUTPUT_SDVO:
5179                 case INTEL_OUTPUT_HDMI:
5180                         is_sdvo = true;
5181                         if (encoder->needs_tv_clock)
5182                                 is_tv = true;
5183                         break;
5184                 case INTEL_OUTPUT_DVO:
5185                         is_dvo = true;
5186                         break;
5187                 case INTEL_OUTPUT_TVOUT:
5188                         is_tv = true;
5189                         break;
5190                 case INTEL_OUTPUT_ANALOG:
5191                         is_crt = true;
5192                         break;
5193                 case INTEL_OUTPUT_DISPLAYPORT:
5194                         is_dp = true;
5195                         break;
5196                 }
5197
5198                 num_connectors++;
5199         }
5200
5201         refclk = i9xx_get_refclk(crtc, num_connectors);
5202
5203         /*
5204          * Returns a set of divisors for the desired target clock with the given
5205          * refclk, or FALSE.  The returned values represent the clock equation:
5206          * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
5207          */
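	/*
	 * Worked example with hypothetical divider values: for refclk =
	 * 96000 kHz, m1 = 10, m2 = 5, n = 4, p1 = 2 and p2 = 5, the equation
	 * above gives 96000 * (5 * 12 + 7) / 6 / 2 / 5 = 107200 kHz, i.e. a
	 * ~107 MHz dot clock.  find_pll iterates over the ranges in the limit
	 * structure to pick the divider set closest to the requested clock.
	 */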
5208         limit = intel_limit(crtc, refclk);
5209         ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
5210                              &clock);
5211         if (!ok) {
5212                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
5213                 return -EINVAL;
5214         }
5215
5216         /* Ensure that the cursor is valid for the new mode before changing... */
5217         intel_crtc_update_cursor(crtc, true);
5218
5219         if (is_lvds && dev_priv->lvds_downclock_avail) {
5220                 /*
5221                  * Ensure we match the reduced clock's P to the target clock.
5222                  * If the clocks don't match, we can't switch the display clock
5223                  * by using the FP0/FP1. In such case we will disable the LVDS
5224                  * downclock feature.
5225                 */
5226                 has_reduced_clock = limit->find_pll(limit, crtc,
5227                                                     dev_priv->lvds_downclock,
5228                                                     refclk,
5229                                                     &clock,
5230                                                     &reduced_clock);
5231         }
5232
5233         if (is_sdvo && is_tv)
5234                 i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
5235
5236         i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
5237                                  &reduced_clock : NULL);
5238
5239         dpll = DPLL_VGA_MODE_DIS;
5240
5241         if (!IS_GEN2(dev)) {
5242                 if (is_lvds)
5243                         dpll |= DPLLB_MODE_LVDS;
5244                 else
5245                         dpll |= DPLLB_MODE_DAC_SERIAL;
5246                 if (is_sdvo) {
5247                         int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5248                         if (pixel_multiplier > 1) {
5249                                 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
5250                                         dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
5251                         }
5252                         dpll |= DPLL_DVO_HIGH_SPEED;
5253                 }
5254                 if (is_dp)
5255                         dpll |= DPLL_DVO_HIGH_SPEED;
5256
5257                 /* compute bitmask from p1 value */
5258                 if (IS_PINEVIEW(dev))
5259                         dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
5260                 else {
5261                         dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5262                         if (IS_G4X(dev) && has_reduced_clock)
5263                                 dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5264                 }
5265                 switch (clock.p2) {
5266                 case 5:
5267                         dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5268                         break;
5269                 case 7:
5270                         dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5271                         break;
5272                 case 10:
5273                         dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5274                         break;
5275                 case 14:
5276                         dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5277                         break;
5278                 }
5279                 if (INTEL_INFO(dev)->gen >= 4)
5280                         dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
5281         } else {
5282                 if (is_lvds) {
5283                         dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5284                 } else {
5285                         if (clock.p1 == 2)
5286                                 dpll |= PLL_P1_DIVIDE_BY_TWO;
5287                         else
5288                                 dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5289                         if (clock.p2 == 4)
5290                                 dpll |= PLL_P2_DIVIDE_BY_4;
5291                 }
5292         }
5293
5294         if (is_sdvo && is_tv)
5295                 dpll |= PLL_REF_INPUT_TVCLKINBC;
5296         else if (is_tv)
5297                 /* XXX: just matching BIOS for now */
5298                 /*      dpll |= PLL_REF_INPUT_TVCLKINBC; */
5299                 dpll |= 3;
5300         else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5301                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5302         else
5303                 dpll |= PLL_REF_INPUT_DREFCLK;
5304
5305         /* setup pipeconf */
5306         pipeconf = I915_READ(PIPECONF(pipe));
5307
5308         /* Set up the display plane register */
5309         dspcntr = DISPPLANE_GAMMA_ENABLE;
5310
5311         if (pipe == 0)
5312                 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
5313         else
5314                 dspcntr |= DISPPLANE_SEL_PIPE_B;
5315
5316         if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
5317                 /* Enable pixel doubling when the dot clock is > 90% of the (display)
5318                  * core speed.
5319                  *
5320                  * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
5321                  * pipe == 0 check?
5322                  */
5323                 if (mode->clock >
5324                     dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
5325                         pipeconf |= PIPECONF_DOUBLE_WIDE;
5326                 else
5327                         pipeconf &= ~PIPECONF_DOUBLE_WIDE;
5328         }
5329
5330         /* default to 8bpc */
5331         pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
5332         if (is_dp) {
5333                 if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
5334                         pipeconf |= PIPECONF_BPP_6 |
5335                                     PIPECONF_DITHER_EN |
5336                                     PIPECONF_DITHER_TYPE_SP;
5337                 }
5338         }
5339
5340         dpll |= DPLL_VCO_ENABLE;
5341
5342         DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
5343         drm_mode_debug_printmodeline(mode);
5344
5345         I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
5346
5347         POSTING_READ(DPLL(pipe));
5348         udelay(150);
5349
5350         /* The LVDS pin pair needs to be on before the DPLLs are enabled.
5351          * This is an exception to the general rule that mode_set doesn't turn
5352          * things on.
5353          */
5354         if (is_lvds) {
5355                 temp = I915_READ(LVDS);
5356                 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
5357                 if (pipe == 1) {
5358                         temp |= LVDS_PIPEB_SELECT;
5359                 } else {
5360                         temp &= ~LVDS_PIPEB_SELECT;
5361                 }
5362                 /* set the corresponding LVDS_BORDER bit */
5363                 temp |= dev_priv->lvds_border_bits;
5364                 /* Set the B0-B3 data pairs corresponding to whether we're going to
5365                  * set the DPLLs for dual-channel mode or not.
5366                  */
5367                 if (clock.p2 == 7)
5368                         temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
5369                 else
5370                         temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
5371
5372                 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
5373                  * appropriately here, but we need to look more thoroughly into how
5374                  * panels behave in the two modes.
5375                  */
5376                 /* set the dithering flag on LVDS as needed */
5377                 if (INTEL_INFO(dev)->gen >= 4) {
5378                         if (dev_priv->lvds_dither)
5379                                 temp |= LVDS_ENABLE_DITHER;
5380                         else
5381                                 temp &= ~LVDS_ENABLE_DITHER;
5382                 }
5383                 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5384                         lvds_sync |= LVDS_HSYNC_POLARITY;
5385                 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
5386                         lvds_sync |= LVDS_VSYNC_POLARITY;
5387                 if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
5388                     != lvds_sync) {
5389                         char flags[2] = "-+";
5390                         DRM_INFO("Changing LVDS panel from "
5391                                  "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
5392                                  flags[!(temp & LVDS_HSYNC_POLARITY)],
5393                                  flags[!(temp & LVDS_VSYNC_POLARITY)],
5394                                  flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
5395                                  flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
5396                         temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5397                         temp |= lvds_sync;
5398                 }
5399                 I915_WRITE(LVDS, temp);
5400         }
5401
5402         if (is_dp) {
5403                 intel_dp_set_m_n(crtc, mode, adjusted_mode);
5404         }
5405
5406         I915_WRITE(DPLL(pipe), dpll);
5407
5408         /* Wait for the clocks to stabilize. */
5409         POSTING_READ(DPLL(pipe));
5410         udelay(150);
5411
5412         if (INTEL_INFO(dev)->gen >= 4) {
5413                 temp = 0;
5414                 if (is_sdvo) {
5415                         temp = intel_mode_get_pixel_multiplier(adjusted_mode);
5416                         if (temp > 1)
5417                                 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5418                         else
5419                                 temp = 0;
5420                 }
5421                 I915_WRITE(DPLL_MD(pipe), temp);
5422         } else {
5423                 /* The pixel multiplier can only be updated once the
5424                  * DPLL is enabled and the clocks are stable.
5425                  *
5426                  * So write it again.
5427                  */
5428                 I915_WRITE(DPLL(pipe), dpll);
5429         }
5430
5431         if (HAS_PIPE_CXSR(dev)) {
5432                 if (intel_crtc->lowfreq_avail) {
5433                         DRM_DEBUG_KMS("enabling CxSR downclocking\n");
5434                         pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
5435                 } else {
5436                         DRM_DEBUG_KMS("disabling CxSR downclocking\n");
5437                         pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
5438                 }
5439         }
5440
5441         pipeconf &= ~PIPECONF_INTERLACE_MASK;
5442         if (!IS_GEN2(dev) &&
5443             adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5444                 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
5445                 /* the chip adds 2 halflines automatically */
5446                 adjusted_mode->crtc_vtotal -= 1;
5447                 adjusted_mode->crtc_vblank_end -= 1;
5448                 vsyncshift = adjusted_mode->crtc_hsync_start
5449                              - adjusted_mode->crtc_htotal/2;
5450         } else {
5451                 pipeconf |= PIPECONF_PROGRESSIVE;
5452                 vsyncshift = 0;
5453         }
5454
5455         if (!IS_GEN3(dev))
5456                 I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
5457
5458         I915_WRITE(HTOTAL(pipe),
5459                    (adjusted_mode->crtc_hdisplay - 1) |
5460                    ((adjusted_mode->crtc_htotal - 1) << 16));
5461         I915_WRITE(HBLANK(pipe),
5462                    (adjusted_mode->crtc_hblank_start - 1) |
5463                    ((adjusted_mode->crtc_hblank_end - 1) << 16));
5464         I915_WRITE(HSYNC(pipe),
5465                    (adjusted_mode->crtc_hsync_start - 1) |
5466                    ((adjusted_mode->crtc_hsync_end - 1) << 16));
5467
5468         I915_WRITE(VTOTAL(pipe),
5469                    (adjusted_mode->crtc_vdisplay - 1) |
5470                    ((adjusted_mode->crtc_vtotal - 1) << 16));
5471         I915_WRITE(VBLANK(pipe),
5472                    (adjusted_mode->crtc_vblank_start - 1) |
5473                    ((adjusted_mode->crtc_vblank_end - 1) << 16));
5474         I915_WRITE(VSYNC(pipe),
5475                    (adjusted_mode->crtc_vsync_start - 1) |
5476                    ((adjusted_mode->crtc_vsync_end - 1) << 16));
5477
5478         /* pipesrc and dspsize control the size that is scaled from,
5479          * which should always be the user's requested size.
5480          */
5481         I915_WRITE(DSPSIZE(plane),
5482                    ((mode->vdisplay - 1) << 16) |
5483                    (mode->hdisplay - 1));
5484         I915_WRITE(DSPPOS(plane), 0);
5485         I915_WRITE(PIPESRC(pipe),
5486                    ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
5487
5488         I915_WRITE(PIPECONF(pipe), pipeconf);
5489         POSTING_READ(PIPECONF(pipe));
5490         intel_enable_pipe(dev_priv, pipe, false);
5491
5492         intel_wait_for_vblank(dev, pipe);
5493
5494         I915_WRITE(DSPCNTR(plane), dspcntr);
5495         POSTING_READ(DSPCNTR(plane));
5496         intel_enable_plane(dev_priv, plane, pipe);
5497
5498         ret = intel_pipe_set_base(crtc, x, y, old_fb);
5499
5500         intel_update_watermarks(dev);
5501
5502         return ret;
5503 }
5504
5505 /*
5506  * Initialize reference clocks when the driver loads
5507  */
5508 void ironlake_init_pch_refclk(struct drm_device *dev)
5509 {
5510         struct drm_i915_private *dev_priv = dev->dev_private;
5511         struct drm_mode_config *mode_config = &dev->mode_config;
5512         struct intel_encoder *encoder;
5513         u32 temp;
5514         bool has_lvds = false;
5515         bool has_cpu_edp = false;
5516         bool has_pch_edp = false;
5517         bool has_panel = false;
5518         bool has_ck505 = false;
5519         bool can_ssc = false;
5520
5521         /* We need to take the global config into account */
5522         list_for_each_entry(encoder, &mode_config->encoder_list,
5523                             base.head) {
5524                 switch (encoder->type) {
5525                 case INTEL_OUTPUT_LVDS:
5526                         has_panel = true;
5527                         has_lvds = true;
5528                         break;
5529                 case INTEL_OUTPUT_EDP:
5530                         has_panel = true;
5531                         if (intel_encoder_is_pch_edp(&encoder->base))
5532                                 has_pch_edp = true;
5533                         else
5534                                 has_cpu_edp = true;
5535                         break;
5536                 }
5537         }
5538
5539         if (HAS_PCH_IBX(dev)) {
5540                 has_ck505 = dev_priv->display_clock_mode;
5541                 can_ssc = has_ck505;
5542         } else {
5543                 has_ck505 = false;
5544                 can_ssc = true;
5545         }
5546
5547         DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
5548                       has_panel, has_lvds, has_pch_edp, has_cpu_edp,
5549                       has_ck505);
5550
5551         /* Ironlake: try to set up the display reference clock before
5552          * enabling the DPLL. This is only under the driver's control
5553          * after the PCH B stepping; earlier chipset steppings should
5554          * ignore this setting.
5555          */
5556         temp = I915_READ(PCH_DREF_CONTROL);
5557         /* Always enable nonspread source */
5558         temp &= ~DREF_NONSPREAD_SOURCE_MASK;
5559
5560         if (has_ck505)
5561                 temp |= DREF_NONSPREAD_CK505_ENABLE;
5562         else
5563                 temp |= DREF_NONSPREAD_SOURCE_ENABLE;
5564
5565         if (has_panel) {
5566                 temp &= ~DREF_SSC_SOURCE_MASK;
5567                 temp |= DREF_SSC_SOURCE_ENABLE;
5568
5569                 /* SSC must be turned on before enabling the CPU output  */
5570                 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5571                         DRM_DEBUG_KMS("Using SSC on panel\n");
5572                         temp |= DREF_SSC1_ENABLE;
5573                 } else
5574                         temp &= ~DREF_SSC1_ENABLE;
5575
5576                 /* Get SSC going before enabling the outputs */
5577                 I915_WRITE(PCH_DREF_CONTROL, temp);
5578                 POSTING_READ(PCH_DREF_CONTROL);
5579                 udelay(200);
5580
5581                 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5582
5583                 /* Enable CPU source on CPU attached eDP */
5584                 if (has_cpu_edp) {
5585                         if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5586                                 DRM_DEBUG_KMS("Using SSC on eDP\n");
5587                                 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5588                         }
5589                         else
5590                                 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5591                 } else
5592                         temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5593
5594                 I915_WRITE(PCH_DREF_CONTROL, temp);
5595                 POSTING_READ(PCH_DREF_CONTROL);
5596                 udelay(200);
5597         } else {
5598                 DRM_DEBUG_KMS("Disabling SSC entirely\n");
5599
5600                 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5601
5602                 /* Turn off CPU output */
5603                 temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5604
5605                 I915_WRITE(PCH_DREF_CONTROL, temp);
5606                 POSTING_READ(PCH_DREF_CONTROL);
5607                 udelay(200);
5608
5609                 /* Turn off the SSC source */
5610                 temp &= ~DREF_SSC_SOURCE_MASK;
5611                 temp |= DREF_SSC_SOURCE_DISABLE;
5612
5613                 /* Turn off SSC1 */
5614                 temp &= ~DREF_SSC1_ENABLE;
5615
5616                 I915_WRITE(PCH_DREF_CONTROL, temp);
5617                 POSTING_READ(PCH_DREF_CONTROL);
5618                 udelay(200);
5619         }
5620 }
5621
5622 static int ironlake_get_refclk(struct drm_crtc *crtc)
5623 {
5624         struct drm_device *dev = crtc->dev;
5625         struct drm_i915_private *dev_priv = dev->dev_private;
5626         struct intel_encoder *encoder;
5627         struct drm_mode_config *mode_config = &dev->mode_config;
5628         struct intel_encoder *edp_encoder = NULL;
5629         int num_connectors = 0;
5630         bool is_lvds = false;
5631
5632         list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5633                 if (encoder->base.crtc != crtc)
5634                         continue;
5635
5636                 switch (encoder->type) {
5637                 case INTEL_OUTPUT_LVDS:
5638                         is_lvds = true;
5639                         break;
5640                 case INTEL_OUTPUT_EDP:
5641                         edp_encoder = encoder;
5642                         break;
5643                 }
5644                 num_connectors++;
5645         }
5646
5647         if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5648                 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
5649                               dev_priv->lvds_ssc_freq);
5650                 return dev_priv->lvds_ssc_freq * 1000;
5651         }
5652
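        /*
         * Otherwise use the non-SSC PCH reference of 120 MHz, returned in kHz
         * to match the mode clock units used by the PLL limit code.
         */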
5653         return 120000;
5654 }
5655
5656 static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5657                                   struct drm_display_mode *mode,
5658                                   struct drm_display_mode *adjusted_mode,
5659                                   int x, int y,
5660                                   struct drm_framebuffer *old_fb)
5661 {
5662         struct drm_device *dev = crtc->dev;
5663         struct drm_i915_private *dev_priv = dev->dev_private;
5664         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5665         int pipe = intel_crtc->pipe;
5666         int plane = intel_crtc->plane;
5667         int refclk, num_connectors = 0;
5668         intel_clock_t clock, reduced_clock;
5669         u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
5670         bool ok, has_reduced_clock = false, is_sdvo = false;
5671         bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
5672         struct intel_encoder *has_edp_encoder = NULL;
5673         struct drm_mode_config *mode_config = &dev->mode_config;
5674         struct intel_encoder *encoder;
5675         const intel_limit_t *limit;
5676         int ret;
5677         struct fdi_m_n m_n = {0};
5678         u32 temp;
5679         u32 lvds_sync = 0;
5680         int target_clock, pixel_multiplier, lane, link_bw, factor;
5681         unsigned int pipe_bpp;
5682         bool dither;
5683
5684         list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5685                 if (encoder->base.crtc != crtc)
5686                         continue;
5687
5688                 switch (encoder->type) {
5689                 case INTEL_OUTPUT_LVDS:
5690                         is_lvds = true;
5691                         break;
5692                 case INTEL_OUTPUT_SDVO:
5693                 case INTEL_OUTPUT_HDMI:
5694                         is_sdvo = true;
5695                         if (encoder->needs_tv_clock)
5696                                 is_tv = true;
5697                         break;
5698                 case INTEL_OUTPUT_TVOUT:
5699                         is_tv = true;
5700                         break;
5701                 case INTEL_OUTPUT_ANALOG:
5702                         is_crt = true;
5703                         break;
5704                 case INTEL_OUTPUT_DISPLAYPORT:
5705                         is_dp = true;
5706                         break;
5707                 case INTEL_OUTPUT_EDP:
5708                         has_edp_encoder = encoder;
5709                         break;
5710                 }
5711
5712                 num_connectors++;
5713         }
5714
5715         refclk = ironlake_get_refclk(crtc);
5716
5717         /*
5718          * Returns a set of divisors for the desired target clock with the given
5719          * refclk, or FALSE.  The returned values represent the clock equation:
5720          * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
5721          */
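        /*
         * Purely to illustrate the arithmetic (not necessarily a combination
         * the limit tables allow): refclk = 120000 kHz with n = 3, m1 = 12,
         * m2 = 8, p1 = 2, p2 = 10 gives 120000 * (5 * 14 + 10) / 5 / 2 / 10
         * = 96000 kHz, i.e. a 96 MHz dot clock.
         */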
5722         limit = intel_limit(crtc, refclk);
5723         ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
5724                              &clock);
5725         if (!ok) {
5726                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
5727                 return -EINVAL;
5728         }
5729
5730         /* Ensure that the cursor is valid for the new mode before changing... */
5731         intel_crtc_update_cursor(crtc, true);
5732
5733         if (is_lvds && dev_priv->lvds_downclock_avail) {
5734                 /*
5735                  * Ensure we match the reduced clock's P to the target clock.
5736                  * If the clocks don't match, we can't switch the display clock
5737                  * by using the FP0/FP1. In that case we disable the LVDS
5738                  * downclock feature.
5739                  */
5740                 has_reduced_clock = limit->find_pll(limit, crtc,
5741                                                     dev_priv->lvds_downclock,
5742                                                     refclk,
5743                                                     &clock,
5744                                                     &reduced_clock);
5745         }
5746         /* SDVO TV has fixed PLL values that depend on its clock range;
5747            this mirrors the VBIOS setting. */
5748         if (is_sdvo && is_tv) {
5749                 if (adjusted_mode->clock >= 100000
5750                     && adjusted_mode->clock < 140500) {
5751                         clock.p1 = 2;
5752                         clock.p2 = 10;
5753                         clock.n = 3;
5754                         clock.m1 = 16;
5755                         clock.m2 = 8;
5756                 } else if (adjusted_mode->clock >= 140500
5757                            && adjusted_mode->clock <= 200000) {
5758                         clock.p1 = 1;
5759                         clock.p2 = 10;
5760                         clock.n = 6;
5761                         clock.m1 = 12;
5762                         clock.m2 = 8;
5763                 }
5764         }
5765
5766         /* FDI link */
5767         pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5768         lane = 0;
5769         /* CPU eDP doesn't require an FDI link, so just set DP M/N
5770            according to the current link config */
5771         if (has_edp_encoder &&
5772             !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5773                 target_clock = mode->clock;
5774                 intel_edp_link_config(has_edp_encoder,
5775                                       &lane, &link_bw);
5776         } else {
5777                 /* [e]DP over FDI requires target mode clock
5778                    instead of link clock */
5779                 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
5780                         target_clock = mode->clock;
5781                 else
5782                         target_clock = adjusted_mode->clock;
5783
5784                 /* FDI is a binary signal running at ~2.7 GHz, encoding
5785                  * each output octet as 10 bits. The actual frequency
5786                  * is stored as a divider into a 100 MHz clock, and the
5787                  * mode pixel clock is stored in units of 1 kHz.
5788                  * Hence the bandwidth of each lane in terms of the mode
5789                  * signal is:
5790                  */
5791                 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
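                /*
                 * Assuming intel_fdi_link_freq() reports 27 (i.e. 2.7 GHz),
                 * link_bw works out to 27 * 100000 / 10 = 270000 kHz of
                 * octet bandwidth per lane after 8b/10b overhead.
                 */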
5792         }
5793
5794         /* determine panel color depth */
5795         temp = I915_READ(PIPECONF(pipe));
5796         temp &= ~PIPE_BPC_MASK;
5797         dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, adjusted_mode);
5798         switch (pipe_bpp) {
5799         case 18:
5800                 temp |= PIPE_6BPC;
5801                 break;
5802         case 24:
5803                 temp |= PIPE_8BPC;
5804                 break;
5805         case 30:
5806                 temp |= PIPE_10BPC;
5807                 break;
5808         case 36:
5809                 temp |= PIPE_12BPC;
5810                 break;
5811         default:
5812                 WARN(1, "intel_choose_pipe_bpp_dither returned invalid value %d\n",
5813                         pipe_bpp);
5814                 temp |= PIPE_8BPC;
5815                 pipe_bpp = 24;
5816                 break;
5817         }
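        /* pipe_bpp counts bits per pixel across all channels, so 24 means 8 bpc. */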
5818
5819         intel_crtc->bpp = pipe_bpp;
5820         I915_WRITE(PIPECONF(pipe), temp);
5821
5822         if (!lane) {
5823                 /*
5824                  * Account for spread spectrum to avoid
5825                  * oversubscribing the link. Max center spread
5826                  * is 2.5%; use 5% for safety's sake.
5827                  */
5828                 u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
5829                 lane = bps / (link_bw * 8) + 1;
5830         }
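        /*
         * For example, a 1920x1080@60 mode (target_clock ~148500 kHz) at
         * 24 bpp needs 148500 * 24 * 21 / 20 = 3742200 against a per-lane
         * budget of 270000 * 8 = 2160000 (the FDI case above), so two lanes.
         */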
5831
5832         intel_crtc->fdi_lanes = lane;
5833
5834         if (pixel_multiplier > 1)
5835                 link_bw *= pixel_multiplier;
5836         ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
5837                              &m_n);
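        /*
         * m_n now describes the ratio of pixel data rate to available link
         * bandwidth (data M/N) and of pixel clock to link clock (link M/N)
         * for the FDI/DP link computed above.
         */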
5838
5839         fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
5840         if (has_reduced_clock)
5841                 fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
5842                         reduced_clock.m2;
5843
5844         /* Enable autotuning of the PLL clock (if permissible) */
5845         factor = 21;
5846         if (is_lvds) {
5847                 if ((intel_panel_use_ssc(dev_priv) &&
5848                      dev_priv->lvds_ssc_freq == 100) ||
5849                     (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
5850                         factor = 25;
5851         } else if (is_sdvo && is_tv)
5852                 factor = 20;
5853
5854         if (clock.m < factor * clock.n)
5855                 fp |= FP_CB_TUNE;
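        /*
         * I.e. FP_CB_TUNE is set when the effective feedback multiplier m/n
         * falls below the factor chosen above (21, 25 or 20).
         */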
5856
5857         dpll = 0;
5858
5859         if (is_lvds)
5860                 dpll |= DPLLB_MODE_LVDS;
5861         else
5862                 dpll |= DPLLB_MODE_DAC_SERIAL;
5863         if (is_sdvo) {
5864                 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5865                 if (pixel_multiplier > 1) {
5866                         dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
5867                 }
5868                 dpll |= DPLL_DVO_HIGH_SPEED;
5869         }
5870         if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
5871                 dpll |= DPLL_DVO_HIGH_SPEED;
5872
5873         /* compute bitmask from p1 value */
5874         dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5875         /* also FPA1 */
5876         dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
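        /*
         * E.g. clock.p1 == 2 produces the mask 0x2 (bit 1 set), which both
         * writes above shift into their P1 post-divider fields to select a
         * post divide of 2.
         */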
5877
5878         switch (clock.p2) {
5879         case 5:
5880                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5881                 break;
5882         case 7:
5883                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5884                 break;
5885         case 10:
5886                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5887                 break;
5888         case 14:
5889                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5890                 break;
5891         }
5892
5893         if (is_sdvo && is_tv)
5894                 dpll |= PLL_REF_INPUT_TVCLKINBC;
5895         else if (is_tv)
5896                 /* XXX: just matching BIOS for now */
5897                 /*      dpll |= PLL_REF_INPUT_TVCLKINBC; */
5898                 dpll |= 3;
5899         else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5900                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5901         else
5902                 dpll |= PLL_REF_INPUT_DREFCLK;
5903
5904         /* setup pipeconf */
5905         pipeconf = I915_READ(PIPECONF(pipe));
5906
5907         /* Set up the display plane register */
5908         dspcntr = DISPPLANE_GAMMA_ENABLE;
5909
5910         DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
5911         drm_mode_debug_printmodeline(mode);
5912
5913         /* PCH eDP needs FDI, but CPU eDP does not */
5914         if (!intel_crtc->no_pll) {
5915                 if (!has_edp_encoder ||
5916                     intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5917                         I915_WRITE(PCH_FP0(pipe), fp);
5918                         I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
5919
5920                         POSTING_READ(PCH_DPLL(pipe));
5921                         udelay(150);
5922                 }
5923         } else {
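                /*
                 * The 0x7fffffff mask drops DPLL_VCO_ENABLE (bit 31) so an
                 * already-running PLL still compares equal to the dpll value
                 * computed above, which never has the enable bit set.
                 */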
5924                 if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) &&
5925                     fp == I915_READ(PCH_FP0(0))) {
5926                         intel_crtc->use_pll_a = true;
5927                         DRM_DEBUG_KMS("using pipe a dpll\n");
5928                 } else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) &&
5929                            fp == I915_READ(PCH_FP0(1))) {
5930                         intel_crtc->use_pll_a = false;
5931                         DRM_DEBUG_KMS("using pipe b dpll\n");
5932                 } else {
5933                         DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
5934                         return -EINVAL;
5935                 }
5936         }
5937
5938         /* The LVDS pin pair needs to be on before the DPLLs are enabled.
5939          * This is an exception to the general rule that mode_set doesn't turn
5940          * things on.
5941          */
5942         if (is_lvds) {
5943                 temp = I915_READ(PCH_LVDS);
5944                 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
5945                 if (HAS_PCH_CPT(dev)) {
5946                         temp &= ~PORT_TRANS_SEL_MASK;
5947                         temp |= PORT_TRANS_SEL_CPT(pipe);
5948                 } else {
5949                         if (pipe == 1)
5950                                 temp |= LVDS_PIPEB_SELECT;
5951                         else
5952                                 temp &= ~LVDS_PIPEB_SELECT;
5953                 }
5954
5955                 /* set the corresponding LVDS_BORDER bit */
5956                 temp |= dev_priv->lvds_border_bits;
5957                 /* Set the B0-B3 data pairs corresponding to whether we're going to
5958                  * set the DPLLs for dual-channel mode or not.
5959                  */
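                /*
                 * clock.p2 == 7 is the dual-channel LVDS post divider;
                 * single-channel panels use p2 == 14 (see the p2 switch above).
                 */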
5960                 if (clock.p2 == 7)
5961                         temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
5962                 else
5963                         temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
5964
5965                 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
5966                  * appropriately here, but we need to look more thoroughly into how
5967                  * panels behave in the two modes.
5968                  */
5969                 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5970                         lvds_sync |= LVDS_HSYNC_POLARITY;
5971                 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
5972                         lvds_sync |= LVDS_VSYNC_POLARITY;
5973                 if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
5974                     != lvds_sync) {
5975                         char flags[2] = "-+";
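                        /*
                         * Index 0 ('-') when the polarity bit is set (negative
                         * sync), 1 ('+') when it is clear, so the message shows
                         * the old and new sync polarities.
                         */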
5976                         DRM_INFO("Changing LVDS panel from "
5977                                  "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
5978                                  flags[!(temp & LVDS_HSYNC_POLARITY)],
5979                                  flags[!(temp & LVDS_VSYNC_POLARITY)],
5980                                  flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
5981                                  flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
5982                         temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5983                         temp |= lvds_sync;
5984                 }
5985                 I915_WRITE(PCH_LVDS, temp);
5986         }
5987