drm/i915: Check VBIOS value for determining LVDS dual channel mode, too
drivers/gpu/drm/i915/intel_display.c  [linux-2.6.git]
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/cpufreq.h>
28 #include <linux/module.h>
29 #include <linux/input.h>
30 #include <linux/i2c.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/vgaarb.h>
34 #include <drm/drm_edid.h>
35 #include "drmP.h"
36 #include "intel_drv.h"
37 #include "i915_drm.h"
38 #include "i915_drv.h"
39 #include "i915_trace.h"
40 #include "drm_dp_helper.h"
41 #include "drm_crtc_helper.h"
42 #include <linux/dma_remapping.h>
43
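/* NB: HAS_eDP expands to a check on a local variable named 'crtc', so it can
 * only be used where such a variable is in scope. */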
44 #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
45
46 bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
47 static void intel_update_watermarks(struct drm_device *dev);
48 static void intel_increase_pllclock(struct drm_crtc *crtc);
49 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
50
51 typedef struct {
52         /* given values */
53         int n;
54         int m1, m2;
55         int p1, p2;
56         /* derived values */
57         int     dot;
58         int     vco;
59         int     m;
60         int     p;
61 } intel_clock_t;
62
63 typedef struct {
64         int     min, max;
65 } intel_range_t;
66
67 typedef struct {
68         int     dot_limit;
69         int     p2_slow, p2_fast;
70 } intel_p2_t;
71
72 #define INTEL_P2_NUM                  2
73 typedef struct intel_limit intel_limit_t;
74 struct intel_limit {
75         intel_range_t   dot, vco, n, m, m1, m2, p, p1;
76         intel_p2_t          p2;
77         bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
78                         int, int, intel_clock_t *, intel_clock_t *);
79 };
80
81 /* FDI */
82 #define IRONLAKE_FDI_FREQ               2700000 /* in kHz for mode->clock */
83
84 static bool
85 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
86                     int target, int refclk, intel_clock_t *match_clock,
87                     intel_clock_t *best_clock);
88 static bool
89 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
90                         int target, int refclk, intel_clock_t *match_clock,
91                         intel_clock_t *best_clock);
92
93 static bool
94 intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
95                       int target, int refclk, intel_clock_t *match_clock,
96                       intel_clock_t *best_clock);
97 static bool
98 intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
99                            int target, int refclk, intel_clock_t *match_clock,
100                            intel_clock_t *best_clock);
101
102 static inline u32 /* units of 100MHz */
103 intel_fdi_link_freq(struct drm_device *dev)
104 {
105         if (IS_GEN5(dev)) {
106                 struct drm_i915_private *dev_priv = dev->dev_private;
107                 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
108         } else
109                 return 27;
110 }
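/* The default of 27 above corresponds to the 2.7 GHz FDI link
 * (cf. IRONLAKE_FDI_FREQ), since the return value is in units of 100MHz. */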
111
112 static const intel_limit_t intel_limits_i8xx_dvo = {
113         .dot = { .min = 25000, .max = 350000 },
114         .vco = { .min = 930000, .max = 1400000 },
115         .n = { .min = 3, .max = 16 },
116         .m = { .min = 96, .max = 140 },
117         .m1 = { .min = 18, .max = 26 },
118         .m2 = { .min = 6, .max = 16 },
119         .p = { .min = 4, .max = 128 },
120         .p1 = { .min = 2, .max = 33 },
121         .p2 = { .dot_limit = 165000,
122                 .p2_slow = 4, .p2_fast = 2 },
123         .find_pll = intel_find_best_PLL,
124 };
125
126 static const intel_limit_t intel_limits_i8xx_lvds = {
127         .dot = { .min = 25000, .max = 350000 },
128         .vco = { .min = 930000, .max = 1400000 },
129         .n = { .min = 3, .max = 16 },
130         .m = { .min = 96, .max = 140 },
131         .m1 = { .min = 18, .max = 26 },
132         .m2 = { .min = 6, .max = 16 },
133         .p = { .min = 4, .max = 128 },
134         .p1 = { .min = 1, .max = 6 },
135         .p2 = { .dot_limit = 165000,
136                 .p2_slow = 14, .p2_fast = 7 },
137         .find_pll = intel_find_best_PLL,
138 };
139
140 static const intel_limit_t intel_limits_i9xx_sdvo = {
141         .dot = { .min = 20000, .max = 400000 },
142         .vco = { .min = 1400000, .max = 2800000 },
143         .n = { .min = 1, .max = 6 },
144         .m = { .min = 70, .max = 120 },
145         .m1 = { .min = 10, .max = 22 },
146         .m2 = { .min = 5, .max = 9 },
147         .p = { .min = 5, .max = 80 },
148         .p1 = { .min = 1, .max = 8 },
149         .p2 = { .dot_limit = 200000,
150                 .p2_slow = 10, .p2_fast = 5 },
151         .find_pll = intel_find_best_PLL,
152 };
153
154 static const intel_limit_t intel_limits_i9xx_lvds = {
155         .dot = { .min = 20000, .max = 400000 },
156         .vco = { .min = 1400000, .max = 2800000 },
157         .n = { .min = 1, .max = 6 },
158         .m = { .min = 70, .max = 120 },
159         .m1 = { .min = 10, .max = 22 },
160         .m2 = { .min = 5, .max = 9 },
161         .p = { .min = 7, .max = 98 },
162         .p1 = { .min = 1, .max = 8 },
163         .p2 = { .dot_limit = 112000,
164                 .p2_slow = 14, .p2_fast = 7 },
165         .find_pll = intel_find_best_PLL,
166 };
167
168
169 static const intel_limit_t intel_limits_g4x_sdvo = {
170         .dot = { .min = 25000, .max = 270000 },
171         .vco = { .min = 1750000, .max = 3500000},
172         .n = { .min = 1, .max = 4 },
173         .m = { .min = 104, .max = 138 },
174         .m1 = { .min = 17, .max = 23 },
175         .m2 = { .min = 5, .max = 11 },
176         .p = { .min = 10, .max = 30 },
177         .p1 = { .min = 1, .max = 3},
178         .p2 = { .dot_limit = 270000,
179                 .p2_slow = 10,
180                 .p2_fast = 10
181         },
182         .find_pll = intel_g4x_find_best_PLL,
183 };
184
185 static const intel_limit_t intel_limits_g4x_hdmi = {
186         .dot = { .min = 22000, .max = 400000 },
187         .vco = { .min = 1750000, .max = 3500000},
188         .n = { .min = 1, .max = 4 },
189         .m = { .min = 104, .max = 138 },
190         .m1 = { .min = 16, .max = 23 },
191         .m2 = { .min = 5, .max = 11 },
192         .p = { .min = 5, .max = 80 },
193         .p1 = { .min = 1, .max = 8},
194         .p2 = { .dot_limit = 165000,
195                 .p2_slow = 10, .p2_fast = 5 },
196         .find_pll = intel_g4x_find_best_PLL,
197 };
198
199 static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
200         .dot = { .min = 20000, .max = 115000 },
201         .vco = { .min = 1750000, .max = 3500000 },
202         .n = { .min = 1, .max = 3 },
203         .m = { .min = 104, .max = 138 },
204         .m1 = { .min = 17, .max = 23 },
205         .m2 = { .min = 5, .max = 11 },
206         .p = { .min = 28, .max = 112 },
207         .p1 = { .min = 2, .max = 8 },
208         .p2 = { .dot_limit = 0,
209                 .p2_slow = 14, .p2_fast = 14
210         },
211         .find_pll = intel_g4x_find_best_PLL,
212 };
213
214 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
215         .dot = { .min = 80000, .max = 224000 },
216         .vco = { .min = 1750000, .max = 3500000 },
217         .n = { .min = 1, .max = 3 },
218         .m = { .min = 104, .max = 138 },
219         .m1 = { .min = 17, .max = 23 },
220         .m2 = { .min = 5, .max = 11 },
221         .p = { .min = 14, .max = 42 },
222         .p1 = { .min = 2, .max = 6 },
223         .p2 = { .dot_limit = 0,
224                 .p2_slow = 7, .p2_fast = 7
225         },
226         .find_pll = intel_g4x_find_best_PLL,
227 };
228
229 static const intel_limit_t intel_limits_g4x_display_port = {
230         .dot = { .min = 161670, .max = 227000 },
231         .vco = { .min = 1750000, .max = 3500000},
232         .n = { .min = 1, .max = 2 },
233         .m = { .min = 97, .max = 108 },
234         .m1 = { .min = 0x10, .max = 0x12 },
235         .m2 = { .min = 0x05, .max = 0x06 },
236         .p = { .min = 10, .max = 20 },
237         .p1 = { .min = 1, .max = 2},
238         .p2 = { .dot_limit = 0,
239                 .p2_slow = 10, .p2_fast = 10 },
240         .find_pll = intel_find_pll_g4x_dp,
241 };
242
243 static const intel_limit_t intel_limits_pineview_sdvo = {
244         .dot = { .min = 20000, .max = 400000},
245         .vco = { .min = 1700000, .max = 3500000 },
246         /* Pineview's N counter is a ring counter */
247         .n = { .min = 3, .max = 6 },
248         .m = { .min = 2, .max = 256 },
249         /* Pineview only has one combined m divider, which we treat as m2. */
250         .m1 = { .min = 0, .max = 0 },
251         .m2 = { .min = 0, .max = 254 },
252         .p = { .min = 5, .max = 80 },
253         .p1 = { .min = 1, .max = 8 },
254         .p2 = { .dot_limit = 200000,
255                 .p2_slow = 10, .p2_fast = 5 },
256         .find_pll = intel_find_best_PLL,
257 };
258
259 static const intel_limit_t intel_limits_pineview_lvds = {
260         .dot = { .min = 20000, .max = 400000 },
261         .vco = { .min = 1700000, .max = 3500000 },
262         .n = { .min = 3, .max = 6 },
263         .m = { .min = 2, .max = 256 },
264         .m1 = { .min = 0, .max = 0 },
265         .m2 = { .min = 0, .max = 254 },
266         .p = { .min = 7, .max = 112 },
267         .p1 = { .min = 1, .max = 8 },
268         .p2 = { .dot_limit = 112000,
269                 .p2_slow = 14, .p2_fast = 14 },
270         .find_pll = intel_find_best_PLL,
271 };
272
273 /* Ironlake / Sandybridge
274  *
275  * We calculate clock using (register_value + 2) for N/M1/M2, so here
276  * the range value for them is (actual_value - 2).
277  */
278 static const intel_limit_t intel_limits_ironlake_dac = {
279         .dot = { .min = 25000, .max = 350000 },
280         .vco = { .min = 1760000, .max = 3510000 },
281         .n = { .min = 1, .max = 5 },
282         .m = { .min = 79, .max = 127 },
283         .m1 = { .min = 12, .max = 22 },
284         .m2 = { .min = 5, .max = 9 },
285         .p = { .min = 5, .max = 80 },
286         .p1 = { .min = 1, .max = 8 },
287         .p2 = { .dot_limit = 225000,
288                 .p2_slow = 10, .p2_fast = 5 },
289         .find_pll = intel_g4x_find_best_PLL,
290 };
291
292 static const intel_limit_t intel_limits_ironlake_single_lvds = {
293         .dot = { .min = 25000, .max = 350000 },
294         .vco = { .min = 1760000, .max = 3510000 },
295         .n = { .min = 1, .max = 3 },
296         .m = { .min = 79, .max = 118 },
297         .m1 = { .min = 12, .max = 22 },
298         .m2 = { .min = 5, .max = 9 },
299         .p = { .min = 28, .max = 112 },
300         .p1 = { .min = 2, .max = 8 },
301         .p2 = { .dot_limit = 225000,
302                 .p2_slow = 14, .p2_fast = 14 },
303         .find_pll = intel_g4x_find_best_PLL,
304 };
305
306 static const intel_limit_t intel_limits_ironlake_dual_lvds = {
307         .dot = { .min = 25000, .max = 350000 },
308         .vco = { .min = 1760000, .max = 3510000 },
309         .n = { .min = 1, .max = 3 },
310         .m = { .min = 79, .max = 127 },
311         .m1 = { .min = 12, .max = 22 },
312         .m2 = { .min = 5, .max = 9 },
313         .p = { .min = 14, .max = 56 },
314         .p1 = { .min = 2, .max = 8 },
315         .p2 = { .dot_limit = 225000,
316                 .p2_slow = 7, .p2_fast = 7 },
317         .find_pll = intel_g4x_find_best_PLL,
318 };
319
320 /* LVDS 100MHz refclk limits. */
321 static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
322         .dot = { .min = 25000, .max = 350000 },
323         .vco = { .min = 1760000, .max = 3510000 },
324         .n = { .min = 1, .max = 2 },
325         .m = { .min = 79, .max = 126 },
326         .m1 = { .min = 12, .max = 22 },
327         .m2 = { .min = 5, .max = 9 },
328         .p = { .min = 28, .max = 112 },
329         .p1 = { .min = 2, .max = 8 },
330         .p2 = { .dot_limit = 225000,
331                 .p2_slow = 14, .p2_fast = 14 },
332         .find_pll = intel_g4x_find_best_PLL,
333 };
334
335 static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
336         .dot = { .min = 25000, .max = 350000 },
337         .vco = { .min = 1760000, .max = 3510000 },
338         .n = { .min = 1, .max = 3 },
339         .m = { .min = 79, .max = 126 },
340         .m1 = { .min = 12, .max = 22 },
341         .m2 = { .min = 5, .max = 9 },
342         .p = { .min = 14, .max = 42 },
343         .p1 = { .min = 2, .max = 6 },
344         .p2 = { .dot_limit = 225000,
345                 .p2_slow = 7, .p2_fast = 7 },
346         .find_pll = intel_g4x_find_best_PLL,
347 };
348
349 static const intel_limit_t intel_limits_ironlake_display_port = {
350         .dot = { .min = 25000, .max = 350000 },
351         .vco = { .min = 1760000, .max = 3510000},
352         .n = { .min = 1, .max = 2 },
353         .m = { .min = 81, .max = 90 },
354         .m1 = { .min = 12, .max = 22 },
355         .m2 = { .min = 5, .max = 9 },
356         .p = { .min = 10, .max = 20 },
357         .p1 = { .min = 1, .max = 2},
358         .p2 = { .dot_limit = 0,
359                 .p2_slow = 10, .p2_fast = 10 },
360         .find_pll = intel_find_pll_ironlake_dp,
361 };
362
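/*
 * is_dual_link_lvds - check whether the LVDS port is wired for dual-channel
 *
 * Prefer the cached lvds_val; otherwise read the LVDS register and, if the
 * BIOS left it uninitialized (e.g. because the lid was closed at boot), fall
 * back to the value recorded in the VBT before caching the result.
 */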
363 static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
364                               unsigned int reg)
365 {
366         unsigned int val;
367
368         if (dev_priv->lvds_val)
369                 val = dev_priv->lvds_val;
370         else {
371                 /* The BIOS should program the LVDS register at boot, but in
372                  * practice it leaves the register untouched when the lid is
373                  * closed; fall back to the value the VBT says the BIOS would
374                  * have written whenever the register is still uninitialized.
375                  */
376                 val = I915_READ(reg);
377                 if (!(val & ~LVDS_DETECTED))
378                         val = dev_priv->bios_lvds_val;
379                 dev_priv->lvds_val = val;
380         }
381         return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
382 }
383
384 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
385                                                 int refclk)
386 {
387         struct drm_device *dev = crtc->dev;
388         struct drm_i915_private *dev_priv = dev->dev_private;
389         const intel_limit_t *limit;
390
391         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
392                 if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
393                         /* LVDS dual channel */
394                         if (refclk == 100000)
395                                 limit = &intel_limits_ironlake_dual_lvds_100m;
396                         else
397                                 limit = &intel_limits_ironlake_dual_lvds;
398                 } else {
399                         if (refclk == 100000)
400                                 limit = &intel_limits_ironlake_single_lvds_100m;
401                         else
402                                 limit = &intel_limits_ironlake_single_lvds;
403                 }
404         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
405                         HAS_eDP)
406                 limit = &intel_limits_ironlake_display_port;
407         else
408                 limit = &intel_limits_ironlake_dac;
409
410         return limit;
411 }
412
413 static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
414 {
415         struct drm_device *dev = crtc->dev;
416         struct drm_i915_private *dev_priv = dev->dev_private;
417         const intel_limit_t *limit;
418
419         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
420                 if (is_dual_link_lvds(dev_priv, LVDS))
421                         /* LVDS with dual channel */
422                         limit = &intel_limits_g4x_dual_channel_lvds;
423                 else
424                         /* LVDS with single channel */
425                         limit = &intel_limits_g4x_single_channel_lvds;
426         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
427                    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
428                 limit = &intel_limits_g4x_hdmi;
429         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
430                 limit = &intel_limits_g4x_sdvo;
431         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
432                 limit = &intel_limits_g4x_display_port;
433         } else /* everything else falls back to the i9xx SDVO limits */
434                 limit = &intel_limits_i9xx_sdvo;
435
436         return limit;
437 }
438
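/*
 * Select the PLL divisor limits for this crtc based on the platform
 * generation, the type of output attached and, for PCH-split LVDS, the
 * reference clock in use.
 */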
439 static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
440 {
441         struct drm_device *dev = crtc->dev;
442         const intel_limit_t *limit;
443
444         if (HAS_PCH_SPLIT(dev))
445                 limit = intel_ironlake_limit(crtc, refclk);
446         else if (IS_G4X(dev)) {
447                 limit = intel_g4x_limit(crtc);
448         } else if (IS_PINEVIEW(dev)) {
449                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
450                         limit = &intel_limits_pineview_lvds;
451                 else
452                         limit = &intel_limits_pineview_sdvo;
453         } else if (!IS_GEN2(dev)) {
454                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
455                         limit = &intel_limits_i9xx_lvds;
456                 else
457                         limit = &intel_limits_i9xx_sdvo;
458         } else {
459                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
460                         limit = &intel_limits_i8xx_lvds;
461                 else
462                         limit = &intel_limits_i8xx_dvo;
463         }
464         return limit;
465 }
466
467 /* m1 is reserved as 0 in Pineview, n is a ring counter */
468 static void pineview_clock(int refclk, intel_clock_t *clock)
469 {
470         clock->m = clock->m2 + 2;
471         clock->p = clock->p1 * clock->p2;
472         clock->vco = refclk * clock->m / clock->n;
473         clock->dot = clock->vco / clock->p;
474 }
475
476 static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
477 {
478         if (IS_PINEVIEW(dev)) {
479                 pineview_clock(refclk, clock);
480                 return;
481         }
482         clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
483         clock->p = clock->p1 * clock->p2;
484         clock->vco = refclk * clock->m / (clock->n + 2);
485         clock->dot = clock->vco / clock->p;
486 }
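/*
 * Taken together, the non-Pineview path above computes
 *   dot = refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / (p1 * p2)
 * while Pineview uses m = m2 + 2 and divides by n directly; the +2 offsets
 * reflect the register encoding (cf. the Ironlake limits comment above).
 */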
487
488 /**
489  * Returns whether any output on the specified pipe is of the specified type
490  */
491 bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
492 {
493         struct drm_device *dev = crtc->dev;
494         struct drm_mode_config *mode_config = &dev->mode_config;
495         struct intel_encoder *encoder;
496
497         list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
498                 if (encoder->base.crtc == crtc && encoder->type == type)
499                         return true;
500
501         return false;
502 }
503
504 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
505 /**
506  * Returns whether the given set of divisors are valid for a given refclk with
507  * the given connectors.
508  */
509
510 static bool intel_PLL_is_valid(struct drm_device *dev,
511                                const intel_limit_t *limit,
512                                const intel_clock_t *clock)
513 {
514         if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
515                 INTELPllInvalid("p1 out of range\n");
516         if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
517                 INTELPllInvalid("p out of range\n");
518         if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
519                 INTELPllInvalid("m2 out of range\n");
520         if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
521                 INTELPllInvalid("m1 out of range\n");
522         if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
523                 INTELPllInvalid("m1 <= m2\n");
524         if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
525                 INTELPllInvalid("m out of range\n");
526         if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
527                 INTELPllInvalid("n out of range\n");
528         if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
529                 INTELPllInvalid("vco out of range\n");
530         /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
531          * connector, etc., rather than just a single range.
532          */
533         if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
534                 INTELPllInvalid("dot out of range\n");
535
536         return true;
537 }
538
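/*
 * Exhaustively walk the divisor ranges in @limit and keep the combination
 * whose dot clock lands closest to @target.  Returns false if no valid
 * combination gets closer to @target than @target itself.
 */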
539 static bool
540 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
541                     int target, int refclk, intel_clock_t *match_clock,
542                     intel_clock_t *best_clock)
543
544 {
545         struct drm_device *dev = crtc->dev;
546         struct drm_i915_private *dev_priv = dev->dev_private;
547         intel_clock_t clock;
548         int err = target;
549
550         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
551             (I915_READ(LVDS)) != 0) {
552                 /*
553                  * For LVDS, if the panel is on, just rely on its current
554                  * settings for dual-channel.  We haven't figured out how to
555                  * reliably set up different single/dual channel state, if we
556                  * even can.
557                  */
558                 if (is_dual_link_lvds(dev_priv, LVDS))
559                         clock.p2 = limit->p2.p2_fast;
560                 else
561                         clock.p2 = limit->p2.p2_slow;
562         } else {
563                 if (target < limit->p2.dot_limit)
564                         clock.p2 = limit->p2.p2_slow;
565                 else
566                         clock.p2 = limit->p2.p2_fast;
567         }
568
569         memset(best_clock, 0, sizeof(*best_clock));
570
571         for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
572              clock.m1++) {
573                 for (clock.m2 = limit->m2.min;
574                      clock.m2 <= limit->m2.max; clock.m2++) {
575                         /* m1 is always 0 in Pineview */
576                         if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
577                                 break;
578                         for (clock.n = limit->n.min;
579                              clock.n <= limit->n.max; clock.n++) {
580                                 for (clock.p1 = limit->p1.min;
581                                         clock.p1 <= limit->p1.max; clock.p1++) {
582                                         int this_err;
583
584                                         intel_clock(dev, refclk, &clock);
585                                         if (!intel_PLL_is_valid(dev, limit,
586                                                                 &clock))
587                                                 continue;
588                                         if (match_clock &&
589                                             clock.p != match_clock->p)
590                                                 continue;
591
592                                         this_err = abs(clock.dot - target);
593                                         if (this_err < err) {
594                                                 *best_clock = clock;
595                                                 err = this_err;
596                                         }
597                                 }
598                         }
599                 }
600         }
601
602         return (err != target);
603 }
604
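/*
 * G4x/ILK variant of the PLL search: per hardware requirements it walks the
 * divisors from the large end, prefers the smallest workable n, and only
 * accepts results within err_most (initially about 0.59% of @target),
 * tightening that bound as better matches are found.
 */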
605 static bool
606 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
607                         int target, int refclk, intel_clock_t *match_clock,
608                         intel_clock_t *best_clock)
609 {
610         struct drm_device *dev = crtc->dev;
611         struct drm_i915_private *dev_priv = dev->dev_private;
612         intel_clock_t clock;
613         int max_n;
614         bool found;
615         /* approximately equals target * 0.00585 */
616         int err_most = (target >> 8) + (target >> 9);
617         found = false;
618
619         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
620                 int lvds_reg;
621
622                 if (HAS_PCH_SPLIT(dev))
623                         lvds_reg = PCH_LVDS;
624                 else
625                         lvds_reg = LVDS;
626                 if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
627                     LVDS_CLKB_POWER_UP)
628                         clock.p2 = limit->p2.p2_fast;
629                 else
630                         clock.p2 = limit->p2.p2_slow;
631         } else {
632                 if (target < limit->p2.dot_limit)
633                         clock.p2 = limit->p2.p2_slow;
634                 else
635                         clock.p2 = limit->p2.p2_fast;
636         }
637
638         memset(best_clock, 0, sizeof(*best_clock));
639         max_n = limit->n.max;
640         /* based on hardware requirement, prefer smaller n over precision */
641         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
642                 /* based on hardware requirement, prefer larger m1, m2 */
643                 for (clock.m1 = limit->m1.max;
644                      clock.m1 >= limit->m1.min; clock.m1--) {
645                         for (clock.m2 = limit->m2.max;
646                              clock.m2 >= limit->m2.min; clock.m2--) {
647                                 for (clock.p1 = limit->p1.max;
648                                      clock.p1 >= limit->p1.min; clock.p1--) {
649                                         int this_err;
650
651                                         intel_clock(dev, refclk, &clock);
652                                         if (!intel_PLL_is_valid(dev, limit,
653                                                                 &clock))
654                                                 continue;
655                                         if (match_clock &&
656                                             clock.p != match_clock->p)
657                                                 continue;
658
659                                         this_err = abs(clock.dot - target);
660                                         if (this_err < err_most) {
661                                                 *best_clock = clock;
662                                                 err_most = this_err;
663                                                 max_n = clock.n;
664                                                 found = true;
665                                         }
666                                 }
667                         }
668                 }
669         }
670         return found;
671 }
672
673 static bool
674 intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
675                            int target, int refclk, intel_clock_t *match_clock,
676                            intel_clock_t *best_clock)
677 {
678         struct drm_device *dev = crtc->dev;
679         intel_clock_t clock;
680
681         if (target < 200000) {
682                 clock.n = 1;
683                 clock.p1 = 2;
684                 clock.p2 = 10;
685                 clock.m1 = 12;
686                 clock.m2 = 9;
687         } else {
688                 clock.n = 2;
689                 clock.p1 = 1;
690                 clock.p2 = 10;
691                 clock.m1 = 14;
692                 clock.m2 = 8;
693         }
694         intel_clock(dev, refclk, &clock);
695         memcpy(best_clock, &clock, sizeof(intel_clock_t));
696         return true;
697 }
698
699 /* DisplayPort has only two frequencies, 162MHz and 270MHz */
700 static bool
701 intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
702                       int target, int refclk, intel_clock_t *match_clock,
703                       intel_clock_t *best_clock)
704 {
705         intel_clock_t clock;
706         if (target < 200000) {
707                 clock.p1 = 2;
708                 clock.p2 = 10;
709                 clock.n = 2;
710                 clock.m1 = 23;
711                 clock.m2 = 8;
712         } else {
713                 clock.p1 = 1;
714                 clock.p2 = 10;
715                 clock.n = 1;
716                 clock.m1 = 14;
717                 clock.m2 = 2;
718         }
719         clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
720         clock.p = (clock.p1 * clock.p2);
721         clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
722         clock.vco = 0;
723         memcpy(best_clock, &clock, sizeof(intel_clock_t));
724         return true;
725 }
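/*
 * With the 96000 kHz reference hard-coded above, the two fixed divisor sets
 * work out to 96000 * 135 / 4 / 20 = 162000 kHz and
 * 96000 * 84 / 3 / 10 = 268800 kHz, i.e. the 162 MHz and ~270 MHz link rates
 * mentioned in the comment before the function.
 */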
726
727 /**
728  * intel_wait_for_vblank - wait for vblank on a given pipe
729  * @dev: drm device
730  * @pipe: pipe to wait for
731  *
732  * Wait for vblank to occur on a given pipe.  Needed for various bits of
733  * mode setting code.
734  */
735 void intel_wait_for_vblank(struct drm_device *dev, int pipe)
736 {
737         struct drm_i915_private *dev_priv = dev->dev_private;
738         int pipestat_reg = PIPESTAT(pipe);
739
740         /* Clear existing vblank status. Note this will clear any other
741          * sticky status fields as well.
742          *
743          * This races with i915_driver_irq_handler() with the result
744          * that either function could miss a vblank event.  Here it is not
745          * fatal, as we will either wait upon the next vblank interrupt or
746          * timeout.  Generally speaking intel_wait_for_vblank() is only
747          * called during modeset at which time the GPU should be idle and
748          * should *not* be performing page flips and thus not waiting on
749          * vblanks...
750          * Currently, the result of us stealing a vblank from the irq
751          * handler is that a single frame will be skipped during swapbuffers.
752          */
753         I915_WRITE(pipestat_reg,
754                    I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
755
756         /* Wait for vblank interrupt bit to set */
757         if (wait_for(I915_READ(pipestat_reg) &
758                      PIPE_VBLANK_INTERRUPT_STATUS,
759                      50))
760                 DRM_DEBUG_KMS("vblank wait timed out\n");
761 }
762
763 /*
764  * intel_wait_for_pipe_off - wait for pipe to turn off
765  * @dev: drm device
766  * @pipe: pipe to wait for
767  *
768  * After disabling a pipe, we can't wait for vblank in the usual way,
769  * spinning on the vblank interrupt status bit, since we won't actually
770  * see an interrupt when the pipe is disabled.
771  *
772  * On Gen4 and above:
773  *   wait for the pipe register state bit to turn off
774  *
775  * Otherwise:
776  *   wait for the display line value to settle (it usually
777  *   ends up stopping at the start of the next frame).
778  *
779  */
780 void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
781 {
782         struct drm_i915_private *dev_priv = dev->dev_private;
783
784         if (INTEL_INFO(dev)->gen >= 4) {
785                 int reg = PIPECONF(pipe);
786
787                 /* Wait for the Pipe State to go off */
788                 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
789                              100))
790                         DRM_DEBUG_KMS("pipe_off wait timed out\n");
791         } else {
792                 u32 last_line;
793                 int reg = PIPEDSL(pipe);
794                 unsigned long timeout = jiffies + msecs_to_jiffies(100);
795
796                 /* Wait for the display line to settle */
797                 do {
798                         last_line = I915_READ(reg) & DSL_LINEMASK;
799                         mdelay(5);
800                 } while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
801                          time_after(timeout, jiffies));
802                 if (time_after(jiffies, timeout))
803                         DRM_DEBUG_KMS("pipe_off wait timed out\n");
804         }
805 }
806
807 static const char *state_string(bool enabled)
808 {
809         return enabled ? "on" : "off";
810 }
811
812 /* Only for pre-ILK configs */
813 static void assert_pll(struct drm_i915_private *dev_priv,
814                        enum pipe pipe, bool state)
815 {
816         int reg;
817         u32 val;
818         bool cur_state;
819
820         reg = DPLL(pipe);
821         val = I915_READ(reg);
822         cur_state = !!(val & DPLL_VCO_ENABLE);
823         WARN(cur_state != state,
824              "PLL state assertion failure (expected %s, current %s)\n",
825              state_string(state), state_string(cur_state));
826 }
827 #define assert_pll_enabled(d, p) assert_pll(d, p, true)
828 #define assert_pll_disabled(d, p) assert_pll(d, p, false)
829
830 /* For ILK+ */
831 static void assert_pch_pll(struct drm_i915_private *dev_priv,
832                            enum pipe pipe, bool state)
833 {
834         int reg;
835         u32 val;
836         bool cur_state;
837
838         if (HAS_PCH_CPT(dev_priv->dev)) {
839                 u32 pch_dpll;
840
841                 pch_dpll = I915_READ(PCH_DPLL_SEL);
842
843                 /* Make sure the selected PLL is enabled to the transcoder */
844                 WARN(!((pch_dpll >> (4 * pipe)) & 8),
845                      "transcoder %d PLL not enabled\n", pipe);
846
847                 /* Convert the transcoder pipe number to a pll pipe number */
848                 pipe = (pch_dpll >> (4 * pipe)) & 1;
849         }
850
851         reg = PCH_DPLL(pipe);
852         val = I915_READ(reg);
853         cur_state = !!(val & DPLL_VCO_ENABLE);
854         WARN(cur_state != state,
855              "PCH PLL state assertion failure (expected %s, current %s)\n",
856              state_string(state), state_string(cur_state));
857 }
858 #define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
859 #define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
860
861 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
862                           enum pipe pipe, bool state)
863 {
864         int reg;
865         u32 val;
866         bool cur_state;
867
868         reg = FDI_TX_CTL(pipe);
869         val = I915_READ(reg);
870         cur_state = !!(val & FDI_TX_ENABLE);
871         WARN(cur_state != state,
872              "FDI TX state assertion failure (expected %s, current %s)\n",
873              state_string(state), state_string(cur_state));
874 }
875 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
876 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
877
878 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
879                           enum pipe pipe, bool state)
880 {
881         int reg;
882         u32 val;
883         bool cur_state;
884
885         reg = FDI_RX_CTL(pipe);
886         val = I915_READ(reg);
887         cur_state = !!(val & FDI_RX_ENABLE);
888         WARN(cur_state != state,
889              "FDI RX state assertion failure (expected %s, current %s)\n",
890              state_string(state), state_string(cur_state));
891 }
892 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
893 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
894
895 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
896                                       enum pipe pipe)
897 {
898         int reg;
899         u32 val;
900
901         /* ILK FDI PLL is always enabled */
902         if (dev_priv->info->gen == 5)
903                 return;
904
905         reg = FDI_TX_CTL(pipe);
906         val = I915_READ(reg);
907         WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
908 }
909
910 static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
911                                       enum pipe pipe)
912 {
913         int reg;
914         u32 val;
915
916         reg = FDI_RX_CTL(pipe);
917         val = I915_READ(reg);
918         WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
919 }
920
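/*
 * Warn if the panel power sequencer still has its registers locked for the
 * panel on @pipe; the pre-ILK DPLL may not be writable while the panel write
 * protect is in effect (see intel_enable_pll()).
 */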
921 static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
922                                   enum pipe pipe)
923 {
924         int pp_reg, lvds_reg;
925         u32 val;
926         enum pipe panel_pipe = PIPE_A;
927         bool locked = true;
928
929         if (HAS_PCH_SPLIT(dev_priv->dev)) {
930                 pp_reg = PCH_PP_CONTROL;
931                 lvds_reg = PCH_LVDS;
932         } else {
933                 pp_reg = PP_CONTROL;
934                 lvds_reg = LVDS;
935         }
936
937         val = I915_READ(pp_reg);
938         if (!(val & PANEL_POWER_ON) ||
939             ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
940                 locked = false;
941
942         if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
943                 panel_pipe = PIPE_B;
944
945         WARN(panel_pipe == pipe && locked,
946              "panel assertion failure, pipe %c regs locked\n",
947              pipe_name(pipe));
948 }
949
950 void assert_pipe(struct drm_i915_private *dev_priv,
951                  enum pipe pipe, bool state)
952 {
953         int reg;
954         u32 val;
955         bool cur_state;
956
957         /* if we need the pipe A quirk it must be always on */
958         if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
959                 state = true;
960
961         reg = PIPECONF(pipe);
962         val = I915_READ(reg);
963         cur_state = !!(val & PIPECONF_ENABLE);
964         WARN(cur_state != state,
965              "pipe %c assertion failure (expected %s, current %s)\n",
966              pipe_name(pipe), state_string(state), state_string(cur_state));
967 }
968
969 static void assert_plane(struct drm_i915_private *dev_priv,
970                          enum plane plane, bool state)
971 {
972         int reg;
973         u32 val;
974         bool cur_state;
975
976         reg = DSPCNTR(plane);
977         val = I915_READ(reg);
978         cur_state = !!(val & DISPLAY_PLANE_ENABLE);
979         WARN(cur_state != state,
980              "plane %c assertion failure (expected %s, current %s)\n",
981              plane_name(plane), state_string(state), state_string(cur_state));
982 }
983
984 #define assert_plane_enabled(d, p) assert_plane(d, p, true)
985 #define assert_plane_disabled(d, p) assert_plane(d, p, false)
986
987 static void assert_planes_disabled(struct drm_i915_private *dev_priv,
988                                    enum pipe pipe)
989 {
990         int reg, i;
991         u32 val;
992         int cur_pipe;
993
994         /* Planes are fixed to pipes on ILK+ */
995         if (HAS_PCH_SPLIT(dev_priv->dev)) {
996                 reg = DSPCNTR(pipe);
997                 val = I915_READ(reg);
998                 WARN((val & DISPLAY_PLANE_ENABLE),
999                      "plane %c assertion failure, should be disabled but is not\n",
1000                      plane_name(pipe));
1001                 return;
1002         }
1003
1004         /* Need to check both planes against the pipe */
1005         for (i = 0; i < 2; i++) {
1006                 reg = DSPCNTR(i);
1007                 val = I915_READ(reg);
1008                 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1009                         DISPPLANE_SEL_PIPE_SHIFT;
1010                 WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1011                      "plane %c assertion failure, should be off on pipe %c but is still active\n",
1012                      plane_name(i), pipe_name(pipe));
1013         }
1014 }
1015
1016 static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1017 {
1018         u32 val;
1019         bool enabled;
1020
1021         val = I915_READ(PCH_DREF_CONTROL);
1022         enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1023                             DREF_SUPERSPREAD_SOURCE_MASK));
1024         WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1025 }
1026
1027 static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
1028                                        enum pipe pipe)
1029 {
1030         int reg;
1031         u32 val;
1032         bool enabled;
1033
1034         reg = TRANSCONF(pipe);
1035         val = I915_READ(reg);
1036         enabled = !!(val & TRANS_ENABLE);
1037         WARN(enabled,
1038              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1039              pipe_name(pipe));
1040 }
1041
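/*
 * The *_pipe_enabled() helpers below decode a port control value and report
 * whether that port is both enabled and currently routed to @pipe; on CPT
 * the routing lives in the transcoder select fields rather than the legacy
 * pipe select bits.
 */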
1042 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1043                             enum pipe pipe, u32 port_sel, u32 val)
1044 {
1045         if ((val & DP_PORT_EN) == 0)
1046                 return false;
1047
1048         if (HAS_PCH_CPT(dev_priv->dev)) {
1049                 u32     trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1050                 u32     trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1051                 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1052                         return false;
1053         } else {
1054                 if ((val & DP_PIPE_MASK) != (pipe << 30))
1055                         return false;
1056         }
1057         return true;
1058 }
1059
1060 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1061                               enum pipe pipe, u32 val)
1062 {
1063         if ((val & PORT_ENABLE) == 0)
1064                 return false;
1065
1066         if (HAS_PCH_CPT(dev_priv->dev)) {
1067                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1068                         return false;
1069         } else {
1070                 if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
1071                         return false;
1072         }
1073         return true;
1074 }
1075
1076 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1077                               enum pipe pipe, u32 val)
1078 {
1079         if ((val & LVDS_PORT_EN) == 0)
1080                 return false;
1081
1082         if (HAS_PCH_CPT(dev_priv->dev)) {
1083                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1084                         return false;
1085         } else {
1086                 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1087                         return false;
1088         }
1089         return true;
1090 }
1091
1092 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1093                               enum pipe pipe, u32 val)
1094 {
1095         if ((val & ADPA_DAC_ENABLE) == 0)
1096                 return false;
1097         if (HAS_PCH_CPT(dev_priv->dev)) {
1098                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1099                         return false;
1100         } else {
1101                 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1102                         return false;
1103         }
1104         return true;
1105 }
1106
1107 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1108                                    enum pipe pipe, int reg, u32 port_sel)
1109 {
1110         u32 val = I915_READ(reg);
1111         WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1112              "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1113              reg, pipe_name(pipe));
1114 }
1115
1116 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1117                                      enum pipe pipe, int reg)
1118 {
1119         u32 val = I915_READ(reg);
1120         WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1121              "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1122              reg, pipe_name(pipe));
1123 }
1124
1125 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1126                                       enum pipe pipe)
1127 {
1128         int reg;
1129         u32 val;
1130
1131         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1132         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1133         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1134
1135         reg = PCH_ADPA;
1136         val = I915_READ(reg);
1137         WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1138              "PCH VGA enabled on transcoder %c, should be disabled\n",
1139              pipe_name(pipe));
1140
1141         reg = PCH_LVDS;
1142         val = I915_READ(reg);
1143         WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1144              "PCH LVDS enabled on transcoder %c, should be disabled\n",
1145              pipe_name(pipe));
1146
1147         assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
1148         assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
1149         assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
1150 }
1151
1152 /**
1153  * intel_enable_pll - enable a PLL
1154  * @dev_priv: i915 private structure
1155  * @pipe: pipe PLL to enable
1156  *
1157  * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
1158  * make sure the PLL reg is writable first though, since the panel write
1159  * protect mechanism may be enabled.
1160  *
1161  * Note!  This is for pre-ILK only.
1162  */
1163 static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1164 {
1165         int reg;
1166         u32 val;
1167
1168         /* No really, not for ILK+ */
1169         BUG_ON(dev_priv->info->gen >= 5);
1170
1171         /* PLL is protected by panel, make sure we can write it */
1172         if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
1173                 assert_panel_unlocked(dev_priv, pipe);
1174
1175         reg = DPLL(pipe);
1176         val = I915_READ(reg);
1177         val |= DPLL_VCO_ENABLE;
1178
1179         /* We do this three times for luck */
1180         I915_WRITE(reg, val);
1181         POSTING_READ(reg);
1182         udelay(150); /* wait for warmup */
1183         I915_WRITE(reg, val);
1184         POSTING_READ(reg);
1185         udelay(150); /* wait for warmup */
1186         I915_WRITE(reg, val);
1187         POSTING_READ(reg);
1188         udelay(150); /* wait for warmup */
1189 }
1190
1191 /**
1192  * intel_disable_pll - disable a PLL
1193  * @dev_priv: i915 private structure
1194  * @pipe: pipe PLL to disable
1195  *
1196  * Disable the PLL for @pipe, making sure the pipe is off first.
1197  *
1198  * Note!  This is for pre-ILK only.
1199  */
1200 static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1201 {
1202         int reg;
1203         u32 val;
1204
1205         /* Don't disable pipe A or the pipe A PLL if the pipe A quirk needs them on */
1206         if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1207                 return;
1208
1209         /* Make sure the pipe isn't still relying on us */
1210         assert_pipe_disabled(dev_priv, pipe);
1211
1212         reg = DPLL(pipe);
1213         val = I915_READ(reg);
1214         val &= ~DPLL_VCO_ENABLE;
1215         I915_WRITE(reg, val);
1216         POSTING_READ(reg);
1217 }
1218
1219 /**
1220  * intel_enable_pch_pll - enable PCH PLL
1221  * @dev_priv: i915 private structure
1222  * @pipe: pipe PLL to enable
1223  *
1224  * The PCH PLL needs to be enabled before the PCH transcoder, since it
1225  * drives the transcoder clock.
1226  */
1227 static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
1228                                  enum pipe pipe)
1229 {
1230         int reg;
1231         u32 val;
1232
1233         if (pipe > 1)
1234                 return;
1235
1236         /* PCH only available on ILK+ */
1237         BUG_ON(dev_priv->info->gen < 5);
1238
1239         /* PCH refclock must be enabled first */
1240         assert_pch_refclk_enabled(dev_priv);
1241
1242         reg = PCH_DPLL(pipe);
1243         val = I915_READ(reg);
1244         val |= DPLL_VCO_ENABLE;
1245         I915_WRITE(reg, val);
1246         POSTING_READ(reg);
1247         udelay(200);
1248 }
1249
1250 static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
1251                                   enum pipe pipe)
1252 {
1253         int reg;
1254         u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
1255                 pll_sel = TRANSC_DPLL_ENABLE;
1256
1257         if (pipe > 1)
1258                 return;
1259
1260         /* PCH only available on ILK+ */
1261         BUG_ON(dev_priv->info->gen < 5);
1262
1263         /* Make sure transcoder isn't still depending on us */
1264         assert_transcoder_disabled(dev_priv, pipe);
1265
1266         if (pipe == 0)
1267                 pll_sel |= TRANSC_DPLLA_SEL;
1268         else if (pipe == 1)
1269                 pll_sel |= TRANSC_DPLLB_SEL;
1270
1271
1272         if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
1273                 return;
1274
1275         reg = PCH_DPLL(pipe);
1276         val = I915_READ(reg);
1277         val &= ~DPLL_VCO_ENABLE;
1278         I915_WRITE(reg, val);
1279         POSTING_READ(reg);
1280         udelay(200);
1281 }
1282
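/*
 * Enable the PCH transcoder for @pipe: the PCH DPLL and both FDI ends must
 * already be running, the BPC (on IBX) and interlace settings are copied
 * from the matching PIPECONF, and we wait for the hardware to report the
 * transcoder as enabled.
 */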
1283 static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1284                                     enum pipe pipe)
1285 {
1286         int reg;
1287         u32 val, pipeconf_val;
1288         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1289
1290         /* PCH only available on ILK+ */
1291         BUG_ON(dev_priv->info->gen < 5);
1292
1293         /* Make sure PCH DPLL is enabled */
1294         assert_pch_pll_enabled(dev_priv, pipe);
1295
1296         /* FDI must be feeding us bits for PCH ports */
1297         assert_fdi_tx_enabled(dev_priv, pipe);
1298         assert_fdi_rx_enabled(dev_priv, pipe);
1299
1300         reg = TRANSCONF(pipe);
1301         val = I915_READ(reg);
1302         pipeconf_val = I915_READ(PIPECONF(pipe));
1303
1304         if (HAS_PCH_IBX(dev_priv->dev)) {
1305                 /*
1306                  * make the BPC in transcoder be consistent with
1307                  * that in pipeconf reg.
1308                  */
1309                 val &= ~PIPE_BPC_MASK;
1310                 val |= pipeconf_val & PIPE_BPC_MASK;
1311         }
1312
1313         val &= ~TRANS_INTERLACE_MASK;
1314         if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1315                 if (HAS_PCH_IBX(dev_priv->dev) &&
1316                     intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
1317                         val |= TRANS_LEGACY_INTERLACED_ILK;
1318                 else
1319                         val |= TRANS_INTERLACED;
1320         else
1321                 val |= TRANS_PROGRESSIVE;
1322
1323         I915_WRITE(reg, val | TRANS_ENABLE);
1324         if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1325                 DRM_ERROR("failed to enable transcoder %d\n", pipe);
1326 }
1327
1328 static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
1329                                      enum pipe pipe)
1330 {
1331         int reg;
1332         u32 val;
1333
1334         /* FDI relies on the transcoder */
1335         assert_fdi_tx_disabled(dev_priv, pipe);
1336         assert_fdi_rx_disabled(dev_priv, pipe);
1337
1338         /* Ports must be off as well */
1339         assert_pch_ports_disabled(dev_priv, pipe);
1340
1341         reg = TRANSCONF(pipe);
1342         val = I915_READ(reg);
1343         val &= ~TRANS_ENABLE;
1344         I915_WRITE(reg, val);
1345         /* wait for PCH transcoder off, transcoder state */
1346         if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1347                 DRM_ERROR("failed to disable transcoder %d\n", pipe);
1348 }
1349
1350 /**
1351  * intel_enable_pipe - enable a pipe, asserting requirements
1352  * @dev_priv: i915 private structure
1353  * @pipe: pipe to enable
1354  * @pch_port: on ILK+, is this pipe driving a PCH port or not
1355  *
1356  * Enable @pipe, making sure that various hardware specific requirements
1357  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1358  *
1359  * @pipe should be %PIPE_A or %PIPE_B.
1360  *
1361  * Will wait until the pipe is actually running (i.e. first vblank) before
1362  * returning.
1363  */
1364 static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1365                               bool pch_port)
1366 {
1367         int reg;
1368         u32 val;
1369
1370         /*
1371          * A pipe without a PLL won't actually be able to drive bits from
1372          * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1373          * need the check.
1374          */
1375         if (!HAS_PCH_SPLIT(dev_priv->dev))
1376                 assert_pll_enabled(dev_priv, pipe);
1377         else {
1378                 if (pch_port) {
1379                         /* if driving the PCH, we need FDI enabled */
1380                         assert_fdi_rx_pll_enabled(dev_priv, pipe);
1381                         assert_fdi_tx_pll_enabled(dev_priv, pipe);
1382                 }
1383                 /* FIXME: assert CPU port conditions for SNB+ */
1384         }
1385
1386         reg = PIPECONF(pipe);
1387         val = I915_READ(reg);
1388         if (val & PIPECONF_ENABLE)
1389                 return;
1390
1391         I915_WRITE(reg, val | PIPECONF_ENABLE);
1392         intel_wait_for_vblank(dev_priv->dev, pipe);
1393 }
1394
1395 /**
1396  * intel_disable_pipe - disable a pipe, asserting requirements
1397  * @dev_priv: i915 private structure
1398  * @pipe: pipe to disable
1399  *
1400  * Disable @pipe, making sure that various hardware specific requirements
1401  * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1402  *
1403  * @pipe should be %PIPE_A or %PIPE_B.
1404  *
1405  * Will wait until the pipe has shut down before returning.
1406  */
1407 static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1408                                enum pipe pipe)
1409 {
1410         int reg;
1411         u32 val;
1412
1413         /*
1414          * Make sure planes won't keep trying to pump pixels to us,
1415          * or we might hang the display.
1416          */
1417         assert_planes_disabled(dev_priv, pipe);
1418
1419         /* Don't disable pipe A or the pipe A PLL if the pipe A quirk needs them on */
1420         if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1421                 return;
1422
1423         reg = PIPECONF(pipe);
1424         val = I915_READ(reg);
1425         if ((val & PIPECONF_ENABLE) == 0)
1426                 return;
1427
1428         I915_WRITE(reg, val & ~PIPECONF_ENABLE);
1429         intel_wait_for_pipe_off(dev_priv->dev, pipe);
1430 }
1431
1432 /*
1433  * Plane regs are double buffered, going from enabled->disabled needs a
1434  * trigger in order to latch.  The display address reg provides this.
1435  */
1436 static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
1437                                       enum plane plane)
1438 {
1439         I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1440         I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1441 }
1442
1443 /**
1444  * intel_enable_plane - enable a display plane on a given pipe
1445  * @dev_priv: i915 private structure
1446  * @plane: plane to enable
1447  * @pipe: pipe being fed
1448  *
1449  * Enable @plane on @pipe, making sure that @pipe is running first.
1450  */
1451 static void intel_enable_plane(struct drm_i915_private *dev_priv,
1452                                enum plane plane, enum pipe pipe)
1453 {
1454         int reg;
1455         u32 val;
1456
1457         /* If the pipe isn't enabled, we can't pump pixels and may hang */
1458         assert_pipe_enabled(dev_priv, pipe);
1459
1460         reg = DSPCNTR(plane);
1461         val = I915_READ(reg);
1462         if (val & DISPLAY_PLANE_ENABLE)
1463                 return;
1464
1465         I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
1466         intel_flush_display_plane(dev_priv, plane);
1467         intel_wait_for_vblank(dev_priv->dev, pipe);
1468 }
1469
1470 /**
1471  * intel_disable_plane - disable a display plane
1472  * @dev_priv: i915 private structure
1473  * @plane: plane to disable
1474  * @pipe: pipe consuming the data
1475  *
1476  * Disable @plane; should be an independent operation.
1477  */
1478 static void intel_disable_plane(struct drm_i915_private *dev_priv,
1479                                 enum plane plane, enum pipe pipe)
1480 {
1481         int reg;
1482         u32 val;
1483
1484         reg = DSPCNTR(plane);
1485         val = I915_READ(reg);
1486         if ((val & DISPLAY_PLANE_ENABLE) == 0)
1487                 return;
1488
1489         I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
1490         intel_flush_display_plane(dev_priv, plane);
1491         intel_wait_for_vblank(dev_priv->dev, pipe);
1492 }
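/*
 * Editor's note: the corresponding teardown order, sketched under the same
 * assumptions as the enable sketch above (the PLL helper is again
 * illustrative only):
 *
 *	intel_disable_plane(dev_priv, plane, pipe); <- stop feeding pixels
 *	intel_disable_pipe(dev_priv, pipe);         <- asserts the planes are off
 *	intel_disable_pll(dev_priv, pipe);          <- clock source last
 */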
1493
1494 static void disable_pch_dp(struct drm_i915_private *dev_priv,
1495                            enum pipe pipe, int reg, u32 port_sel)
1496 {
1497         u32 val = I915_READ(reg);
1498         if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
1499                 DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
1500                 I915_WRITE(reg, val & ~DP_PORT_EN);
1501         }
1502 }
1503
1504 static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1505                              enum pipe pipe, int reg)
1506 {
1507         u32 val = I915_READ(reg);
1508         if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
1509                 DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
1510                               reg, pipe);
1511                 I915_WRITE(reg, val & ~PORT_ENABLE);
1512         }
1513 }
1514
1515 /* Disable any ports connected to this transcoder */
1516 static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1517                                     enum pipe pipe)
1518 {
1519         u32 reg, val;
1520
1521         val = I915_READ(PCH_PP_CONTROL);
1522         I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
1523
1524         disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1525         disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1526         disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1527
1528         reg = PCH_ADPA;
1529         val = I915_READ(reg);
1530         if (adpa_pipe_enabled(dev_priv, pipe, val))
1531                 I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
1532
1533         reg = PCH_LVDS;
1534         val = I915_READ(reg);
1535         if (lvds_pipe_enabled(dev_priv, pipe, val)) {
1536                 DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
1537                 I915_WRITE(reg, val & ~LVDS_PORT_EN);
1538                 POSTING_READ(reg);
1539                 udelay(100);
1540         }
1541
1542         disable_pch_hdmi(dev_priv, pipe, HDMIB);
1543         disable_pch_hdmi(dev_priv, pipe, HDMIC);
1544         disable_pch_hdmi(dev_priv, pipe, HDMID);
1545 }
1546
1547 static void i8xx_disable_fbc(struct drm_device *dev)
1548 {
1549         struct drm_i915_private *dev_priv = dev->dev_private;
1550         u32 fbc_ctl;
1551
1552         /* Disable compression */
1553         fbc_ctl = I915_READ(FBC_CONTROL);
1554         if ((fbc_ctl & FBC_CTL_EN) == 0)
1555                 return;
1556
1557         fbc_ctl &= ~FBC_CTL_EN;
1558         I915_WRITE(FBC_CONTROL, fbc_ctl);
1559
1560         /* Wait for compressing bit to clear */
1561         if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
1562                 DRM_DEBUG_KMS("FBC idle timed out\n");
1563                 return;
1564         }
1565
1566         DRM_DEBUG_KMS("disabled FBC\n");
1567 }
1568
1569 static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1570 {
1571         struct drm_device *dev = crtc->dev;
1572         struct drm_i915_private *dev_priv = dev->dev_private;
1573         struct drm_framebuffer *fb = crtc->fb;
1574         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1575         struct drm_i915_gem_object *obj = intel_fb->obj;
1576         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1577         int cfb_pitch;
1578         int plane, i;
1579         u32 fbc_ctl, fbc_ctl2;
1580
1581         cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
1582         if (fb->pitches[0] < cfb_pitch)
1583                 cfb_pitch = fb->pitches[0];
1584
1585         /* FBC_CTL wants 64B units */
1586         cfb_pitch = (cfb_pitch / 64) - 1;
1587         plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
1588
1589         /* Clear old tags */
1590         for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
1591                 I915_WRITE(FBC_TAG + (i * 4), 0);
1592
1593         /* Set it up... */
1594         fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
1595         fbc_ctl2 |= plane;
1596         I915_WRITE(FBC_CONTROL2, fbc_ctl2);
1597         I915_WRITE(FBC_FENCE_OFF, crtc->y);
1598
1599         /* enable it... */
1600         fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
1601         if (IS_I945GM(dev))
1602                 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
1603         fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
1604         fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
1605         fbc_ctl |= obj->fence_reg;
1606         I915_WRITE(FBC_CONTROL, fbc_ctl);
1607
1608         DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
1609                       cfb_pitch, crtc->y, intel_crtc->plane);
1610 }
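/*
 * Editor's note: a worked example of the pitch programming above, assuming
 * a hypothetical 1920-pixel-wide XRGB8888 scanout (pitches[0] == 7680) and
 * a compressed buffer large enough that cfb_size / FBC_LL_SIZE >= 7680:
 *
 *	cfb_pitch = min(cfb_size / FBC_LL_SIZE, 7680) = 7680
 *	cfb_pitch = (7680 / 64) - 1 = 119           <- FBC_CTL takes 64B units
 *
 * and 119 is the value shifted into place by FBC_CTL_STRIDE_SHIFT.
 */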
1611
1612 static bool i8xx_fbc_enabled(struct drm_device *dev)
1613 {
1614         struct drm_i915_private *dev_priv = dev->dev_private;
1615
1616         return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
1617 }
1618
1619 static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1620 {
1621         struct drm_device *dev = crtc->dev;
1622         struct drm_i915_private *dev_priv = dev->dev_private;
1623         struct drm_framebuffer *fb = crtc->fb;
1624         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1625         struct drm_i915_gem_object *obj = intel_fb->obj;
1626         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1627         int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1628         unsigned long stall_watermark = 200;
1629         u32 dpfc_ctl;
1630
1631         dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
1632         dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
1633         I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
1634
1635         I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1636                    (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1637                    (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1638         I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
1639
1640         /* enable it... */
1641         I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
1642
1643         DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1644 }
1645
1646 static void g4x_disable_fbc(struct drm_device *dev)
1647 {
1648         struct drm_i915_private *dev_priv = dev->dev_private;
1649         u32 dpfc_ctl;
1650
1651         /* Disable compression */
1652         dpfc_ctl = I915_READ(DPFC_CONTROL);
1653         if (dpfc_ctl & DPFC_CTL_EN) {
1654                 dpfc_ctl &= ~DPFC_CTL_EN;
1655                 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
1656
1657                 DRM_DEBUG_KMS("disabled FBC\n");
1658         }
1659 }
1660
1661 static bool g4x_fbc_enabled(struct drm_device *dev)
1662 {
1663         struct drm_i915_private *dev_priv = dev->dev_private;
1664
1665         return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
1666 }
1667
1668 static void sandybridge_blit_fbc_update(struct drm_device *dev)
1669 {
1670         struct drm_i915_private *dev_priv = dev->dev_private;
1671         u32 blt_ecoskpd;
1672
1673         /* Make sure blitter notifies FBC of writes */
1674         gen6_gt_force_wake_get(dev_priv);
1675         blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
1676         blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
1677                 GEN6_BLITTER_LOCK_SHIFT;
1678         I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1679         blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
1680         I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1681         blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
1682                          GEN6_BLITTER_LOCK_SHIFT);
1683         I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1684         POSTING_READ(GEN6_BLITTER_ECOSKPD);
1685         gen6_gt_force_wake_put(dev_priv);
1686 }
1687
1688 static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1689 {
1690         struct drm_device *dev = crtc->dev;
1691         struct drm_i915_private *dev_priv = dev->dev_private;
1692         struct drm_framebuffer *fb = crtc->fb;
1693         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1694         struct drm_i915_gem_object *obj = intel_fb->obj;
1695         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1696         int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1697         unsigned long stall_watermark = 200;
1698         u32 dpfc_ctl;
1699
1700         dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1701         dpfc_ctl &= DPFC_RESERVED;
1702         dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
1703         /* Set persistent mode for front-buffer rendering, a la X. */
1704         dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
1705         dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
1706         I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
1707
1708         I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1709                    (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1710                    (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1711         I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
1712         I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
1713         /* enable it... */
1714         I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
1715
1716         if (IS_GEN6(dev)) {
1717                 I915_WRITE(SNB_DPFC_CTL_SA,
1718                            SNB_CPU_FENCE_ENABLE | obj->fence_reg);
1719                 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
1720                 sandybridge_blit_fbc_update(dev);
1721         }
1722
1723         DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1724 }
1725
1726 static void ironlake_disable_fbc(struct drm_device *dev)
1727 {
1728         struct drm_i915_private *dev_priv = dev->dev_private;
1729         u32 dpfc_ctl;
1730
1731         /* Disable compression */
1732         dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1733         if (dpfc_ctl & DPFC_CTL_EN) {
1734                 dpfc_ctl &= ~DPFC_CTL_EN;
1735                 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
1736
1737                 DRM_DEBUG_KMS("disabled FBC\n");
1738         }
1739 }
1740
1741 static bool ironlake_fbc_enabled(struct drm_device *dev)
1742 {
1743         struct drm_i915_private *dev_priv = dev->dev_private;
1744
1745         return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
1746 }
1747
1748 bool intel_fbc_enabled(struct drm_device *dev)
1749 {
1750         struct drm_i915_private *dev_priv = dev->dev_private;
1751
1752         if (!dev_priv->display.fbc_enabled)
1753                 return false;
1754
1755         return dev_priv->display.fbc_enabled(dev);
1756 }
1757
1758 static void intel_fbc_work_fn(struct work_struct *__work)
1759 {
1760         struct intel_fbc_work *work =
1761                 container_of(to_delayed_work(__work),
1762                              struct intel_fbc_work, work);
1763         struct drm_device *dev = work->crtc->dev;
1764         struct drm_i915_private *dev_priv = dev->dev_private;
1765
1766         mutex_lock(&dev->struct_mutex);
1767         if (work == dev_priv->fbc_work) {
1768                 /* Double check that we haven't switched fb without cancelling
1769                  * the prior work.
1770                  */
1771                 if (work->crtc->fb == work->fb) {
1772                         dev_priv->display.enable_fbc(work->crtc,
1773                                                      work->interval);
1774
1775                         dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
1776                         dev_priv->cfb_fb = work->crtc->fb->base.id;
1777                         dev_priv->cfb_y = work->crtc->y;
1778                 }
1779
1780                 dev_priv->fbc_work = NULL;
1781         }
1782         mutex_unlock(&dev->struct_mutex);
1783
1784         kfree(work);
1785 }
1786
1787 static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
1788 {
1789         if (dev_priv->fbc_work == NULL)
1790                 return;
1791
1792         DRM_DEBUG_KMS("cancelling pending FBC enable\n");
1793
1794         /* Synchronisation is provided by struct_mutex and checking of
1795          * dev_priv->fbc_work, so we can perform the cancellation
1796          * entirely asynchronously.
1797          */
1798         if (cancel_delayed_work(&dev_priv->fbc_work->work))
1799                 /* the delayed work was cancelled before it ran, clean up */
1800                 kfree(dev_priv->fbc_work);
1801
1802         /* Mark the work as no longer wanted so that if it does
1803          * wake-up (because the work was already running and waiting
1804          * for our mutex), it will discover that it is no longer
1805          * necessary to run.
1806          */
1807         dev_priv->fbc_work = NULL;
1808 }
1809
1810 static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1811 {
1812         struct intel_fbc_work *work;
1813         struct drm_device *dev = crtc->dev;
1814         struct drm_i915_private *dev_priv = dev->dev_private;
1815
1816         if (!dev_priv->display.enable_fbc)
1817                 return;
1818
1819         intel_cancel_fbc_work(dev_priv);
1820
1821         work = kzalloc(sizeof *work, GFP_KERNEL);
1822         if (work == NULL) {
1823                 dev_priv->display.enable_fbc(crtc, interval);
1824                 return;
1825         }
1826
1827         work->crtc = crtc;
1828         work->fb = crtc->fb;
1829         work->interval = interval;
1830         INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
1831
1832         dev_priv->fbc_work = work;
1833
1834         DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
1835
1836         /* Delay the actual enabling to let pageflipping cease and the
1837          * display to settle before starting the compression. Note that
1838          * this delay also serves a second purpose: it allows for a
1839          * vblank to pass after disabling the FBC before we attempt
1840          * to modify the control registers.
1841          *
1842          * A more complicated solution would involve tracking vblanks
1843          * following the termination of the page-flipping sequence
1844          * and indeed performing the enable as a co-routine and not
1845          * waiting synchronously upon the vblank.
1846          */
1847         schedule_delayed_work(&work->work, msecs_to_jiffies(50));
1848 }
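/*
 * Editor's note: a summary of the deferred-enable path above (a sketch of
 * the existing code, not additional behaviour):
 *
 *	intel_enable_fbc(crtc, interval)
 *	    intel_cancel_fbc_work(dev_priv)          <- drop any pending request
 *	    schedule_delayed_work(&work->work, 50ms)
 *	        ... at least 50ms later ...
 *	    intel_fbc_work_fn(work)
 *	        display.enable_fbc(crtc, interval)   <- only if crtc->fb unchanged
 */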
1849
1850 void intel_disable_fbc(struct drm_device *dev)
1851 {
1852         struct drm_i915_private *dev_priv = dev->dev_private;
1853
1854         intel_cancel_fbc_work(dev_priv);
1855
1856         if (!dev_priv->display.disable_fbc)
1857                 return;
1858
1859         dev_priv->display.disable_fbc(dev);
1860         dev_priv->cfb_plane = -1;
1861 }
1862
1863 /**
1864  * intel_update_fbc - enable/disable FBC as needed
1865  * @dev: the drm_device
1866  *
1867  * Set up the framebuffer compression hardware at mode set time.  We
1868  * enable it if possible:
1869  *   - plane A only (on pre-965)
1870  *   - no pixel multiply/line duplication
1871  *   - no alpha buffer discard
1872  *   - no dual wide
1873  *   - framebuffer <= 2048 in width, 1536 in height
1874  *
1875  * We can't assume that any compression will take place (worst case),
1876  * so the compressed buffer has to be the same size as the uncompressed
1877  * one.  It also must reside (along with the line length buffer) in
1878  * stolen memory.
1879  *
1880  * We need to enable/disable FBC on a global basis.
1881  */
1882 static void intel_update_fbc(struct drm_device *dev)
1883 {
1884         struct drm_i915_private *dev_priv = dev->dev_private;
1885         struct drm_crtc *crtc = NULL, *tmp_crtc;
1886         struct intel_crtc *intel_crtc;
1887         struct drm_framebuffer *fb;
1888         struct intel_framebuffer *intel_fb;
1889         struct drm_i915_gem_object *obj;
1890         int enable_fbc;
1891
1892         DRM_DEBUG_KMS("\n");
1893
1894         if (!i915_powersave)
1895                 return;
1896
1897         if (!I915_HAS_FBC(dev))
1898                 return;
1899
1900         /*
1901          * If FBC is already on, we just have to verify that we can
1902          * keep it that way...
1903          * Need to disable if:
1904          *   - more than one pipe is active
1905          *   - changing FBC params (stride, fence, mode)
1906          *   - new fb is too large to fit in compressed buffer
1907          *   - going to an unsupported config (interlace, pixel multiply, etc.)
1908          */
1909         list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
1910                 if (tmp_crtc->enabled && tmp_crtc->fb) {
1911                         if (crtc) {
1912                                 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
1913                                 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
1914                                 goto out_disable;
1915                         }
1916                         crtc = tmp_crtc;
1917                 }
1918         }
1919
1920         if (!crtc || crtc->fb == NULL) {
1921                 DRM_DEBUG_KMS("no output, disabling\n");
1922                 dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
1923                 goto out_disable;
1924         }
1925
1926         intel_crtc = to_intel_crtc(crtc);
1927         fb = crtc->fb;
1928         intel_fb = to_intel_framebuffer(fb);
1929         obj = intel_fb->obj;
1930
1931         enable_fbc = i915_enable_fbc;
1932         if (enable_fbc < 0) {
1933                 DRM_DEBUG_KMS("fbc set to per-chip default\n");
1934                 enable_fbc = 1;
1935                 if (INTEL_INFO(dev)->gen <= 6)
1936                         enable_fbc = 0;
1937         }
1938         if (!enable_fbc) {
1939                 DRM_DEBUG_KMS("fbc disabled per module param\n");
1940                 dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
1941                 goto out_disable;
1942         }
1943         if (intel_fb->obj->base.size > dev_priv->cfb_size) {
1944                 DRM_DEBUG_KMS("framebuffer too large, disabling "
1945                               "compression\n");
1946                 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1947                 goto out_disable;
1948         }
1949         if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
1950             (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
1951                 DRM_DEBUG_KMS("mode incompatible with compression, "
1952                               "disabling\n");
1953                 dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
1954                 goto out_disable;
1955         }
1956         if ((crtc->mode.hdisplay > 2048) ||
1957             (crtc->mode.vdisplay > 1536)) {
1958                 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
1959                 dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
1960                 goto out_disable;
1961         }
1962         if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
1963                 DRM_DEBUG_KMS("plane not 0, disabling compression\n");
1964                 dev_priv->no_fbc_reason = FBC_BAD_PLANE;
1965                 goto out_disable;
1966         }
1967
1968         /* The use of a CPU fence is mandatory in order to detect writes
1969          * by the CPU to the scanout and trigger updates to the FBC.
1970          */
1971         if (obj->tiling_mode != I915_TILING_X ||
1972             obj->fence_reg == I915_FENCE_REG_NONE) {
1973                 DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
1974                 dev_priv->no_fbc_reason = FBC_NOT_TILED;
1975                 goto out_disable;
1976         }
1977
1978         /* If the kernel debugger is active, always disable compression */
1979         if (in_dbg_master())
1980                 goto out_disable;
1981
1982         /* If the scanout has not changed, don't modify the FBC settings.
1983          * Note that we make the fundamental assumption that the fb->obj
1984          * cannot be unpinned (and have its GTT offset and fence revoked)
1985          * without first being decoupled from the scanout and FBC disabled.
1986          */
1987         if (dev_priv->cfb_plane == intel_crtc->plane &&
1988             dev_priv->cfb_fb == fb->base.id &&
1989             dev_priv->cfb_y == crtc->y)
1990                 return;
1991
1992         if (intel_fbc_enabled(dev)) {
1993                 /* We update FBC along two paths, after changing fb/crtc
1994                  * configuration (modeswitching) and after page-flipping
1995                  * finishes. For the latter, we know that not only did
1996                  * we disable the FBC at the start of the page-flip
1997                  * sequence, but also more than one vblank has passed.
1998                  *
1999                  * For the former case of modeswitching, it is possible
2000                  * to switch between two FBC valid configurations
2001                  * instantaneously so we do need to disable the FBC
2002                  * before we can modify its control registers. We also
2003                  * have to wait for the next vblank for that to take
2004                  * effect. However, since we delay enabling FBC we can
2005                  * assume that a vblank has passed since disabling and
2006                  * that we can safely alter the registers in the deferred
2007                  * callback.
2008                  *
2009                  * In the scenario that we go from a valid to invalid
2010                  * and then back to valid FBC configuration we have
2011                  * no strict enforcement that a vblank occurred since
2012                  * disabling the FBC. However, along all current pipe
2013                  * disabling paths we do need to wait for a vblank at
2014                  * some point. And we wait before enabling FBC anyway.
2015                  */
2016                 DRM_DEBUG_KMS("disabling active FBC for update\n");
2017                 intel_disable_fbc(dev);
2018         }
2019
2020         intel_enable_fbc(crtc, 500);
2021         return;
2022
2023 out_disable:
2024         /* Multiple disables should be harmless */
2025         if (intel_fbc_enabled(dev)) {
2026                 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
2027                 intel_disable_fbc(dev);
2028         }
2029 }
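/*
 * Editor's note: intel_update_fbc() is re-run whenever the scanout may have
 * changed -- e.g. from intel_pipe_set_base_atomic() below -- so each early
 * "goto out_disable" above only holds until the next such update.
 */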
2030
2031 int
2032 intel_pin_and_fence_fb_obj(struct drm_device *dev,
2033                            struct drm_i915_gem_object *obj,
2034                            struct intel_ring_buffer *pipelined)
2035 {
2036         struct drm_i915_private *dev_priv = dev->dev_private;
2037         u32 alignment;
2038         int ret;
2039
2040         switch (obj->tiling_mode) {
2041         case I915_TILING_NONE:
2042                 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
2043                         alignment = 128 * 1024;
2044                 else if (INTEL_INFO(dev)->gen >= 4)
2045                         alignment = 4 * 1024;
2046                 else
2047                         alignment = 64 * 1024;
2048                 break;
2049         case I915_TILING_X:
2050                 /* pin() will align the object as required by fence */
2051                 alignment = 0;
2052                 break;
2053         case I915_TILING_Y:
2054                 /* FIXME: Is this true? */
2055                 DRM_ERROR("Y tiled not allowed for scan out buffers\n");
2056                 return -EINVAL;
2057         default:
2058                 BUG();
2059         }
2060
2061         dev_priv->mm.interruptible = false;
2062         ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
2063         if (ret)
2064                 goto err_interruptible;
2065
2066         /* Install a fence for tiled scan-out. Pre-i965 always needs a
2067          * fence, whereas 965+ only requires a fence if using
2068          * framebuffer compression.  For simplicity, we always install
2069          * a fence as the cost is not that onerous.
2070          */
2071         if (obj->tiling_mode != I915_TILING_NONE) {
2072                 ret = i915_gem_object_get_fence(obj, pipelined);
2073                 if (ret)
2074                         goto err_unpin;
2075
2076                 i915_gem_object_pin_fence(obj);
2077         }
2078
2079         dev_priv->mm.interruptible = true;
2080         return 0;
2081
2082 err_unpin:
2083         i915_gem_object_unpin(obj);
2084 err_interruptible:
2085         dev_priv->mm.interruptible = true;
2086         return ret;
2087 }
2088
2089 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
2090 {
2091         i915_gem_object_unpin_fence(obj);
2092         i915_gem_object_unpin(obj);
2093 }
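/*
 * Editor's note: the pin/unpin pair above is meant to bracket a scanout
 * switch under struct_mutex; a minimal sketch, modelled on
 * intel_pipe_set_base() below:
 *
 *	mutex_lock(&dev->struct_mutex);
 *	ret = intel_pin_and_fence_fb_obj(dev, new_obj, NULL);
 *	if (ret == 0) {
 *		... flip the plane to new_obj, wait for a vblank ...
 *		intel_unpin_fb_obj(old_obj);
 *	}
 *	mutex_unlock(&dev->struct_mutex);
 */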
2094
2095 static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2096                              int x, int y)
2097 {
2098         struct drm_device *dev = crtc->dev;
2099         struct drm_i915_private *dev_priv = dev->dev_private;
2100         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2101         struct intel_framebuffer *intel_fb;
2102         struct drm_i915_gem_object *obj;
2103         int plane = intel_crtc->plane;
2104         unsigned long Start, Offset;
2105         u32 dspcntr;
2106         u32 reg;
2107
2108         switch (plane) {
2109         case 0:
2110         case 1:
2111                 break;
2112         default:
2113                 DRM_ERROR("Can't update plane %d in SAREA\n", plane);
2114                 return -EINVAL;
2115         }
2116
2117         intel_fb = to_intel_framebuffer(fb);
2118         obj = intel_fb->obj;
2119
2120         reg = DSPCNTR(plane);
2121         dspcntr = I915_READ(reg);
2122         /* Mask out pixel format bits in case we change it */
2123         dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2124         switch (fb->bits_per_pixel) {
2125         case 8:
2126                 dspcntr |= DISPPLANE_8BPP;
2127                 break;
2128         case 16:
2129                 if (fb->depth == 15)
2130                         dspcntr |= DISPPLANE_15_16BPP;
2131                 else
2132                         dspcntr |= DISPPLANE_16BPP;
2133                 break;
2134         case 24:
2135         case 32:
2136                 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
2137                 break;
2138         default:
2139                 DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
2140                 return -EINVAL;
2141         }
2142         if (INTEL_INFO(dev)->gen >= 4) {
2143                 if (obj->tiling_mode != I915_TILING_NONE)
2144                         dspcntr |= DISPPLANE_TILED;
2145                 else
2146                         dspcntr &= ~DISPPLANE_TILED;
2147         }
2148
2149         I915_WRITE(reg, dspcntr);
2150
2151         Start = obj->gtt_offset;
2152         Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2153
2154         DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2155                       Start, Offset, x, y, fb->pitches[0]);
2156         I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2157         if (INTEL_INFO(dev)->gen >= 4) {
2158                 I915_WRITE(DSPSURF(plane), Start);
2159                 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2160                 I915_WRITE(DSPADDR(plane), Offset);
2161         } else
2162                 I915_WRITE(DSPADDR(plane), Start + Offset);
2163         POSTING_READ(reg);
2164
2165         return 0;
2166 }
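/*
 * Editor's note: a worked example of the Start/Offset split above, assuming
 * a hypothetical 32bpp framebuffer with pitches[0] == 7680 panned to
 * x = 100, y = 50:
 *
 *	Offset = 50 * 7680 + 100 * (32 / 8) = 384400
 *
 * so the pre-gen4 path programs DSPADDR with Start + 384400, while gen4+
 * splits it into DSPSURF (base) and DSPTILEOFF (pan).
 */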
2167
2168 static int ironlake_update_plane(struct drm_crtc *crtc,
2169                                  struct drm_framebuffer *fb, int x, int y)
2170 {
2171         struct drm_device *dev = crtc->dev;
2172         struct drm_i915_private *dev_priv = dev->dev_private;
2173         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2174         struct intel_framebuffer *intel_fb;
2175         struct drm_i915_gem_object *obj;
2176         int plane = intel_crtc->plane;
2177         unsigned long Start, Offset;
2178         u32 dspcntr;
2179         u32 reg;
2180
2181         switch (plane) {
2182         case 0:
2183         case 1:
2184         case 2:
2185                 break;
2186         default:
2187                 DRM_ERROR("Can't update plane %d in SAREA\n", plane);
2188                 return -EINVAL;
2189         }
2190
2191         intel_fb = to_intel_framebuffer(fb);
2192         obj = intel_fb->obj;
2193
2194         reg = DSPCNTR(plane);
2195         dspcntr = I915_READ(reg);
2196         /* Mask out pixel format bits in case we change it */
2197         dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2198         switch (fb->bits_per_pixel) {
2199         case 8:
2200                 dspcntr |= DISPPLANE_8BPP;
2201                 break;
2202         case 16:
2203                 if (fb->depth != 16)
2204                         return -EINVAL;
2205
2206                 dspcntr |= DISPPLANE_16BPP;
2207                 break;
2208         case 24:
2209         case 32:
2210                 if (fb->depth == 24)
2211                         dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
2212                 else if (fb->depth == 30)
2213                         dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
2214                 else
2215                         return -EINVAL;
2216                 break;
2217         default:
2218                 DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
2219                 return -EINVAL;
2220         }
2221
2222         if (obj->tiling_mode != I915_TILING_NONE)
2223                 dspcntr |= DISPPLANE_TILED;
2224         else
2225                 dspcntr &= ~DISPPLANE_TILED;
2226
2227         /* trickle feed must be disabled */
2228         dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2229
2230         I915_WRITE(reg, dspcntr);
2231
2232         Start = obj->gtt_offset;
2233         Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2234
2235         DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2236                       Start, Offset, x, y, fb->pitches[0]);
2237         I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2238         I915_WRITE(DSPSURF(plane), Start);
2239         I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2240         I915_WRITE(DSPADDR(plane), Offset);
2241         POSTING_READ(reg);
2242
2243         return 0;
2244 }
2245
2246 /* Assume fb object is pinned & idle & fenced and just update base pointers */
2247 static int
2248 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2249                            int x, int y, enum mode_set_atomic state)
2250 {
2251         struct drm_device *dev = crtc->dev;
2252         struct drm_i915_private *dev_priv = dev->dev_private;
2253         int ret;
2254
2255         ret = dev_priv->display.update_plane(crtc, fb, x, y);
2256         if (ret)
2257                 return ret;
2258
2259         intel_update_fbc(dev);
2260         intel_increase_pllclock(crtc);
2261
2262         return 0;
2263 }
2264
2265 static int
2266 intel_finish_fb(struct drm_framebuffer *old_fb)
2267 {
2268         struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
2269         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2270         bool was_interruptible = dev_priv->mm.interruptible;
2271         int ret;
2272
2273         wait_event(dev_priv->pending_flip_queue,
2274                    atomic_read(&dev_priv->mm.wedged) ||
2275                    atomic_read(&obj->pending_flip) == 0);
2276
2277         /* Big Hammer, we also need to ensure that any pending
2278          * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2279          * current scanout is retired before unpinning the old
2280          * framebuffer.
2281          *
2282          * This should only fail upon a hung GPU, in which case we
2283          * can safely continue.
2284          */
2285         dev_priv->mm.interruptible = false;
2286         ret = i915_gem_object_finish_gpu(obj);
2287         dev_priv->mm.interruptible = was_interruptible;
2288
2289         return ret;
2290 }
2291
2292 static int
2293 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2294                     struct drm_framebuffer *old_fb)
2295 {
2296         struct drm_device *dev = crtc->dev;
2297         struct drm_i915_master_private *master_priv;
2298         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2299         int ret;
2300
2301         /* no fb bound */
2302         if (!crtc->fb) {
2303                 DRM_ERROR("No FB bound\n");
2304                 return 0;
2305         }
2306
2307         switch (intel_crtc->plane) {
2308         case 0:
2309         case 1:
2310                 break;
2311         case 2:
2312                 if (IS_IVYBRIDGE(dev))
2313                         break;
2314                 /* fall through otherwise */
2315         default:
2316                 DRM_ERROR("no plane for crtc\n");
2317                 return -EINVAL;
2318         }
2319
2320         mutex_lock(&dev->struct_mutex);
2321         ret = intel_pin_and_fence_fb_obj(dev,
2322                                          to_intel_framebuffer(crtc->fb)->obj,
2323                                          NULL);
2324         if (ret != 0) {
2325                 mutex_unlock(&dev->struct_mutex);
2326                 DRM_ERROR("pin & fence failed\n");
2327                 return ret;
2328         }
2329
2330         if (old_fb)
2331                 intel_finish_fb(old_fb);
2332
2333         ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
2334                                          LEAVE_ATOMIC_MODE_SET);
2335         if (ret) {
2336                 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
2337                 mutex_unlock(&dev->struct_mutex);
2338                 DRM_ERROR("failed to update base address\n");
2339                 return ret;
2340         }
2341
2342         if (old_fb) {
2343                 intel_wait_for_vblank(dev, intel_crtc->pipe);
2344                 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
2345         }
2346
2347         mutex_unlock(&dev->struct_mutex);
2348
2349         if (!dev->primary->master)
2350                 return 0;
2351
2352         master_priv = dev->primary->master->driver_priv;
2353         if (!master_priv->sarea_priv)
2354                 return 0;
2355
2356         if (intel_crtc->pipe) {
2357                 master_priv->sarea_priv->pipeB_x = x;
2358                 master_priv->sarea_priv->pipeB_y = y;
2359         } else {
2360                 master_priv->sarea_priv->pipeA_x = x;
2361                 master_priv->sarea_priv->pipeA_y = y;
2362         }
2363
2364         return 0;
2365 }
2366
2367 static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
2368 {
2369         struct drm_device *dev = crtc->dev;
2370         struct drm_i915_private *dev_priv = dev->dev_private;
2371         u32 dpa_ctl;
2372
2373         DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
2374         dpa_ctl = I915_READ(DP_A);
2375         dpa_ctl &= ~DP_PLL_FREQ_MASK;
2376
2377         if (clock < 200000) {
2378                 u32 temp;
2379                 dpa_ctl |= DP_PLL_FREQ_160MHZ;
2380                 /* workaround for 160MHz:
2381                    1) program 0x4600c bits 15:0 = 0x8124
2382                    2) program 0x46010 bit 0 = 1
2383                    3) program 0x46034 bit 24 = 1
2384                    4) program 0x64000 bit 14 = 1
2385                    */
2386                 temp = I915_READ(0x4600c);
2387                 temp &= 0xffff0000;
2388                 I915_WRITE(0x4600c, temp | 0x8124);
2389
2390                 temp = I915_READ(0x46010);
2391                 I915_WRITE(0x46010, temp | 1);
2392
2393                 temp = I915_READ(0x46034);
2394                 I915_WRITE(0x46034, temp | (1 << 24));
2395         } else {
2396                 dpa_ctl |= DP_PLL_FREQ_270MHZ;
2397         }
2398         I915_WRITE(DP_A, dpa_ctl);
2399
2400         POSTING_READ(DP_A);
2401         udelay(500);
2402 }
2403
2404 static void intel_fdi_normal_train(struct drm_crtc *crtc)
2405 {
2406         struct drm_device *dev = crtc->dev;
2407         struct drm_i915_private *dev_priv = dev->dev_private;
2408         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2409         int pipe = intel_crtc->pipe;
2410         u32 reg, temp;
2411
2412         /* enable normal train */
2413         reg = FDI_TX_CTL(pipe);
2414         temp = I915_READ(reg);
2415         if (IS_IVYBRIDGE(dev)) {
2416                 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2417                 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
2418         } else {
2419                 temp &= ~FDI_LINK_TRAIN_NONE;
2420                 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
2421         }
2422         I915_WRITE(reg, temp);
2423
2424         reg = FDI_RX_CTL(pipe);
2425         temp = I915_READ(reg);
2426         if (HAS_PCH_CPT(dev)) {
2427                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2428                 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2429         } else {
2430                 temp &= ~FDI_LINK_TRAIN_NONE;
2431                 temp |= FDI_LINK_TRAIN_NONE;
2432         }
2433         I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2434
2435         /* wait one idle pattern time */
2436         POSTING_READ(reg);
2437         udelay(1000);
2438
2439         /* IVB wants error correction enabled */
2440         if (IS_IVYBRIDGE(dev))
2441                 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2442                            FDI_FE_ERRC_ENABLE);
2443 }
2444
2445 /* The FDI link training functions for ILK/Ibexpeak. */
2446 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2447 {
2448         struct drm_device *dev = crtc->dev;
2449         struct drm_i915_private *dev_priv = dev->dev_private;
2450         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2451         int pipe = intel_crtc->pipe;
2452         int plane = intel_crtc->plane;
2453         u32 reg, temp, tries;
2454
2455         /* FDI needs bits from pipe & plane first */
2456         assert_pipe_enabled(dev_priv, pipe);
2457         assert_plane_enabled(dev_priv, plane);
2458
2459         /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2460            for the train result */
2461         reg = FDI_RX_IMR(pipe);
2462         temp = I915_READ(reg);
2463         temp &= ~FDI_RX_SYMBOL_LOCK;
2464         temp &= ~FDI_RX_BIT_LOCK;
2465         I915_WRITE(reg, temp);
2466         I915_READ(reg);
2467         udelay(150);
2468
2469         /* enable CPU FDI TX and PCH FDI RX */
2470         reg = FDI_TX_CTL(pipe);
2471         temp = I915_READ(reg);
2472         temp &= ~(7 << 19);
2473         temp |= (intel_crtc->fdi_lanes - 1) << 19;
2474         temp &= ~FDI_LINK_TRAIN_NONE;
2475         temp |= FDI_LINK_TRAIN_PATTERN_1;
2476         I915_WRITE(reg, temp | FDI_TX_ENABLE);
2477
2478         reg = FDI_RX_CTL(pipe);
2479         temp = I915_READ(reg);
2480         temp &= ~FDI_LINK_TRAIN_NONE;
2481         temp |= FDI_LINK_TRAIN_PATTERN_1;
2482         I915_WRITE(reg, temp | FDI_RX_ENABLE);
2483
2484         POSTING_READ(reg);
2485         udelay(150);
2486
2487         /* Ironlake workaround, enable clock pointer after FDI enable */
2488         if (HAS_PCH_IBX(dev)) {
2489                 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2490                 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2491                            FDI_RX_PHASE_SYNC_POINTER_EN);
2492         }
2493
2494         reg = FDI_RX_IIR(pipe);
2495         for (tries = 0; tries < 5; tries++) {
2496                 temp = I915_READ(reg);
2497                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2498
2499                 if ((temp & FDI_RX_BIT_LOCK)) {
2500                         DRM_DEBUG_KMS("FDI train 1 done.\n");
2501                         I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2502                         break;
2503                 }
2504         }
2505         if (tries == 5)
2506                 DRM_ERROR("FDI train 1 fail!\n");
2507
2508         /* Train 2 */
2509         reg = FDI_TX_CTL(pipe);
2510         temp = I915_READ(reg);
2511         temp &= ~FDI_LINK_TRAIN_NONE;
2512         temp |= FDI_LINK_TRAIN_PATTERN_2;
2513         I915_WRITE(reg, temp);
2514
2515         reg = FDI_RX_CTL(pipe);
2516         temp = I915_READ(reg);
2517         temp &= ~FDI_LINK_TRAIN_NONE;
2518         temp |= FDI_LINK_TRAIN_PATTERN_2;
2519         I915_WRITE(reg, temp);
2520
2521         POSTING_READ(reg);
2522         udelay(150);
2523
2524         reg = FDI_RX_IIR(pipe);
2525         for (tries = 0; tries < 5; tries++) {
2526                 temp = I915_READ(reg);
2527                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2528
2529                 if (temp & FDI_RX_SYMBOL_LOCK) {
2530                         I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2531                         DRM_DEBUG_KMS("FDI train 2 done.\n");
2532                         break;
2533                 }
2534         }
2535         if (tries == 5)
2536                 DRM_ERROR("FDI train 2 fail!\n");
2537
2538         DRM_DEBUG_KMS("FDI train done\n");
2539
2540 }
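/*
 * Editor's note: the SNB and IVB training routines below follow the same
 * basic sequence the ILK version above spells out -- put TX and RX into
 * training pattern 1, poll FDI_RX_IIR for FDI_RX_BIT_LOCK, switch to
 * pattern 2, poll for FDI_RX_SYMBOL_LOCK -- with the extra step that they
 * walk through the snb_b_fdi_train_param[] vswing/emphasis values while
 * polling.  intel_fdi_normal_train() (defined above, called from
 * ironlake_pch_enable() below) then moves the link to the normal pattern.
 */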
2541
2542 static const int snb_b_fdi_train_param[] = {
2543         FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2544         FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2545         FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2546         FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2547 };
2548
2549 /* The FDI link training functions for SNB/Cougarpoint. */
2550 static void gen6_fdi_link_train(struct drm_crtc *crtc)
2551 {
2552         struct drm_device *dev = crtc->dev;
2553         struct drm_i915_private *dev_priv = dev->dev_private;
2554         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2555         int pipe = intel_crtc->pipe;
2556         u32 reg, temp, i;
2557
2558         /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2559            for the train result */
2560         reg = FDI_RX_IMR(pipe);
2561         temp = I915_READ(reg);
2562         temp &= ~FDI_RX_SYMBOL_LOCK;
2563         temp &= ~FDI_RX_BIT_LOCK;
2564         I915_WRITE(reg, temp);
2565
2566         POSTING_READ(reg);
2567         udelay(150);
2568
2569         /* enable CPU FDI TX and PCH FDI RX */
2570         reg = FDI_TX_CTL(pipe);
2571         temp = I915_READ(reg);
2572         temp &= ~(7 << 19);
2573         temp |= (intel_crtc->fdi_lanes - 1) << 19;
2574         temp &= ~FDI_LINK_TRAIN_NONE;
2575         temp |= FDI_LINK_TRAIN_PATTERN_1;
2576         temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2577         /* SNB-B */
2578         temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2579         I915_WRITE(reg, temp | FDI_TX_ENABLE);
2580
2581         reg = FDI_RX_CTL(pipe);
2582         temp = I915_READ(reg);
2583         if (HAS_PCH_CPT(dev)) {
2584                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2585                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2586         } else {
2587                 temp &= ~FDI_LINK_TRAIN_NONE;
2588                 temp |= FDI_LINK_TRAIN_PATTERN_1;
2589         }
2590         I915_WRITE(reg, temp | FDI_RX_ENABLE);
2591
2592         POSTING_READ(reg);
2593         udelay(150);
2594
2595         for (i = 0; i < 4; i++) {
2596                 reg = FDI_TX_CTL(pipe);
2597                 temp = I915_READ(reg);
2598                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2599                 temp |= snb_b_fdi_train_param[i];
2600                 I915_WRITE(reg, temp);
2601
2602                 POSTING_READ(reg);
2603                 udelay(500);
2604
2605                 reg = FDI_RX_IIR(pipe);
2606                 temp = I915_READ(reg);
2607                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2608
2609                 if (temp & FDI_RX_BIT_LOCK) {
2610                         I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2611                         DRM_DEBUG_KMS("FDI train 1 done.\n");
2612                         break;
2613                 }
2614         }
2615         if (i == 4)
2616                 DRM_ERROR("FDI train 1 fail!\n");
2617
2618         /* Train 2 */
2619         reg = FDI_TX_CTL(pipe);
2620         temp = I915_READ(reg);
2621         temp &= ~FDI_LINK_TRAIN_NONE;
2622         temp |= FDI_LINK_TRAIN_PATTERN_2;
2623         if (IS_GEN6(dev)) {
2624                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2625                 /* SNB-B */
2626                 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2627         }
2628         I915_WRITE(reg, temp);
2629
2630         reg = FDI_RX_CTL(pipe);
2631         temp = I915_READ(reg);
2632         if (HAS_PCH_CPT(dev)) {
2633                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2634                 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2635         } else {
2636                 temp &= ~FDI_LINK_TRAIN_NONE;
2637                 temp |= FDI_LINK_TRAIN_PATTERN_2;
2638         }
2639         I915_WRITE(reg, temp);
2640
2641         POSTING_READ(reg);
2642         udelay(150);
2643
2644         for (i = 0; i < 4; i++) {
2645                 reg = FDI_TX_CTL(pipe);
2646                 temp = I915_READ(reg);
2647                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2648                 temp |= snb_b_fdi_train_param[i];
2649                 I915_WRITE(reg, temp);
2650
2651                 POSTING_READ(reg);
2652                 udelay(500);
2653
2654                 reg = FDI_RX_IIR(pipe);
2655                 temp = I915_READ(reg);
2656                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2657
2658                 if (temp & FDI_RX_SYMBOL_LOCK) {
2659                         I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2660                         DRM_DEBUG_KMS("FDI train 2 done.\n");
2661                         break;
2662                 }
2663         }
2664         if (i == 4)
2665                 DRM_ERROR("FDI train 2 fail!\n");
2666
2667         DRM_DEBUG_KMS("FDI train done.\n");
2668 }
2669
2670 /* Manual link training for Ivy Bridge A0 parts */
2671 static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2672 {
2673         struct drm_device *dev = crtc->dev;
2674         struct drm_i915_private *dev_priv = dev->dev_private;
2675         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2676         int pipe = intel_crtc->pipe;
2677         u32 reg, temp, i;
2678
2679         /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2680            for the train result */
2681         reg = FDI_RX_IMR(pipe);
2682         temp = I915_READ(reg);
2683         temp &= ~FDI_RX_SYMBOL_LOCK;
2684         temp &= ~FDI_RX_BIT_LOCK;
2685         I915_WRITE(reg, temp);
2686
2687         POSTING_READ(reg);
2688         udelay(150);
2689
2690         /* enable CPU FDI TX and PCH FDI RX */
2691         reg = FDI_TX_CTL(pipe);
2692         temp = I915_READ(reg);
2693         temp &= ~(7 << 19);
2694         temp |= (intel_crtc->fdi_lanes - 1) << 19;
2695         temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
2696         temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
2697         temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2698         temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2699         temp |= FDI_COMPOSITE_SYNC;
2700         I915_WRITE(reg, temp | FDI_TX_ENABLE);
2701
2702         reg = FDI_RX_CTL(pipe);
2703         temp = I915_READ(reg);
2704         temp &= ~FDI_LINK_TRAIN_AUTO;
2705         temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2706         temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2707         temp |= FDI_COMPOSITE_SYNC;
2708         I915_WRITE(reg, temp | FDI_RX_ENABLE);
2709
2710         POSTING_READ(reg);
2711         udelay(150);
2712
2713         for (i = 0; i < 4; i++) {
2714                 reg = FDI_TX_CTL(pipe);
2715                 temp = I915_READ(reg);
2716                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2717                 temp |= snb_b_fdi_train_param[i];
2718                 I915_WRITE(reg, temp);
2719
2720                 POSTING_READ(reg);
2721                 udelay(500);
2722
2723                 reg = FDI_RX_IIR(pipe);
2724                 temp = I915_READ(reg);
2725                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2726
2727                 if (temp & FDI_RX_BIT_LOCK ||
2728                     (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2729                         I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2730                         DRM_DEBUG_KMS("FDI train 1 done.\n");
2731                         break;
2732                 }
2733         }
2734         if (i == 4)
2735                 DRM_ERROR("FDI train 1 fail!\n");
2736
2737         /* Train 2 */
2738         reg = FDI_TX_CTL(pipe);
2739         temp = I915_READ(reg);
2740         temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2741         temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
2742         temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2743         temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2744         I915_WRITE(reg, temp);
2745
2746         reg = FDI_RX_CTL(pipe);
2747         temp = I915_READ(reg);
2748         temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2749         temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2750         I915_WRITE(reg, temp);
2751
2752         POSTING_READ(reg);
2753         udelay(150);
2754
2755         for (i = 0; i < 4; i++) {
2756                 reg = FDI_TX_CTL(pipe);
2757                 temp = I915_READ(reg);
2758                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2759                 temp |= snb_b_fdi_train_param[i];
2760                 I915_WRITE(reg, temp);
2761
2762                 POSTING_READ(reg);
2763                 udelay(500);
2764
2765                 reg = FDI_RX_IIR(pipe);
2766                 temp = I915_READ(reg);
2767                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2768
2769                 if (temp & FDI_RX_SYMBOL_LOCK) {
2770                         I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2771                         DRM_DEBUG_KMS("FDI train 2 done.\n");
2772                         break;
2773                 }
2774         }
2775         if (i == 4)
2776                 DRM_ERROR("FDI train 2 fail!\n");
2777
2778         DRM_DEBUG_KMS("FDI train done.\n");
2779 }
2780
2781 static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
2782 {
2783         struct drm_device *dev = crtc->dev;
2784         struct drm_i915_private *dev_priv = dev->dev_private;
2785         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2786         int pipe = intel_crtc->pipe;
2787         u32 reg, temp;
2788
2789         /* Write the TU size bits so error detection works */
2790         I915_WRITE(FDI_RX_TUSIZE1(pipe),
2791                    I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
2792
2793         /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
2794         reg = FDI_RX_CTL(pipe);
2795         temp = I915_READ(reg);
2796         temp &= ~((0x7 << 19) | (0x7 << 16));
2797         temp |= (intel_crtc->fdi_lanes - 1) << 19;
2798         temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2799         I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
2800
2801         POSTING_READ(reg);
2802         udelay(200);
2803
2804         /* Switch from Rawclk to PCDclk */
2805         temp = I915_READ(reg);
2806         I915_WRITE(reg, temp | FDI_PCDCLK);
2807
2808         POSTING_READ(reg);
2809         udelay(200);
2810
2811         /* Enable CPU FDI TX PLL, always on for Ironlake */
2812         reg = FDI_TX_CTL(pipe);
2813         temp = I915_READ(reg);
2814         if ((temp & FDI_TX_PLL_ENABLE) == 0) {
2815                 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
2816
2817                 POSTING_READ(reg);
2818                 udelay(100);
2819         }
2820 }
2821
2822 static void ironlake_fdi_disable(struct drm_crtc *crtc)
2823 {
2824         struct drm_device *dev = crtc->dev;
2825         struct drm_i915_private *dev_priv = dev->dev_private;
2826         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2827         int pipe = intel_crtc->pipe;
2828         u32 reg, temp;
2829
2830         /* disable CPU FDI tx and PCH FDI rx */
2831         reg = FDI_TX_CTL(pipe);
2832         temp = I915_READ(reg);
2833         I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2834         POSTING_READ(reg);
2835
2836         reg = FDI_RX_CTL(pipe);
2837         temp = I915_READ(reg);
2838         temp &= ~(0x7 << 16);
2839         temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2840         I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2841
2842         POSTING_READ(reg);
2843         udelay(100);
2844
2845         /* Ironlake workaround, disable clock pointer after downing FDI */
2846         if (HAS_PCH_IBX(dev)) {
2847                 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2848                 I915_WRITE(FDI_RX_CHICKEN(pipe),
2849                            I915_READ(FDI_RX_CHICKEN(pipe)) &
2850                            ~FDI_RX_PHASE_SYNC_POINTER_EN);
2851         }
2852
2853         /* still set train pattern 1 */
2854         reg = FDI_TX_CTL(pipe);
2855         temp = I915_READ(reg);
2856         temp &= ~FDI_LINK_TRAIN_NONE;
2857         temp |= FDI_LINK_TRAIN_PATTERN_1;
2858         I915_WRITE(reg, temp);
2859
2860         reg = FDI_RX_CTL(pipe);
2861         temp = I915_READ(reg);
2862         if (HAS_PCH_CPT(dev)) {
2863                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2864                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2865         } else {
2866                 temp &= ~FDI_LINK_TRAIN_NONE;
2867                 temp |= FDI_LINK_TRAIN_PATTERN_1;
2868         }
2869         /* BPC in FDI rx is consistent with that in PIPECONF */
2870         temp &= ~(0x07 << 16);
2871         temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2872         I915_WRITE(reg, temp);
2873
2874         POSTING_READ(reg);
2875         udelay(100);
2876 }
2877
2878 /*
2879  * When we disable a pipe, we need to clear any pending scanline wait events
2880  * to avoid hanging the ring, which may be stuck waiting on one of them.
2881  */
2882 static void intel_clear_scanline_wait(struct drm_device *dev)
2883 {
2884         struct drm_i915_private *dev_priv = dev->dev_private;
2885         struct intel_ring_buffer *ring;
2886         u32 tmp;
2887
2888         if (IS_GEN2(dev))
2889                 /* Can't break the hang on i8xx */
2890                 return;
2891
2892         ring = LP_RING(dev_priv);
2893         tmp = I915_READ_CTL(ring);
2894         if (tmp & RING_WAIT)
2895                 I915_WRITE_CTL(ring, tmp);
2896 }
2897
2898 static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2899 {
2900         struct drm_i915_gem_object *obj;
2901         struct drm_i915_private *dev_priv;
2902
2903         if (crtc->fb == NULL)
2904                 return;
2905
2906         obj = to_intel_framebuffer(crtc->fb)->obj;
2907         dev_priv = crtc->dev->dev_private;
2908         wait_event(dev_priv->pending_flip_queue,
2909                    atomic_read(&obj->pending_flip) == 0);
2910 }
2911
2912 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2913 {
2914         struct drm_device *dev = crtc->dev;
2915         struct drm_mode_config *mode_config = &dev->mode_config;
2916         struct intel_encoder *encoder;
2917
2918         /*
2919          * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2920          * must be driven by its own crtc; no sharing is possible.
2921          */
2922         list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
2923                 if (encoder->base.crtc != crtc)
2924                         continue;
2925
2926                 switch (encoder->type) {
2927                 case INTEL_OUTPUT_EDP:
2928                         if (!intel_encoder_is_pch_edp(&encoder->base))
2929                                 return false;
2930                         continue;
2931                 }
2932         }
2933
2934         return true;
2935 }
2936
2937 /*
2938  * Enable PCH resources required for PCH ports:
2939  *   - PCH PLLs
2940  *   - FDI training & RX/TX
2941  *   - update transcoder timings
2942  *   - DP transcoding bits
2943  *   - transcoder
2944  */
2945 static void ironlake_pch_enable(struct drm_crtc *crtc)
2946 {
2947         struct drm_device *dev = crtc->dev;
2948         struct drm_i915_private *dev_priv = dev->dev_private;
2949         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2950         int pipe = intel_crtc->pipe;
2951         u32 reg, temp, transc_sel;
2952
2953         /* For PCH output, training FDI link */
2954         dev_priv->display.fdi_link_train(crtc);
2955
2956         intel_enable_pch_pll(dev_priv, pipe);
2957
2958         if (HAS_PCH_CPT(dev)) {
2959                 transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL :
2960                         TRANSC_DPLLB_SEL;
2961
2962                 /* Be sure PCH DPLL SEL is set */
2963                 temp = I915_READ(PCH_DPLL_SEL);
2964                 if (pipe == 0) {
2965                         temp &= ~(TRANSA_DPLLB_SEL);
2966                         temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
2967                 } else if (pipe == 1) {
2968                         temp &= ~(TRANSB_DPLLB_SEL);
2969                         temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
2970                 } else if (pipe == 2) {
2971                         temp &= ~(TRANSC_DPLLB_SEL);
2972                         temp |= (TRANSC_DPLL_ENABLE | transc_sel);
2973                 }
2974                 I915_WRITE(PCH_DPLL_SEL, temp);
2975         }
2976
2977         /* set transcoder timing, panel must allow it */
2978         assert_panel_unlocked(dev_priv, pipe);
2979         I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
2980         I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
2981         I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));
2982
2983         I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
2984         I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
2985         I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
2986         I915_WRITE(TRANS_VSYNCSHIFT(pipe),  I915_READ(VSYNCSHIFT(pipe)));
2987
2988         intel_fdi_normal_train(crtc);
2989
2990         /* For PCH DP, enable TRANS_DP_CTL */
2991         if (HAS_PCH_CPT(dev) &&
2992             (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
2993              intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2994                 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
2995                 reg = TRANS_DP_CTL(pipe);
2996                 temp = I915_READ(reg);
2997                 temp &= ~(TRANS_DP_PORT_SEL_MASK |
2998                           TRANS_DP_SYNC_MASK |
2999                           TRANS_DP_BPC_MASK);
3000                 temp |= (TRANS_DP_OUTPUT_ENABLE |
3001                          TRANS_DP_ENH_FRAMING);
3002                 temp |= bpc << 9; /* same format but at 11:9 */
3003
3004                 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
3005                         temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
3006                 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
3007                         temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
3008
3009                 switch (intel_trans_dp_port_sel(crtc)) {
3010                 case PCH_DP_B:
3011                         temp |= TRANS_DP_PORT_SEL_B;
3012                         break;
3013                 case PCH_DP_C:
3014                         temp |= TRANS_DP_PORT_SEL_C;
3015                         break;
3016                 case PCH_DP_D:
3017                         temp |= TRANS_DP_PORT_SEL_D;
3018                         break;
3019                 default:
3020                         DRM_DEBUG_KMS("Unknown PCH DP port, defaulting to port B\n");
3021                         temp |= TRANS_DP_PORT_SEL_B;
3022                         break;
3023                 }
3024
3025                 I915_WRITE(reg, temp);
3026         }
3027
3028         intel_enable_transcoder(dev_priv, pipe);
3029 }
3030
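/*
 * After a mode set on CPT, check that the pipe's scanline counter (PIPEDSL)
 * is actually advancing.  If it is stuck, pulse the transcoder's autotrain
 * "gen stall disable" chicken bit and re-check before reporting the pipe
 * as stuck.
 */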
3031 void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
3032 {
3033         struct drm_i915_private *dev_priv = dev->dev_private;
3034         int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
3035         u32 temp;
3036
3037         temp = I915_READ(dslreg);
3038         udelay(500);
3039         if (wait_for(I915_READ(dslreg) != temp, 5)) {
3040                 /* Without this, mode sets may fail silently on FDI */
3041                 I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
3042                 udelay(250);
3043                 I915_WRITE(tc2reg, 0);
3044                 if (wait_for(I915_READ(dslreg) != temp, 5))
3045                         DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
3046         }
3047 }
3048
3049 static void ironlake_crtc_enable(struct drm_crtc *crtc)
3050 {
3051         struct drm_device *dev = crtc->dev;
3052         struct drm_i915_private *dev_priv = dev->dev_private;
3053         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3054         int pipe = intel_crtc->pipe;
3055         int plane = intel_crtc->plane;
3056         u32 temp;
3057         bool is_pch_port;
3058
3059         if (intel_crtc->active)
3060                 return;
3061
3062         intel_crtc->active = true;
3063         intel_update_watermarks(dev);
3064
3065         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
3066                 temp = I915_READ(PCH_LVDS);
3067                 if ((temp & LVDS_PORT_EN) == 0)
3068                         I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
3069         }
3070
3071         is_pch_port = intel_crtc_driving_pch(crtc);
3072
3073         if (is_pch_port)
3074                 ironlake_fdi_pll_enable(crtc);
3075         else
3076                 ironlake_fdi_disable(crtc);
3077
3078         /* Enable panel fitting for LVDS */
3079         if (dev_priv->pch_pf_size &&
3080             (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
3081                 /* Force use of hard-coded filter coefficients
3082                  * as some pre-programmed values are broken,
3083                  * e.g. x201.
3084                  */
3085                 if (IS_IVYBRIDGE(dev))
3086                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
3087                                                  PF_PIPE_SEL_IVB(pipe));
3088                 else
3089                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
3090                 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
3091                 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
3092         }
3093
3094         /*
3095          * On ILK+ LUT must be loaded before the pipe is running but with
3096          * clocks enabled
3097          */
3098         intel_crtc_load_lut(crtc);
3099
3100         intel_enable_pipe(dev_priv, pipe, is_pch_port);
3101         intel_enable_plane(dev_priv, plane, pipe);
3102
3103         if (is_pch_port)
3104                 ironlake_pch_enable(crtc);
3105
3106         mutex_lock(&dev->struct_mutex);
3107         intel_update_fbc(dev);
3108         mutex_unlock(&dev->struct_mutex);
3109
3110         intel_crtc_update_cursor(crtc, true);
3111 }
3112
3113 static void ironlake_crtc_disable(struct drm_crtc *crtc)
3114 {
3115         struct drm_device *dev = crtc->dev;
3116         struct drm_i915_private *dev_priv = dev->dev_private;
3117         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3118         int pipe = intel_crtc->pipe;
3119         int plane = intel_crtc->plane;
3120         u32 reg, temp;
3121
3122         if (!intel_crtc->active)
3123                 return;
3124
3125         intel_crtc_wait_for_pending_flips(crtc);
3126         drm_vblank_off(dev, pipe);
3127         intel_crtc_update_cursor(crtc, false);
3128
3129         intel_disable_plane(dev_priv, plane, pipe);
3130
3131         if (dev_priv->cfb_plane == plane)
3132                 intel_disable_fbc(dev);
3133
3134         intel_disable_pipe(dev_priv, pipe);
3135
3136         /* Disable PF */
3137         I915_WRITE(PF_CTL(pipe), 0);
3138         I915_WRITE(PF_WIN_SZ(pipe), 0);
3139
3140         ironlake_fdi_disable(crtc);
3141
3142         /* This is a horrible layering violation; we should be doing this in
3143          * the connector/encoder ->prepare instead, but we don't always have
3144          * enough information there about the config to know whether it will
3145          * actually be necessary or just cause undesired flicker.
3146          */
3147         intel_disable_pch_ports(dev_priv, pipe);
3148
3149         intel_disable_transcoder(dev_priv, pipe);
3150
3151         if (HAS_PCH_CPT(dev)) {
3152                 /* disable TRANS_DP_CTL */
3153                 reg = TRANS_DP_CTL(pipe);
3154                 temp = I915_READ(reg);
3155                 temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
3156                 temp |= TRANS_DP_PORT_SEL_NONE;
3157                 I915_WRITE(reg, temp);
3158
3159                 /* disable DPLL_SEL */
3160                 temp = I915_READ(PCH_DPLL_SEL);
3161                 switch (pipe) {
3162                 case 0:
3163                         temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
3164                         break;
3165                 case 1:
3166                         temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
3167                         break;
3168                 case 2:
3169                         /* C shares PLL A or B */
3170                         temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
3171                         break;
3172                 default:
3173                         BUG(); /* unreachable: only transcoders A-C exist */
3174                 }
3175                 I915_WRITE(PCH_DPLL_SEL, temp);
3176         }
3177
3178         /* disable PCH DPLL */
3179         if (!intel_crtc->no_pll)
3180                 intel_disable_pch_pll(dev_priv, pipe);
3181
3182         /* Switch from PCDclk to Rawclk */
3183         reg = FDI_RX_CTL(pipe);
3184         temp = I915_READ(reg);
3185         I915_WRITE(reg, temp & ~FDI_PCDCLK);
3186
3187         /* Disable CPU FDI TX PLL */
3188         reg = FDI_TX_CTL(pipe);
3189         temp = I915_READ(reg);
3190         I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3191
3192         POSTING_READ(reg);
3193         udelay(100);
3194
3195         reg = FDI_RX_CTL(pipe);
3196         temp = I915_READ(reg);
3197         I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3198
3199         /* Wait for the clocks to turn off. */
3200         POSTING_READ(reg);
3201         udelay(100);
3202
3203         intel_crtc->active = false;
3204         intel_update_watermarks(dev);
3205
3206         mutex_lock(&dev->struct_mutex);
3207         intel_update_fbc(dev);
3208         intel_clear_scanline_wait(dev);
3209         mutex_unlock(&dev->struct_mutex);
3210 }
3211
3212 static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
3213 {
3214         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3215         int pipe = intel_crtc->pipe;
3216         int plane = intel_crtc->plane;
3217
3218         /* XXX: When our outputs are all unaware of DPMS modes other than off
3219          * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3220          */
3221         switch (mode) {
3222         case DRM_MODE_DPMS_ON:
3223         case DRM_MODE_DPMS_STANDBY:
3224         case DRM_MODE_DPMS_SUSPEND:
3225                 DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
3226                 ironlake_crtc_enable(crtc);
3227                 break;
3228
3229         case DRM_MODE_DPMS_OFF:
3230                 DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
3231                 ironlake_crtc_disable(crtc);
3232                 break;
3233         }
3234 }
3235
3236 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3237 {
3238         if (!enable && intel_crtc->overlay) {
3239                 struct drm_device *dev = intel_crtc->base.dev;
3240                 struct drm_i915_private *dev_priv = dev->dev_private;
3241
3242                 mutex_lock(&dev->struct_mutex);
3243                 dev_priv->mm.interruptible = false;
3244                 (void) intel_overlay_switch_off(intel_crtc->overlay);
3245                 dev_priv->mm.interruptible = true;
3246                 mutex_unlock(&dev->struct_mutex);
3247         }
3248
3249         /* Let userspace switch the overlay on again. In most cases userspace
3250          * has to recompute where to put it anyway.
3251          */
3252 }
3253
3254 static void i9xx_crtc_enable(struct drm_crtc *crtc)
3255 {
3256         struct drm_device *dev = crtc->dev;
3257         struct drm_i915_private *dev_priv = dev->dev_private;
3258         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3259         int pipe = intel_crtc->pipe;
3260         int plane = intel_crtc->plane;
3261
3262         if (intel_crtc->active)
3263                 return;
3264
3265         intel_crtc->active = true;
3266         intel_update_watermarks(dev);
3267
3268         intel_enable_pll(dev_priv, pipe);
3269         intel_enable_pipe(dev_priv, pipe, false);
3270         intel_enable_plane(dev_priv, plane, pipe);
3271
3272         intel_crtc_load_lut(crtc);
3273         intel_update_fbc(dev);
3274
3275         /* Give the overlay scaler a chance to enable if it's on this pipe */
3276         intel_crtc_dpms_overlay(intel_crtc, true);
3277         intel_crtc_update_cursor(crtc, true);
3278 }
3279
3280 static void i9xx_crtc_disable(struct drm_crtc *crtc)
3281 {
3282         struct drm_device *dev = crtc->dev;
3283         struct drm_i915_private *dev_priv = dev->dev_private;
3284         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3285         int pipe = intel_crtc->pipe;
3286         int plane = intel_crtc->plane;
3287
3288         if (!intel_crtc->active)
3289                 return;
3290
3291         /* Give the overlay scaler a chance to disable if it's on this pipe */
3292         intel_crtc_wait_for_pending_flips(crtc);
3293         drm_vblank_off(dev, pipe);
3294         intel_crtc_dpms_overlay(intel_crtc, false);
3295         intel_crtc_update_cursor(crtc, false);
3296
3297         if (dev_priv->cfb_plane == plane)
3298                 intel_disable_fbc(dev);
3299
3300         intel_disable_plane(dev_priv, plane, pipe);
3301         intel_disable_pipe(dev_priv, pipe);
3302         intel_disable_pll(dev_priv, pipe);
3303
3304         intel_crtc->active = false;
3305         intel_update_fbc(dev);
3306         intel_update_watermarks(dev);
3307         intel_clear_scanline_wait(dev);
3308 }
3309
3310 static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
3311 {
3312         /* XXX: When our outputs are all unaware of DPMS modes other than off
3313          * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3314          */
3315         switch (mode) {
3316         case DRM_MODE_DPMS_ON:
3317         case DRM_MODE_DPMS_STANDBY:
3318         case DRM_MODE_DPMS_SUSPEND:
3319                 i9xx_crtc_enable(crtc);
3320                 break;
3321         case DRM_MODE_DPMS_OFF:
3322                 i9xx_crtc_disable(crtc);
3323                 break;
3324         }
3325 }
3326
3327 /**
3328  * Sets the power management mode of the pipe and plane.
3329  */
3330 static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
3331 {
3332         struct drm_device *dev = crtc->dev;
3333         struct drm_i915_private *dev_priv = dev->dev_private;
3334         struct drm_i915_master_private *master_priv;
3335         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3336         int pipe = intel_crtc->pipe;
3337         bool enabled;
3338
3339         if (intel_crtc->dpms_mode == mode)
3340                 return;
3341
3342         intel_crtc->dpms_mode = mode;
3343
3344         dev_priv->display.dpms(crtc, mode);
3345
3346         if (!dev->primary->master)
3347                 return;
3348
3349         master_priv = dev->primary->master->driver_priv;
3350         if (!master_priv->sarea_priv)
3351                 return;
3352
3353         enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
3354
3355         switch (pipe) {
3356         case 0:
3357                 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
3358                 master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
3359                 break;
3360         case 1:
3361                 master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
3362                 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
3363                 break;
3364         default:
3365                 DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
3366                 break;
3367         }
3368 }
3369
3370 static void intel_crtc_disable(struct drm_crtc *crtc)
3371 {
3372         struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
3373         struct drm_device *dev = crtc->dev;
3374
3375         /* Flush any pending WAITs before we disable the pipe. Note that
3376          * we need to drop the struct_mutex in order to acquire it again
3377          * during the lowlevel dpms routines around a couple of the
3378          * operations. It does not look trivial nor desirable to move
3379          * that locking higher. So instead we leave a window for the
3380          * submission of further commands on the fb before we can actually
3381          * disable it. This race with userspace exists anyway, and we can
3382          * only rely on the pipe being disabled by userspace after it
3383          * receives the hotplug notification and has flushed any pending
3384          * batches.
3385          */
3386         if (crtc->fb) {
3387                 mutex_lock(&dev->struct_mutex);
3388                 intel_finish_fb(crtc->fb);
3389                 mutex_unlock(&dev->struct_mutex);
3390         }
3391
3392         crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
3393         assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
3394         assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
3395
3396         if (crtc->fb) {
3397                 mutex_lock(&dev->struct_mutex);
3398                 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
3399                 mutex_unlock(&dev->struct_mutex);
3400         }
3401 }
3402
3403 /* Prepare for a mode set.
3404  *
3405  * Note we could be a lot smarter here.  We need to figure out which outputs
3406  * will be enabled, which disabled (in short, how the config will change)
3407  * and perform the minimum necessary steps to accomplish that, e.g. updating
3408  * watermarks, FBC configuration, making sure PLLs are programmed correctly,
3409  * panel fitting is in the proper state, etc.
3410  */
3411 static void i9xx_crtc_prepare(struct drm_crtc *crtc)
3412 {
3413         i9xx_crtc_disable(crtc);
3414 }
3415
3416 static void i9xx_crtc_commit(struct drm_crtc *crtc)
3417 {
3418         i9xx_crtc_enable(crtc);
3419 }
3420
3421 static void ironlake_crtc_prepare(struct drm_crtc *crtc)
3422 {
3423         ironlake_crtc_disable(crtc);
3424 }
3425
3426 static void ironlake_crtc_commit(struct drm_crtc *crtc)
3427 {
3428         ironlake_crtc_enable(crtc);
3429 }
3430
3431 void intel_encoder_prepare(struct drm_encoder *encoder)
3432 {
3433         struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3434         /* lvds has its own version of prepare see intel_lvds_prepare */
3435         encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
3436 }
3437
3438 void intel_encoder_commit(struct drm_encoder *encoder)
3439 {
3440         struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3441         struct drm_device *dev = encoder->dev;
3442         struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3443         struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
3444
3445         /* lvds has its own version of commit see intel_lvds_commit */
3446         encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
3447
3448         if (HAS_PCH_CPT(dev))
3449                 intel_cpt_verify_modeset(dev, intel_crtc->pipe);
3450 }
3451
3452 void intel_encoder_destroy(struct drm_encoder *encoder)
3453 {
3454         struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3455
3456         drm_encoder_cleanup(encoder);
3457         kfree(intel_encoder);
3458 }
3459
3460 static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3461                                   struct drm_display_mode *mode,
3462                                   struct drm_display_mode *adjusted_mode)
3463 {
3464         struct drm_device *dev = crtc->dev;
3465
3466         if (HAS_PCH_SPLIT(dev)) {
3467                 /* FDI link clock is fixed at 2.7G */
3468                 if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
3469                         return false;
3470         }
3471
3472         /* All interlaced capable intel hw wants timings in frames. Note though
3473          * that intel_lvds_mode_fixup does some funny tricks with the crtc
3474  * timings, so we need to be careful not to clobber these. */
3475         if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
3476                 drm_mode_set_crtcinfo(adjusted_mode, 0);
3477
3478         return true;
3479 }
3480
3481 static int i945_get_display_clock_speed(struct drm_device *dev)
3482 {
3483         return 400000;
3484 }
3485
3486 static int i915_get_display_clock_speed(struct drm_device *dev)
3487 {
3488         return 333000;
3489 }
3490
3491 static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
3492 {
3493         return 200000;
3494 }
3495
3496 static int i915gm_get_display_clock_speed(struct drm_device *dev)
3497 {
3498         u16 gcfgc = 0;
3499
3500         pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3501
3502         if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
3503                 return 133000;
3504         else {
3505                 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
3506                 case GC_DISPLAY_CLOCK_333_MHZ:
3507                         return 333000;
3508                 default:
3509                 case GC_DISPLAY_CLOCK_190_200_MHZ:
3510                         return 190000;
3511                 }
3512         }
3513 }
3514
3515 static int i865_get_display_clock_speed(struct drm_device *dev)
3516 {
3517         return 266000;
3518 }
3519
3520 static int i855_get_display_clock_speed(struct drm_device *dev)
3521 {
3522         u16 hpllcc = 0;
3523         /* Assume that the hardware is in the high speed state.  This
3524          * should be the default.
3525          */
3526         switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
3527         case GC_CLOCK_133_200:
3528         case GC_CLOCK_100_200:
3529                 return 200000;
3530         case GC_CLOCK_166_250:
3531                 return 250000;
3532         case GC_CLOCK_100_133:
3533                 return 133000;
3534         }
3535
3536         /* Shouldn't happen */
3537         return 0;
3538 }
3539
3540 static int i830_get_display_clock_speed(struct drm_device *dev)
3541 {
3542         return 133000;
3543 }
3544
3545 struct fdi_m_n {
3546         u32        tu;
3547         u32        gmch_m;
3548         u32        gmch_n;
3549         u32        link_m;
3550         u32        link_n;
3551 };
3552
3553 static void
3554 fdi_reduce_ratio(u32 *num, u32 *den)
3555 {
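        /* Halve both terms until each fits in the 24-bit M/N register
         * fields (<= 0xffffff); the ratio is preserved up to rounding.
         */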
3556         while (*num > 0xffffff || *den > 0xffffff) {
3557                 *num >>= 1;
3558                 *den >>= 1;
3559         }
3560 }
3561
3562 static void
3563 ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
3564                      int link_clock, struct fdi_m_n *m_n)
3565 {
3566         m_n->tu = 64; /* default size */
3567
3568         /* BUG_ON(pixel_clock > INT_MAX / 36); */
3569         m_n->gmch_m = bits_per_pixel * pixel_clock;
3570         m_n->gmch_n = link_clock * nlanes * 8;
3571         fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
3572
3573         m_n->link_m = pixel_clock;
3574         m_n->link_n = link_clock;
3575         fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
3576 }
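
/*
 * Illustrative example with hypothetical values (not taken from any call
 * site): bits_per_pixel = 24, nlanes = 4, pixel_clock = 108000 and
 * link_clock = 270000 give gmch_m = 24 * 108000 = 2592000 and
 * gmch_n = 270000 * 4 * 8 = 8640000; both already fit in 24 bits, so
 * fdi_reduce_ratio() leaves them unchanged.  link_m/link_n simply become
 * 108000/270000.
 */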
3577
3578
3579 struct intel_watermark_params {
3580         unsigned long fifo_size;
3581         unsigned long max_wm;
3582         unsigned long default_wm;
3583         unsigned long guard_size;
3584         unsigned long cacheline_size;
3585 };
3586
3587 /* Pineview has different values for various configs */
3588 static const struct intel_watermark_params pineview_display_wm = {
3589         PINEVIEW_DISPLAY_FIFO,
3590         PINEVIEW_MAX_WM,
3591         PINEVIEW_DFT_WM,
3592         PINEVIEW_GUARD_WM,
3593         PINEVIEW_FIFO_LINE_SIZE
3594 };
3595 static const struct intel_watermark_params pineview_display_hplloff_wm = {
3596         PINEVIEW_DISPLAY_FIFO,
3597         PINEVIEW_MAX_WM,
3598         PINEVIEW_DFT_HPLLOFF_WM,
3599         PINEVIEW_GUARD_WM,
3600         PINEVIEW_FIFO_LINE_SIZE
3601 };
3602 static const struct intel_watermark_params pineview_cursor_wm = {
3603         PINEVIEW_CURSOR_FIFO,
3604         PINEVIEW_CURSOR_MAX_WM,
3605         PINEVIEW_CURSOR_DFT_WM,
3606         PINEVIEW_CURSOR_GUARD_WM,
3607         PINEVIEW_FIFO_LINE_SIZE,
3608 };
3609 static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
3610         PINEVIEW_CURSOR_FIFO,
3611         PINEVIEW_CURSOR_MAX_WM,
3612         PINEVIEW_CURSOR_DFT_WM,
3613         PINEVIEW_CURSOR_GUARD_WM,
3614         PINEVIEW_FIFO_LINE_SIZE
3615 };
3616 static const struct intel_watermark_params g4x_wm_info = {
3617         G4X_FIFO_SIZE,
3618         G4X_MAX_WM,
3619         G4X_MAX_WM,
3620         2,
3621         G4X_FIFO_LINE_SIZE,
3622 };
3623 static const struct intel_watermark_params g4x_cursor_wm_info = {
3624         I965_CURSOR_FIFO,
3625         I965_CURSOR_MAX_WM,
3626         I965_CURSOR_DFT_WM,
3627         2,
3628         G4X_FIFO_LINE_SIZE,
3629 };
3630 static const struct intel_watermark_params i965_cursor_wm_info = {
3631         I965_CURSOR_FIFO,
3632         I965_CURSOR_MAX_WM,
3633         I965_CURSOR_DFT_WM,
3634         2,
3635         I915_FIFO_LINE_SIZE,
3636 };
3637 static const struct intel_watermark_params i945_wm_info = {
3638         I945_FIFO_SIZE,
3639         I915_MAX_WM,
3640         1,
3641         2,
3642         I915_FIFO_LINE_SIZE
3643 };
3644 static const struct intel_watermark_params i915_wm_info = {
3645         I915_FIFO_SIZE,
3646         I915_MAX_WM,
3647         1,
3648         2,
3649         I915_FIFO_LINE_SIZE
3650 };
3651 static const struct intel_watermark_params i855_wm_info = {
3652         I855GM_FIFO_SIZE,
3653         I915_MAX_WM,
3654         1,
3655         2,
3656         I830_FIFO_LINE_SIZE
3657 };
3658 static const struct intel_watermark_params i830_wm_info = {
3659         I830_FIFO_SIZE,
3660         I915_MAX_WM,
3661         1,
3662         2,
3663         I830_FIFO_LINE_SIZE
3664 };
3665
3666 static const struct intel_watermark_params ironlake_display_wm_info = {
3667         ILK_DISPLAY_FIFO,
3668         ILK_DISPLAY_MAXWM,
3669         ILK_DISPLAY_DFTWM,
3670         2,
3671         ILK_FIFO_LINE_SIZE
3672 };
3673 static const struct intel_watermark_params ironlake_cursor_wm_info = {
3674         ILK_CURSOR_FIFO,
3675         ILK_CURSOR_MAXWM,
3676         ILK_CURSOR_DFTWM,
3677         2,
3678         ILK_FIFO_LINE_SIZE
3679 };
3680 static const struct intel_watermark_params ironlake_display_srwm_info = {
3681         ILK_DISPLAY_SR_FIFO,
3682         ILK_DISPLAY_MAX_SRWM,
3683         ILK_DISPLAY_DFT_SRWM,
3684         2,
3685         ILK_FIFO_LINE_SIZE
3686 };
3687 static const struct intel_watermark_params ironlake_cursor_srwm_info = {
3688         ILK_CURSOR_SR_FIFO,
3689         ILK_CURSOR_MAX_SRWM,
3690         ILK_CURSOR_DFT_SRWM,
3691         2,
3692         ILK_FIFO_LINE_SIZE
3693 };
3694
3695 static const struct intel_watermark_params sandybridge_display_wm_info = {
3696         SNB_DISPLAY_FIFO,
3697         SNB_DISPLAY_MAXWM,
3698         SNB_DISPLAY_DFTWM,
3699         2,
3700         SNB_FIFO_LINE_SIZE
3701 };
3702 static const struct intel_watermark_params sandybridge_cursor_wm_info = {
3703         SNB_CURSOR_FIFO,
3704         SNB_CURSOR_MAXWM,
3705         SNB_CURSOR_DFTWM,
3706         2,
3707         SNB_FIFO_LINE_SIZE
3708 };
3709 static const struct intel_watermark_params sandybridge_display_srwm_info = {
3710         SNB_DISPLAY_SR_FIFO,
3711         SNB_DISPLAY_MAX_SRWM,
3712         SNB_DISPLAY_DFT_SRWM,
3713         2,
3714         SNB_FIFO_LINE_SIZE
3715 };
3716 static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
3717         SNB_CURSOR_SR_FIFO,
3718         SNB_CURSOR_MAX_SRWM,
3719         SNB_CURSOR_DFT_SRWM,
3720         2,
3721         SNB_FIFO_LINE_SIZE
3722 };
3723
3724
3725 /**
3726  * intel_calculate_wm - calculate watermark level
3727  * @clock_in_khz: pixel clock
3728  * @wm: chip FIFO params
3729  * @pixel_size: display pixel size
3730  * @latency_ns: memory latency for the platform
3731  *
3732  * Calculate the watermark level (the level at which the display plane will
3733  * start fetching from memory again).  Each chip has a different display
3734  * FIFO size and allocation, so the caller needs to figure that out and pass
3735  * in the correct intel_watermark_params structure.
3736  *
3737  * As the pixel clock runs, the FIFO will be drained at a rate that depends
3738  * on the pixel size.  When it reaches the watermark level, it'll start
3739  * fetching FIFO-line-sized chunks from memory until the FIFO fills
3740  * past the watermark point.  If the FIFO drains completely, a FIFO underrun
3741  * will occur, and a display engine hang could result.
3742  */
3743 static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
3744                                         const struct intel_watermark_params *wm,
3745                                         int fifo_size,
3746                                         int pixel_size,
3747                                         unsigned long latency_ns)
3748 {
3749         long entries_required, wm_size;
3750
3751         /*
3752          * Note: we need to make sure we don't overflow for various clock &
3753          * latency values.
3754          * Clocks range from a few thousand to several hundred thousand kHz;
3755          * latency is usually a few thousand ns.
3756          */
3757         entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
3758                 1000;
3759         entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
3760
3761         DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
3762
3763         wm_size = fifo_size - (entries_required + wm->guard_size);
3764
3765         DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
3766
3767         /* Don't promote wm_size to unsigned... */
3768         if (wm_size > (long)wm->max_wm)
3769                 wm_size = wm->max_wm;
3770         if (wm_size <= 0)
3771                 wm_size = wm->default_wm;
3772         return wm_size;
3773 }
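
/*
 * Worked example with hypothetical numbers: clock_in_khz = 100000,
 * pixel_size = 4, latency_ns = 5000, cacheline_size = 64, guard_size = 2
 * and fifo_size = 96 give entries_required = (100 * 4 * 5000) / 1000 =
 * 2000 bytes = 32 cachelines, so wm_size = 96 - (32 + 2) = 62, subject to
 * the max_wm/default_wm clamping above.
 */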
3774
3775 struct cxsr_latency {
3776         int is_desktop;
3777         int is_ddr3;
3778         unsigned long fsb_freq;
3779         unsigned long mem_freq;
3780         unsigned long display_sr;
3781         unsigned long display_hpll_disable;
3782         unsigned long cursor_sr;
3783         unsigned long cursor_hpll_disable;
3784 };
3785
3786 static const struct cxsr_latency cxsr_latency_table[] = {
3787         {1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
3788         {1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
3789         {1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
3790         {1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
3791         {1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */
3792
3793         {1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
3794         {1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
3795         {1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
3796         {1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
3797         {1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */
3798
3799         {1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
3800         {1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
3801         {1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
3802         {1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
3803         {1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */
3804
3805         {0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
3806         {0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
3807         {0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
3808         {0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
3809         {0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */
3810
3811         {0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
3812         {0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
3813         {0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
3814         {0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
3815         {0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */
3816
3817         {0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
3818         {0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
3819         {0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
3820         {0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
3821         {0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
3822 };
3823
3824 static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
3825                                                          int is_ddr3,
3826                                                          int fsb,
3827                                                          int mem)
3828 {
3829         const struct cxsr_latency *latency;
3830         int i;
3831
3832         if (fsb == 0 || mem == 0)
3833                 return NULL;
3834
3835         for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
3836                 latency = &cxsr_latency_table[i];
3837                 if (is_desktop == latency->is_desktop &&
3838                     is_ddr3 == latency->is_ddr3 &&
3839                     fsb == latency->fsb_freq && mem == latency->mem_freq)
3840                         return latency;
3841         }
3842
3843         DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
3844
3845         return NULL;
3846 }
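
/*
 * For example, a system with is_desktop = 1, is_ddr3 = 1, fsb = 800 and
 * mem = 667 matches the {1, 1, 800, 667, 6420, 36420, 6873, 36873} entry
 * above, i.e. a display self-refresh latency of 6420 ns.
 */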
3847
3848 static void pineview_disable_cxsr(struct drm_device *dev)
3849 {
3850         struct drm_i915_private *dev_priv = dev->dev_private;
3851
3852         /* deactivate cxsr */
3853         I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
3854 }
3855
3856 /*
3857  * Latency for FIFO fetches is dependent on several factors:
3858  *   - memory configuration (speed, channels)
3859  *   - chipset
3860  *   - current MCH state
3861  * It can be fairly high in some situations, so here we assume a fairly
3862  * pessimal value.  It's a tradeoff between extra memory fetches (if we
3863  * set this value too high, the FIFO will fetch frequently to stay full)
3864  * and power consumption (set it too low to save power and we might see
3865  * FIFO underruns and display "flicker").
3866  *
3867  * A value of 5us seems to be a good balance; safe for very low end
3868  * platforms but not overly aggressive on lower latency configs.
3869  */
3870 static const int latency_ns = 5000;
3871
3872 static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
3873 {
3874         struct drm_i915_private *dev_priv = dev->dev_private;
3875         uint32_t dsparb = I915_READ(DSPARB);
3876         int size;
3877
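        /* DSPARB partitions the shared display FIFO: the low bits give
         * plane A's allocation, and plane B's share is the distance from
         * the end of plane A to the next boundary field.
         */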
3878         size = dsparb & 0x7f;
3879         if (plane)
3880                 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
3881
3882         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3883                       plane ? "B" : "A", size);
3884
3885         return size;
3886 }
3887
3888 static int i85x_get_fifo_size(struct drm_device *dev, int plane)
3889 {
3890         struct drm_i915_private *dev_priv = dev->dev_private;
3891         uint32_t dsparb = I915_READ(DSPARB);
3892         int size;
3893
3894         size = dsparb & 0x1ff;
3895         if (plane)
3896                 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
3897         size >>= 1; /* Convert to cachelines */
3898
3899         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3900                       plane ? "B" : "A", size);
3901
3902         return size;
3903 }
3904
3905 static int i845_get_fifo_size(struct drm_device *dev, int plane)
3906 {
3907         struct drm_i915_private *dev_priv = dev->dev_private;
3908         uint32_t dsparb = I915_READ(DSPARB);
3909         int size;
3910
3911         size = dsparb & 0x7f;
3912         size >>= 2; /* Convert to cachelines */
3913
3914         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3915                       plane ? "B" : "A",
3916                       size);
3917
3918         return size;
3919 }
3920
3921 static int i830_get_fifo_size(struct drm_device *dev, int plane)
3922 {
3923         struct drm_i915_private *dev_priv = dev->dev_private;
3924         uint32_t dsparb = I915_READ(DSPARB);
3925         int size;
3926
3927         size = dsparb & 0x7f;
3928         size >>= 1; /* Convert to cachelines */
3929
3930         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3931                       plane ? "B" : "A", size);
3932
3933         return size;
3934 }
3935
3936 static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
3937 {
3938         struct drm_crtc *crtc, *enabled = NULL;
3939
3940         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3941                 if (crtc->enabled && crtc->fb) {
3942                         if (enabled)
3943                                 return NULL;
3944                         enabled = crtc;
3945                 }
3946         }
3947
3948         return enabled;
3949 }
3950
3951 static void pineview_update_wm(struct drm_device *dev)
3952 {
3953         struct drm_i915_private *dev_priv = dev->dev_private;
3954         struct drm_crtc *crtc;
3955         const struct cxsr_latency *latency;
3956         u32 reg;
3957         unsigned long wm;
3958
3959         latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
3960                                          dev_priv->fsb_freq, dev_priv->mem_freq);
3961         if (!latency) {
3962                 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
3963                 pineview_disable_cxsr(dev);
3964                 return;
3965         }
3966
3967         crtc = single_enabled_crtc(dev);
3968         if (crtc) {
3969                 int clock = crtc->mode.clock;
3970                 int pixel_size = crtc->fb->bits_per_pixel / 8;
3971
3972                 /* Display SR */
3973                 wm = intel_calculate_wm(clock, &pineview_display_wm,
3974                                         pineview_display_wm.fifo_size,
3975                                         pixel_size, latency->display_sr);
3976                 reg = I915_READ(DSPFW1);
3977                 reg &= ~DSPFW_SR_MASK;
3978                 reg |= wm << DSPFW_SR_SHIFT;
3979                 I915_WRITE(DSPFW1, reg);
3980                 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
3981
3982                 /* cursor SR */
3983                 wm = intel_calculate_wm(clock, &pineview_cursor_wm,
3984                                         pineview_display_wm.fifo_size,
3985                                         pixel_size, latency->cursor_sr);
3986                 reg = I915_READ(DSPFW3);
3987                 reg &= ~DSPFW_CURSOR_SR_MASK;
3988                 reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
3989                 I915_WRITE(DSPFW3, reg);
3990
3991                 /* Display HPLL off SR */
3992                 wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
3993                                         pineview_display_hplloff_wm.fifo_size,
3994                                         pixel_size, latency->display_hpll_disable);
3995                 reg = I915_READ(DSPFW3);
3996                 reg &= ~DSPFW_HPLL_SR_MASK;
3997                 reg |= wm & DSPFW_HPLL_SR_MASK;
3998                 I915_WRITE(DSPFW3, reg);
3999
4000                 /* cursor HPLL off SR */
4001                 wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
4002                                         pineview_display_hplloff_wm.fifo_size,
4003                                         pixel_size, latency->cursor_hpll_disable);
4004                 reg = I915_READ(DSPFW3);
4005                 reg &= ~DSPFW_HPLL_CURSOR_MASK;
4006                 reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
4007                 I915_WRITE(DSPFW3, reg);
4008                 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
4009
4010                 /* activate cxsr */
4011                 I915_WRITE(DSPFW3,
4012                            I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
4013                 DRM_DEBUG_KMS("Self-refresh is enabled\n");
4014         } else {
4015                 pineview_disable_cxsr(dev);
4016                 DRM_DEBUG_KMS("Self-refresh is disabled\n");
4017         }
4018 }
4019
4020 static bool g4x_compute_wm0(struct drm_device *dev,
4021                             int plane,
4022                             const struct intel_watermark_params *display,
4023                             int display_latency_ns,
4024                             const struct intel_watermark_params *cursor,
4025                             int cursor_latency_ns,
4026                             int *plane_wm,
4027                             int *cursor_wm)
4028 {
4029         struct drm_crtc *crtc;
4030         int htotal, hdisplay, clock, pixel_size;
4031         int line_time_us, line_count;
4032         int entries, tlb_miss;
4033
4034         crtc = intel_get_crtc_for_plane(dev, plane);
4035         if (crtc->fb == NULL || !crtc->enabled) {
4036                 *cursor_wm = cursor->guard_size;
4037                 *plane_wm = display->guard_size;
4038                 return false;
4039         }
4040
4041         htotal = crtc->mode.htotal;
4042         hdisplay = crtc->mode.hdisplay;
4043         clock = crtc->mode.clock;
4044         pixel_size = crtc->fb->bits_per_pixel / 8;
4045
4046         /* Use the small buffer method to calculate plane watermark */
4047         entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
4048         tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
4049         if (tlb_miss > 0)
4050                 entries += tlb_miss;
4051         entries = DIV_ROUND_UP(entries, display->cacheline_size);
4052         *plane_wm = entries + display->guard_size;
4053         if (*plane_wm > (int)display->max_wm)
4054                 *plane_wm = display->max_wm;
4055
4056         /* Use the large buffer method to calculate cursor watermark */
4057         line_time_us = ((htotal * 1000) / clock);
4058         line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
4059         entries = line_count * 64 * pixel_size;
4060         tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
4061         if (tlb_miss > 0)
4062                 entries += tlb_miss;
4063         entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4064         *cursor_wm = entries + cursor->guard_size;
4065         if (*cursor_wm > (int)cursor->max_wm)
4066                 *cursor_wm = (int)cursor->max_wm;
4067
4068         return true;
4069 }
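
/*
 * Illustrative cursor-watermark numbers (hypothetical, assuming a 64-byte
 * FIFO line): htotal = 2000 and clock = 100000 kHz give line_time_us = 20;
 * with cursor_latency_ns = 5000 and pixel_size = 4, line_count = 1 and
 * entries = 1 * 64 * 4 = 256 bytes = 4 cachelines, so *cursor_wm = 4 plus
 * the guard size (2 for g4x_cursor_wm_info), ignoring the TLB-miss term.
 */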
4070
4071 /*
4072  * Check the wm result.
4073  *
4074  * If any calculated watermark values is larger than the maximum value that
4075  * can be programmed into the associated watermark register, that watermark
4076  * must be disabled.
4077  */
4078 static bool g4x_check_srwm(struct drm_device *dev,
4079                            int display_wm, int cursor_wm,
4080                            const struct intel_watermark_params *display,
4081                            const struct intel_watermark_params *cursor)
4082 {
4083         DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
4084                       display_wm, cursor_wm);
4085
4086         if (display_wm > display->max_wm) {
4087                 DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
4088                               display_wm, display->max_wm);
4089                 return false;
4090         }
4091
4092         if (cursor_wm > cursor->max_wm) {
4093                 DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
4094                               cursor_wm, cursor->max_wm);
4095                 return false;
4096         }
4097
4098         if (!(display_wm || cursor_wm)) {
4099                 DRM_DEBUG_KMS("SR latency is 0, disabling\n");
4100                 return false;
4101         }
4102
4103         return true;
4104 }
4105
4106 static bool g4x_compute_srwm(struct drm_device *dev,
4107                              int plane,
4108                              int latency_ns,
4109                              const struct intel_watermark_params *display,
4110                              const struct intel_watermark_params *cursor,
4111                              int *display_wm, int *cursor_wm)
4112 {
4113         struct drm_crtc *crtc;
4114         int hdisplay, htotal, pixel_size, clock;
4115         unsigned long line_time_us;
4116         int line_count, line_size;
4117         int small, large;
4118         int entries;
4119
4120         if (!latency_ns) {
4121                 *display_wm = *cursor_wm = 0;
4122                 return false;
4123         }
4124
4125         crtc = intel_get_crtc_for_plane(dev, plane);
4126         hdisplay = crtc->mode.hdisplay;
4127         htotal = crtc->mode.htotal;
4128         clock = crtc->mode.clock;
4129         pixel_size = crtc->fb->bits_per_pixel / 8;
4130
4131         line_time_us = (htotal * 1000) / clock;
4132         line_count = (latency_ns / line_time_us + 1000) / 1000;
4133         line_size = hdisplay * pixel_size;
4134
4135         /* Use the minimum of the small and large buffer method for primary */
4136         small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4137         large = line_count * line_size;
4138
4139         entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
4140         *display_wm = entries + display->guard_size;
4141
4142         /* calculate the self-refresh watermark for display cursor */
4143         entries = line_count * pixel_size * 64;
4144         entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4145         *cursor_wm = entries + cursor->guard_size;
4146
4147         return g4x_check_srwm(dev,
4148                               *display_wm, *cursor_wm,
4149                               display, cursor);
4150 }
4151
4152 #define single_plane_enabled(mask) is_power_of_2(mask)
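/* exactly one plane enabled <=> exactly one bit set in the enabled mask */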
4153
4154 static void g4x_update_wm(struct drm_device *dev)
4155 {
4156         static const int sr_latency_ns = 12000;
4157         struct drm_i915_private *dev_priv = dev->dev_private;
4158         int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
4159         int plane_sr, cursor_sr;
4160         unsigned int enabled = 0;
4161
4162         if (g4x_compute_wm0(dev, 0,
4163                             &g4x_wm_info, latency_ns,
4164                             &g4x_cursor_wm_info, latency_ns,
4165                             &planea_wm, &cursora_wm))
4166                 enabled |= 1;
4167
4168         if (g4x_compute_wm0(dev, 1,
4169                             &g4x_wm_info, latency_ns,
4170                             &g4x_cursor_wm_info, latency_ns,
4171                             &planeb_wm, &cursorb_wm))
4172                 enabled |= 2;
4173
4174         plane_sr = cursor_sr = 0;
4175         if (single_plane_enabled(enabled) &&
4176             g4x_compute_srwm(dev, ffs(enabled) - 1,
4177                              sr_latency_ns,
4178                              &g4x_wm_info,
4179                              &g4x_cursor_wm_info,
4180                              &plane_sr, &cursor_sr))
4181                 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
4182         else
4183                 I915_WRITE(FW_BLC_SELF,
4184                            I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
4185
4186         DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
4187                       planea_wm, cursora_wm,
4188                       planeb_wm, cursorb_wm,
4189                       plane_sr, cursor_sr);
4190
4191         I915_WRITE(DSPFW1,
4192                    (plane_sr << DSPFW_SR_SHIFT) |
4193                    (cursorb_wm << DSPFW_CURSORB_SHIFT) |
4194                    (planeb_wm << DSPFW_PLANEB_SHIFT) |
4195                    planea_wm);
4196         I915_WRITE(DSPFW2,
4197                    (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
4198                    (cursora_wm << DSPFW_CURSORA_SHIFT));
4199         /* HPLL off in SR has some issues on G4x... disable it */
4200         I915_WRITE(DSPFW3,
4201                    (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
4202                    (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
4203 }
4204
4205 static void i965_update_wm(struct drm_device *dev)
4206 {
4207         struct drm_i915_private *dev_priv = dev->dev_private;
4208         struct drm_crtc *crtc;
4209         int srwm = 1;
4210         int cursor_sr = 16;
4211
4212         /* Calc sr entries for one plane configs */
4213         crtc = single_enabled_crtc(dev);
4214         if (crtc) {
4215                 /* self-refresh has much higher latency */
4216                 static const int sr_latency_ns = 12000;
4217                 int clock = crtc->mode.clock;
4218                 int htotal = crtc->mode.htotal;
4219                 int hdisplay = crtc->mode.hdisplay;
4220                 int pixel_size = crtc->fb->bits_per_pixel / 8;
4221                 unsigned long line_time_us;
4222                 int entries;
4223
4224                 line_time_us = ((htotal * 1000) / clock);
4225
4226                 /* Use ns/us then divide to preserve precision */
4227                 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
4228                         pixel_size * hdisplay;
4229                 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
4230                 srwm = I965_FIFO_SIZE - entries;
4231                 if (srwm < 0)
4232                         srwm = 1;
4233                 srwm &= 0x1ff;
4234                 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
4235                               entries, srwm);
4236
4237                 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
4238                         pixel_size * 64;
4239                 entries = DIV_ROUND_UP(entries,
4240                                           i965_cursor_wm_info.cacheline_size);
4241                 cursor_sr = i965_cursor_wm_info.fifo_size -
4242                         (entries + i965_cursor_wm_info.guard_size);
4243
4244                 if (cursor_sr > i965_cursor_wm_info.max_wm)
4245                         cursor_sr = i965_cursor_wm_info.max_wm;
4246
4247                 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
4248                               "cursor %d\n", srwm, cursor_sr);
4249
4250                 if (IS_CRESTLINE(dev))
4251                         I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
4252         } else {
4253                 /* Turn off self refresh if both pipes are enabled */
4254                 if (IS_CRESTLINE(dev))
4255                         I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
4256                                    & ~FW_BLC_SELF_EN);
4257         }
4258
4259         DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
4260                       srwm);
4261
4262         /* 965 has limitations... */
4263         I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
4264                    (8 << 16) | (8 << 8) | (8 << 0));
4265         I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
4266         /* update cursor SR watermark */
4267         I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
4268 }
4269
4270 static void i9xx_update_wm(struct drm_device *dev)
4271 {
4272         struct drm_i915_private *dev_priv = dev->dev_private;
4273         const struct intel_watermark_params *wm_info;
4274         uint32_t fwater_lo;
4275         uint32_t fwater_hi;
4276         int cwm, srwm = 1;
4277         int fifo_size;
4278         int planea_wm, planeb_wm;
4279         struct drm_crtc *crtc, *enabled = NULL;
4280
4281         if (IS_I945GM(dev))
4282                 wm_info = &i945_wm_info;
4283         else if (!IS_GEN2(dev))
4284                 wm_info = &i915_wm_info;
4285         else
4286                 wm_info = &i855_wm_info;
4287
4288         fifo_size = dev_priv->display.get_fifo_size(dev, 0);
4289         crtc = intel_get_crtc_for_plane(dev, 0);
4290         if (crtc->enabled && crtc->fb) {
4291                 planea_wm = intel_calculate_wm(crtc->mode.clock,
4292                                                wm_info, fifo_size,
4293                                                crtc->fb->bits_per_pixel / 8,
4294                                                latency_ns);
4295                 enabled = crtc;
4296         } else
4297                 planea_wm = fifo_size - wm_info->guard_size;
4298
4299         fifo_size = dev_priv->display.get_fifo_size(dev, 1);
4300         crtc = intel_get_crtc_for_plane(dev, 1);
4301         if (crtc->enabled && crtc->fb) {
4302                 planeb_wm = intel_calculate_wm(crtc->mode.clock,
4303                                                wm_info, fifo_size,
4304                                                crtc->fb->bits_per_pixel / 8,
4305                                                latency_ns);
4306                 if (enabled == NULL)
4307                         enabled = crtc;
4308                 else
4309                         enabled = NULL;
4310         } else
4311                 planeb_wm = fifo_size - wm_info->guard_size;
4312
4313         DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
4314
4315         /*
4316          * Overlay gets an aggressive default since video jitter is bad.
4317          */
4318         cwm = 2;
4319
4320         /* Play safe and disable self-refresh before adjusting watermarks. */
4321         if (IS_I945G(dev) || IS_I945GM(dev))
4322                 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
4323         else if (IS_I915GM(dev))
4324                 I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
4325
4326         /* Calc sr entries for one plane configs */
4327         if (HAS_FW_BLC(dev) && enabled) {
4328                 /* self-refresh has much higher latency */
4329                 static const int sr_latency_ns = 6000;
4330                 int clock = enabled->mode.clock;
4331                 int htotal = enabled->mode.htotal;
4332                 int hdisplay = enabled->mode.hdisplay;
4333                 int pixel_size = enabled->fb->bits_per_pixel / 8;
4334                 unsigned long line_time_us;
4335                 int entries;
4336
4337                 line_time_us = (htotal * 1000) / clock;
4338
4339                 /* Use ns/us then divide to preserve precision */
4340                 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
4341                         pixel_size * hdisplay;
4342                 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
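                /*
                 * Worked example (illustrative, assuming a 64-byte cacheline
                 * and a 4-byte pixel): a 1920-wide mode with htotal 2200 and
                 * a 148500 kHz dot clock gives line_time_us = 14, so the
                 * 6000 ns latency covers ((6000 / 14) + 1000) / 1000 = 1
                 * line, i.e. 1 * 4 * 1920 = 7680 bytes = 120 cachelines.
                 */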
4343                 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
4344                 srwm = wm_info->fifo_size - entries;
4345                 if (srwm < 0)
4346                         srwm = 1;
4347
4348                 if (IS_I945G(dev) || IS_I945GM(dev))
4349                         I915_WRITE(FW_BLC_SELF,
4350                                    FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
4351                 else if (IS_I915GM(dev))
4352                         I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
4353         }
4354
4355         DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
4356                       planea_wm, planeb_wm, cwm, srwm);
4357
4358         fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
4359         fwater_hi = (cwm & 0x1f);
4360
4361         /* Set request length to 8 cachelines per fetch */
4362         fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
4363         fwater_hi = fwater_hi | (1 << 8);
4364
4365         I915_WRITE(FW_BLC, fwater_lo);
4366         I915_WRITE(FW_BLC2, fwater_hi);
4367
4368         if (HAS_FW_BLC(dev)) {
4369                 if (enabled) {
4370                         if (IS_I945G(dev) || IS_I945GM(dev))
4371                                 I915_WRITE(FW_BLC_SELF,
4372                                            FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
4373                         else if (IS_I915GM(dev))
4374                                 I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
4375                         DRM_DEBUG_KMS("memory self refresh enabled\n");
4376                 } else
4377                         DRM_DEBUG_KMS("memory self refresh disabled\n");
4378         }
4379 }
4380
4381 static void i830_update_wm(struct drm_device *dev)
4382 {
4383         struct drm_i915_private *dev_priv = dev->dev_private;
4384         struct drm_crtc *crtc;
4385         uint32_t fwater_lo;
4386         int planea_wm;
4387
4388         crtc = single_enabled_crtc(dev);
4389         if (crtc == NULL)
4390                 return;
4391
4392         planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
4393                                        dev_priv->display.get_fifo_size(dev, 0),
4394                                        crtc->fb->bits_per_pixel / 8,
4395                                        latency_ns);
4396         fwater_lo = I915_READ(FW_BLC) & ~0xfff;
4397         fwater_lo |= (3<<8) | planea_wm;
4398
4399         DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
4400
4401         I915_WRITE(FW_BLC, fwater_lo);
4402 }
4403
4404 #define ILK_LP0_PLANE_LATENCY           700
4405 #define ILK_LP0_CURSOR_LATENCY          1300
4406
4407 /*
4408  * Check the wm result.
4409  *
4410  * If any calculated watermark value is larger than the maximum value that
4411  * can be programmed into the associated watermark register, that watermark
4412  * must be disabled.
4413  */
4414 static bool ironlake_check_srwm(struct drm_device *dev, int level,
4415                                 int fbc_wm, int display_wm, int cursor_wm,
4416                                 const struct intel_watermark_params *display,
4417                                 const struct intel_watermark_params *cursor)
4418 {
4419         struct drm_i915_private *dev_priv = dev->dev_private;
4420
4421         DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
4422                       " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
4423
4424         if (fbc_wm > SNB_FBC_MAX_SRWM) {
4425                 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
4426                               fbc_wm, SNB_FBC_MAX_SRWM, level);
4427
4428                 /* fbc has its own way to disable the FBC WM */
4429                 I915_WRITE(DISP_ARB_CTL,
4430                            I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
4431                 return false;
4432         }
4433
4434         if (display_wm > display->max_wm) {
4435                 DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
4436                               display_wm, (int)display->max_wm, level);
4437                 return false;
4438         }
4439
4440         if (cursor_wm > cursor->max_wm) {
4441                 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
4442                               cursor_wm, (int)cursor->max_wm, level);
4443                 return false;
4444         }
4445
4446         if (!(fbc_wm || display_wm || cursor_wm)) {
4447                 DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
4448                 return false;
4449         }
4450
4451         return true;
4452 }
4453
4454 /*
4455  * Compute the watermark values for WM[1-3].
4456  */
4457 static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
4458                                   int latency_ns,
4459                                   const struct intel_watermark_params *display,
4460                                   const struct intel_watermark_params *cursor,
4461                                   int *fbc_wm, int *display_wm, int *cursor_wm)
4462 {
4463         struct drm_crtc *crtc;
4464         unsigned long line_time_us;
4465         int hdisplay, htotal, pixel_size, clock;
4466         int line_count, line_size;
4467         int small, large;
4468         int entries;
4469
4470         if (!latency_ns) {
4471                 *fbc_wm = *display_wm = *cursor_wm = 0;
4472                 return false;
4473         }
4474
4475         crtc = intel_get_crtc_for_plane(dev, plane);
4476         hdisplay = crtc->mode.hdisplay;
4477         htotal = crtc->mode.htotal;
4478         clock = crtc->mode.clock;
4479         pixel_size = crtc->fb->bits_per_pixel / 8;
4480
4481         line_time_us = (htotal * 1000) / clock;
4482         line_count = (latency_ns / line_time_us + 1000) / 1000;
4483         line_size = hdisplay * pixel_size;
4484
4485         /* Use the minimum of the small and large buffer method for primary */
4486         small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4487         large = line_count * line_size;
4488
4489         entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
4490         *display_wm = entries + display->guard_size;
4491
4492         /*
4493          * Spec says:
4494          * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
4495          */
4496         *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
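        /*
         * Illustrative numbers (assuming a 64-byte cacheline and a 4-byte
         * pixel): for a 1920-wide mode, line_size = 7680 bytes, so a display
         * watermark of 58 entries yields an FBC watermark of
         * DIV_ROUND_UP(58 * 64, 7680) + 2 = 3 lines.
         */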
4497
4498         /* calculate the self-refresh watermark for display cursor */
4499         entries = line_count * pixel_size * 64;
4500         entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4501         *cursor_wm = entries + cursor->guard_size;
4502
4503         return ironlake_check_srwm(dev, level,
4504                                    *fbc_wm, *display_wm, *cursor_wm,
4505                                    display, cursor);
4506 }
4507
4508 static void ironlake_update_wm(struct drm_device *dev)
4509 {
4510         struct drm_i915_private *dev_priv = dev->dev_private;
4511         int fbc_wm, plane_wm, cursor_wm;
4512         unsigned int enabled;
4513
4514         enabled = 0;
4515         if (g4x_compute_wm0(dev, 0,
4516                             &ironlake_display_wm_info,
4517                             ILK_LP0_PLANE_LATENCY,
4518                             &ironlake_cursor_wm_info,
4519                             ILK_LP0_CURSOR_LATENCY,
4520                             &plane_wm, &cursor_wm)) {
4521                 I915_WRITE(WM0_PIPEA_ILK,
4522                            (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4523                 DRM_DEBUG_KMS("FIFO watermarks for pipe A -"
4524                               " plane %d, cursor: %d\n",
4525                               plane_wm, cursor_wm);
4526                 enabled |= 1;
4527         }
4528
4529         if (g4x_compute_wm0(dev, 1,
4530                             &ironlake_display_wm_info,
4531                             ILK_LP0_PLANE_LATENCY,
4532                             &ironlake_cursor_wm_info,
4533                             ILK_LP0_CURSOR_LATENCY,
4534                             &plane_wm, &cursor_wm)) {
4535                 I915_WRITE(WM0_PIPEB_ILK,
4536                            (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4537                 DRM_DEBUG_KMS("FIFO watermarks for pipe B -"
4538                               " plane %d, cursor: %d\n",
4539                               plane_wm, cursor_wm);
4540                 enabled |= 2;
4541         }
4542
4543         /*
4544          * Calculate and update the self-refresh watermark only when one
4545          * display plane is used.
4546          */
4547         I915_WRITE(WM3_LP_ILK, 0);
4548         I915_WRITE(WM2_LP_ILK, 0);
4549         I915_WRITE(WM1_LP_ILK, 0);
4550
4551         if (!single_plane_enabled(enabled))
4552                 return;
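        /* Collapse the single-bit pipe mask into a plane index. */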
4553         enabled = ffs(enabled) - 1;
4554
4555         /* WM1 */
4556         if (!ironlake_compute_srwm(dev, 1, enabled,
4557                                    ILK_READ_WM1_LATENCY() * 500,
4558                                    &ironlake_display_srwm_info,
4559                                    &ironlake_cursor_srwm_info,
4560                                    &fbc_wm, &plane_wm, &cursor_wm))
4561                 return;
4562
4563         I915_WRITE(WM1_LP_ILK,
4564                    WM1_LP_SR_EN |
4565                    (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4566                    (fbc_wm << WM1_LP_FBC_SHIFT) |
4567                    (plane_wm << WM1_LP_SR_SHIFT) |
4568                    cursor_wm);
4569
4570         /* WM2 */
4571         if (!ironlake_compute_srwm(dev, 2, enabled,
4572                                    ILK_READ_WM2_LATENCY() * 500,
4573                                    &ironlake_display_srwm_info,
4574                                    &ironlake_cursor_srwm_info,
4575                                    &fbc_wm, &plane_wm, &cursor_wm))
4576                 return;
4577
4578         I915_WRITE(WM2_LP_ILK,
4579                    WM2_LP_EN |
4580                    (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4581                    (fbc_wm << WM1_LP_FBC_SHIFT) |
4582                    (plane_wm << WM1_LP_SR_SHIFT) |
4583                    cursor_wm);
4584
4585         /*
4586          * WM3 is unsupported on ILK, probably because we don't have latency
4587          * data for that power state
4588          */
4589 }
4590
4591 void sandybridge_update_wm(struct drm_device *dev)
4592 {
4593         struct drm_i915_private *dev_priv = dev->dev_private;
4594         int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
4595         u32 val;
4596         int fbc_wm, plane_wm, cursor_wm;
4597         unsigned int enabled;
4598
4599         enabled = 0;
4600         if (g4x_compute_wm0(dev, 0,
4601                             &sandybridge_display_wm_info, latency,
4602                             &sandybridge_cursor_wm_info, latency,
4603                             &plane_wm, &cursor_wm)) {
4604                 val = I915_READ(WM0_PIPEA_ILK);
4605                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4606                 I915_WRITE(WM0_PIPEA_ILK, val |
4607                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4608                 DRM_DEBUG_KMS("FIFO watermarks for pipe A -"
4609                               " plane %d, cursor: %d\n",
4610                               plane_wm, cursor_wm);
4611                 enabled |= 1;
4612         }
4613
4614         if (g4x_compute_wm0(dev, 1,
4615                             &sandybridge_display_wm_info, latency,
4616                             &sandybridge_cursor_wm_info, latency,
4617                             &plane_wm, &cursor_wm)) {
4618                 val = I915_READ(WM0_PIPEB_ILK);
4619                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4620                 I915_WRITE(WM0_PIPEB_ILK, val |
4621                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4622                 DRM_DEBUG_KMS("FIFO watermarks for pipe B -"
4623                               " plane %d, cursor: %d\n",
4624                               plane_wm, cursor_wm);
4625                 enabled |= 2;
4626         }
4627
4628         /* IVB has 3 pipes */
4629         if (IS_IVYBRIDGE(dev) &&
4630             g4x_compute_wm0(dev, 2,
4631                             &sandybridge_display_wm_info, latency,
4632                             &sandybridge_cursor_wm_info, latency,
4633                             &plane_wm, &cursor_wm)) {
4634                 val = I915_READ(WM0_PIPEC_IVB);
4635                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4636                 I915_WRITE(WM0_PIPEC_IVB, val |
4637                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4638                 DRM_DEBUG_KMS("FIFO watermarks for pipe C -"
4639                               " plane %d, cursor: %d\n",
4640                               plane_wm, cursor_wm);
4641                 enabled |= 1 << 2;
4642         }
4643
4644         /*
4645          * Calculate and update the self-refresh watermark only when one
4646          * display plane is used.
4647          *
4648          * SNB supports 3 levels of watermarks.
4649          *
4650          * WM1/WM2/WM3 watermarks have to be enabled in ascending order
4651          * and disabled in descending order.
4652          *
4653          */
4654         I915_WRITE(WM3_LP_ILK, 0);
4655         I915_WRITE(WM2_LP_ILK, 0);
4656         I915_WRITE(WM1_LP_ILK, 0);
4657
4658         if (!single_plane_enabled(enabled) ||
4659             dev_priv->sprite_scaling_enabled)
4660                 return;
4661         enabled = ffs(enabled) - 1;
4662
4663         /* WM1 */
4664         if (!ironlake_compute_srwm(dev, 1, enabled,
4665                                    SNB_READ_WM1_LATENCY() * 500,
4666                                    &sandybridge_display_srwm_info,
4667                                    &sandybridge_cursor_srwm_info,
4668                                    &fbc_wm, &plane_wm, &cursor_wm))
4669                 return;
4670
4671         I915_WRITE(WM1_LP_ILK,
4672                    WM1_LP_SR_EN |
4673                    (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4674                    (fbc_wm << WM1_LP_FBC_SHIFT) |
4675                    (plane_wm << WM1_LP_SR_SHIFT) |
4676                    cursor_wm);
4677
4678         /* WM2 */
4679         if (!ironlake_compute_srwm(dev, 2, enabled,
4680                                    SNB_READ_WM2_LATENCY() * 500,
4681                                    &sandybridge_display_srwm_info,
4682                                    &sandybridge_cursor_srwm_info,
4683                                    &fbc_wm, &plane_wm, &cursor_wm))
4684                 return;
4685
4686         I915_WRITE(WM2_LP_ILK,
4687                    WM2_LP_EN |
4688                    (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4689                    (fbc_wm << WM1_LP_FBC_SHIFT) |
4690                    (plane_wm << WM1_LP_SR_SHIFT) |
4691                    cursor_wm);
4692
4693         /* WM3 */
4694         if (!ironlake_compute_srwm(dev, 3, enabled,
4695                                    SNB_READ_WM3_LATENCY() * 500,
4696                                    &sandybridge_display_srwm_info,
4697                                    &sandybridge_cursor_srwm_info,
4698                                    &fbc_wm, &plane_wm, &cursor_wm))
4699                 return;
4700
4701         I915_WRITE(WM3_LP_ILK,
4702                    WM3_LP_EN |
4703                    (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4704                    (fbc_wm << WM1_LP_FBC_SHIFT) |
4705                    (plane_wm << WM1_LP_SR_SHIFT) |
4706                    cursor_wm);
4707 }
4708
4709 static bool
4710 sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
4711                               uint32_t sprite_width, int pixel_size,
4712                               const struct intel_watermark_params *display,
4713                               int display_latency_ns, int *sprite_wm)
4714 {
4715         struct drm_crtc *crtc;
4716         int clock;
4717         int entries, tlb_miss;
4718
4719         crtc = intel_get_crtc_for_plane(dev, plane);
4720         if (crtc->fb == NULL || !crtc->enabled) {
4721                 *sprite_wm = display->guard_size;
4722                 return false;
4723         }
4724
4725         clock = crtc->mode.clock;
4726
4727         /* Use the small buffer method to calculate the sprite watermark */
4728         entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
4729         tlb_miss = display->fifo_size*display->cacheline_size -
4730                 sprite_width * 8;
4731         if (tlb_miss > 0)
4732                 entries += tlb_miss;
4733         entries = DIV_ROUND_UP(entries, display->cacheline_size);
4734         *sprite_wm = entries + display->guard_size;
4735         if (*sprite_wm > (int)display->max_wm)
4736                 *sprite_wm = display->max_wm;
4737
4738         return true;
4739 }
4740
4741 static bool
4742 sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
4743                                 uint32_t sprite_width, int pixel_size,
4744                                 const struct intel_watermark_params *display,
4745                                 int latency_ns, int *sprite_wm)
4746 {
4747         struct drm_crtc *crtc;
4748         unsigned long line_time_us;
4749         int clock;
4750         int line_count, line_size;
4751         int small, large;
4752         int entries;
4753
4754         if (!latency_ns) {
4755                 *sprite_wm = 0;
4756                 return false;
4757         }
4758
4759         crtc = intel_get_crtc_for_plane(dev, plane);
4760         clock = crtc->mode.clock;
4761         if (!clock) {
4762                 *sprite_wm = 0;
4763                 return false;
4764         }
4765
4766         line_time_us = (sprite_width * 1000) / clock;
4767         if (!line_time_us) {
4768                 *sprite_wm = 0;
4769                 return false;
4770         }
4771
4772         line_count = (latency_ns / line_time_us + 1000) / 1000;
4773         line_size = sprite_width * pixel_size;
4774
4775         /* Use the minimum of the small and large buffer method for the sprite */
4776         small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4777         large = line_count * line_size;
4778
4779         entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
4780         *sprite_wm = entries + display->guard_size;
4781
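        /* Values above 0x3ff do not fit in the LP sprite watermark field. */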
4782         return *sprite_wm <= 0x3ff;
4783 }
4784
4785 static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
4786                                          uint32_t sprite_width, int pixel_size)
4787 {
4788         struct drm_i915_private *dev_priv = dev->dev_private;
4789         int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
4790         u32 val;
4791         int sprite_wm, reg;
4792         int ret;
4793
4794         switch (pipe) {
4795         case 0:
4796                 reg = WM0_PIPEA_ILK;
4797                 break;
4798         case 1:
4799                 reg = WM0_PIPEB_ILK;
4800                 break;
4801         case 2:
4802                 reg = WM0_PIPEC_IVB;
4803                 break;
4804         default:
4805                 return; /* bad pipe */
4806         }
4807
4808         ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
4809                                             &sandybridge_display_wm_info,
4810                                             latency, &sprite_wm);
4811         if (!ret) {
4812                 DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
4813                               pipe);
4814                 return;
4815         }
4816
4817         val = I915_READ(reg);
4818         val &= ~WM0_PIPE_SPRITE_MASK;
4819         I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
4820         DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
4821
4822
4823         ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
4824                                               pixel_size,
4825                                               &sandybridge_display_srwm_info,
4826                                               SNB_READ_WM1_LATENCY() * 500,
4827                                               &sprite_wm);
4828         if (!ret) {
4829                 DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
4830                               pipe);
4831                 return;
4832         }
4833         I915_WRITE(WM1S_LP_ILK, sprite_wm);
4834
4835         /* Only IVB has two more LP watermarks for sprite */
4836         if (!IS_IVYBRIDGE(dev))
4837                 return;
4838
4839         ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
4840                                               pixel_size,
4841                                               &sandybridge_display_srwm_info,
4842                                               SNB_READ_WM2_LATENCY() * 500,
4843                                               &sprite_wm);
4844         if (!ret) {
4845                 DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
4846                               pipe);
4847                 return;
4848         }
4849         I915_WRITE(WM2S_LP_IVB, sprite_wm);
4850
4851         ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
4852                                               pixel_size,
4853                                               &sandybridge_display_srwm_info,
4854                                               SNB_READ_WM3_LATENCY() * 500,
4855                                               &sprite_wm);
4856         if (!ret) {
4857                 DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
4858                               pipe);
4859                 return;
4860         }
4861         I915_WRITE(WM3S_LP_IVB, sprite_wm);
4862 }
4863
4864 /**
4865  * intel_update_watermarks - update FIFO watermark values based on current modes
4866  *
4867  * Calculate watermark values for the various WM regs based on current mode
4868  * and plane configuration.
4869  *
4870  * There are several cases to deal with here:
4871  *   - normal (i.e. non-self-refresh)
4872  *   - self-refresh (SR) mode
4873  *   - lines are large relative to FIFO size (buffer can hold up to 2)
4874  *   - lines are small relative to FIFO size (buffer can hold more than 2
4875  *     lines), so need to account for TLB latency
4876  *
4877  *   The normal calculation is:
4878  *     watermark = dotclock * bytes per pixel * latency
4879  *   where latency is platform & configuration dependent (we assume pessimal
4880  *   values here).
4881  *
4882  *   The SR calculation is:
4883  *     watermark = (trunc(latency/line time)+1) * surface width *
4884  *       bytes per pixel
4885  *   where
4886  *     line time = htotal / dotclock
4887  *     surface width = hdisplay for normal plane and 64 for cursor
4888  *   and latency is assumed to be high, as above.
4889  *
4890  * The final value programmed to the register should always be rounded up,
4891  * and include an extra 2 entries to account for clock crossings.
4892  *
4893  * We don't use the sprite, so we can ignore that.  And on Crestline we have
4894  * to set the non-SR watermarks to 8.
4895  */
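/*
 * Worked example of the normal calculation above (illustrative values): a
 * 148500 kHz dot clock at 4 bytes per pixel with a 5 us latency consumes
 * (148500 * 4 / 1000) * 5000 / 1000 = 2970 bytes, i.e. 47 64-byte FIFO
 * entries, during the latency window; the programmed watermark is then
 * roughly the FIFO size minus those entries and the guard size.
 */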
4896 static void intel_update_watermarks(struct drm_device *dev)
4897 {
4898         struct drm_i915_private *dev_priv = dev->dev_private;
4899
4900         if (dev_priv->display.update_wm)
4901                 dev_priv->display.update_wm(dev);
4902 }
4903
4904 void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
4905                                     uint32_t sprite_width, int pixel_size)
4906 {
4907         struct drm_i915_private *dev_priv = dev->dev_private;
4908
4909         if (dev_priv->display.update_sprite_wm)
4910                 dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
4911                                                    pixel_size);
4912 }
4913
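/*
 * A non-negative i915_panel_use_ssc module parameter overrides the
 * VBT-derived default.
 */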
4914 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4915 {
4916         if (i915_panel_use_ssc >= 0)
4917                 return i915_panel_use_ssc != 0;
4918         return dev_priv->lvds_use_ssc
4919                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4920 }
4921
4922 /**
4923  * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
4924  * @crtc: CRTC structure
4925  * @mode: requested mode
4926  *
4927  * A pipe may be connected to one or more outputs.  Based on the depth of the
4928  * attached framebuffer, choose a good color depth to use on the pipe.
4929  *
4930  * If possible, match the pipe depth to the fb depth.  In some cases, this
4931  * isn't ideal, because the connected output supports a lesser or restricted
4932  * set of depths.  Resolve that here:
4933  *    LVDS typically supports only 6bpc, so clamp down in that case
4934  *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
4935  *    Displays may support a restricted set as well, check EDID and clamp as
4936  *      appropriate.
4937  *    DP may want to dither down to 6bpc to fit larger modes
4938  *
4939  * RETURNS:
4940  * Dithering requirement (i.e. false if display bpc and pipe bpc match,
4941  * true if they don't match).
4942  */
4943 static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4944                                          unsigned int *pipe_bpp,
4945                                          struct drm_display_mode *mode)
4946 {
4947         struct drm_device *dev = crtc->dev;
4948         struct drm_i915_private *dev_priv = dev->dev_private;
4949         struct drm_encoder *encoder;
4950         struct drm_connector *connector;
4951         unsigned int display_bpc = UINT_MAX, bpc;
4952
4953         /* Walk the encoders & connectors on this crtc, get min bpc */
4954         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
4955                 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
4956
4957                 if (encoder->crtc != crtc)
4958                         continue;
4959
4960                 if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
4961                         unsigned int lvds_bpc;
4962
4963                         if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
4964                             LVDS_A3_POWER_UP)
4965                                 lvds_bpc = 8;
4966                         else
4967                                 lvds_bpc = 6;
4968
4969                         if (lvds_bpc < display_bpc) {
4970                                 DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
4971                                 display_bpc = lvds_bpc;
4972                         }
4973                         continue;
4974                 }
4975
4976                 /* Not one of the known troublemakers, check the EDID */
4977                 list_for_each_entry(connector, &dev->mode_config.connector_list,
4978                                     head) {
4979                         if (connector->encoder != encoder)
4980                                 continue;
4981
4982                         /* Don't use an invalid EDID bpc value */
4983                         if (connector->display_info.bpc &&
4984                             connector->display_info.bpc < display_bpc) {
4985                                 DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
4986                                 display_bpc = connector->display_info.bpc;
4987                         }
4988                 }
4989
4990                 if (intel_encoder->type == INTEL_OUTPUT_EDP) {
4991                         /* Use VBT settings if we have an eDP panel */
4992                         unsigned int edp_bpc = dev_priv->edp.bpp / 3;
4993
4994                         if (edp_bpc && edp_bpc < display_bpc) {
4995                                 DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
4996                                 display_bpc = edp_bpc;
4997                         }
4998                         continue;
4999                 }
5000
5001                 /*
5002                  * HDMI is either 12 or 8, so if the display lets 10bpc sneak
5003                  * through, clamp it down.  (Note: >12bpc will be caught below.)
5004                  */
5005                 if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
5006                         if (display_bpc > 8 && display_bpc < 12) {
5007                                 DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
5008                                 display_bpc = 12;
5009                         } else {
5010                                 DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
5011                                 display_bpc = 8;
5012                         }
5013                 }
5014         }
5015
5016         if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
5017                 DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
5018                 display_bpc = 6;
5019         }
5020
5021         /*
5022          * We could just drive the pipe at the highest bpc all the time and
5023          * enable dithering as needed, but that costs bandwidth.  So choose
5024          * the minimum value that expresses the full color range of the fb but
5025          * also stays within the max display bpc discovered above.
5026          */
5027
5028         switch (crtc->fb->depth) {
5029         case 8:
5030                 bpc = 8; /* since we go through a colormap */
5031                 break;
5032         case 15:
5033         case 16:
5034                 bpc = 6; /* min is 18bpp */
5035                 break;
5036         case 24:
5037                 bpc = 8;
5038                 break;
5039         case 30:
5040                 bpc = 10;
5041                 break;
5042         case 48:
5043                 bpc = 12;
5044                 break;
5045         default:
5046                 DRM_DEBUG("unsupported depth, assuming 24 bits\n");
5047                 bpc = min((unsigned int)8, display_bpc);
5048                 break;
5049         }
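        /*
         * Example: a 24-bpp framebuffer (bpc = 8) driving an 18-bit LVDS
         * panel (display_bpc = 6) ends up with pipe_bpp = 18 and returns
         * true, i.e. dithering is required.
         */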
5050
5051         display_bpc = min(display_bpc, bpc);
5052
5053         DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
5054                       bpc, display_bpc);
5055
5056         *pipe_bpp = display_bpc * 3;
5057
5058         return display_bpc != bpc;
5059 }
5060
5061 static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
5062 {
5063         struct drm_device *dev = crtc->dev;
5064         struct drm_i915_private *dev_priv = dev->dev_private;
5065         int refclk;
5066
5067         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
5068             intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5069                 refclk = dev_priv->lvds_ssc_freq * 1000;
5070                 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
5071                               refclk / 1000);
5072         } else if (!IS_GEN2(dev)) {
5073                 refclk = 96000;
5074         } else {
5075                 refclk = 48000;
5076         }
5077
5078         return refclk;
5079 }
5080
5081 static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
5082                                       intel_clock_t *clock)
5083 {
5084         /* SDVO TV has fixed PLL values that depend on its clock range;
5085            this mirrors the VBIOS setting. */
5086         if (adjusted_mode->clock >= 100000
5087             && adjusted_mode->clock < 140500) {
5088                 clock->p1 = 2;
5089                 clock->p2 = 10;
5090                 clock->n = 3;
5091                 clock->m1 = 16;
5092                 clock->m2 = 8;
5093         } else if (adjusted_mode->clock >= 140500
5094                    && adjusted_mode->clock <= 200000) {
5095                 clock->p1 = 1;
5096                 clock->p2 = 10;
5097                 clock->n = 6;
5098                 clock->m1 = 12;
5099                 clock->m2 = 8;
5100         }
5101 }
5102
5103 static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
5104                                      intel_clock_t *clock,
5105                                      intel_clock_t *reduced_clock)
5106 {
5107         struct drm_device *dev = crtc->dev;
5108         struct drm_i915_private *dev_priv = dev->dev_private;
5109         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5110         int pipe = intel_crtc->pipe;
5111         u32 fp, fp2 = 0;
5112
5113         if (IS_PINEVIEW(dev)) {
5114                 fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
5115                 if (reduced_clock)
5116                         fp2 = (1 << reduced_clock->n) << 16 |
5117                                 reduced_clock->m1 << 8 | reduced_clock->m2;
5118         } else {
5119                 fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
5120                 if (reduced_clock)
5121                         fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
5122                                 reduced_clock->m2;
5123         }
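        /*
         * Illustrative packing: n = 3, m1 = 16, m2 = 8 gives
         * fp = (3 << 16) | (16 << 8) | 8 = 0x00031008 on non-Pineview parts.
         */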
5124
5125         I915_WRITE(FP0(pipe), fp);
5126
5127         intel_crtc->lowfreq_avail = false;
5128         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
5129             reduced_clock && i915_powersave) {
5130                 I915_WRITE(FP1(pipe), fp2);
5131                 intel_crtc->lowfreq_avail = true;
5132         } else {
5133                 I915_WRITE(FP1(pipe), fp);
5134         }
5135 }
5136
5137 static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5138                               struct drm_display_mode *mode,
5139                               struct drm_display_mode *adjusted_mode,
5140                               int x, int y,
5141                               struct drm_framebuffer *old_fb)
5142 {
5143         struct drm_device *dev = crtc->dev;
5144         struct drm_i915_private *dev_priv = dev->dev_private;
5145         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5146         int pipe = intel_crtc->pipe;
5147         int plane = intel_crtc->plane;
5148         int refclk, num_connectors = 0;
5149         intel_clock_t clock, reduced_clock;
5150         u32 dpll, dspcntr, pipeconf, vsyncshift;
5151         bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
5152         bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
5153         struct drm_mode_config *mode_config = &dev->mode_config;
5154         struct intel_encoder *encoder;
5155         const intel_limit_t *limit;
5156         int ret;
5157         u32 temp;
5158         u32 lvds_sync = 0;
5159
5160         list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5161                 if (encoder->base.crtc != crtc)
5162                         continue;
5163
5164                 switch (encoder->type) {
5165                 case INTEL_OUTPUT_LVDS:
5166                         is_lvds = true;
5167                         break;
5168                 case INTEL_OUTPUT_SDVO:
5169                 case INTEL_OUTPUT_HDMI:
5170                         is_sdvo = true;
5171                         if (encoder->needs_tv_clock)
5172                                 is_tv = true;
5173                         break;
5174                 case INTEL_OUTPUT_DVO:
5175                         is_dvo = true;
5176                         break;
5177                 case INTEL_OUTPUT_TVOUT:
5178                         is_tv = true;
5179                         break;
5180                 case INTEL_OUTPUT_ANALOG:
5181                         is_crt = true;
5182                         break;
5183                 case INTEL_OUTPUT_DISPLAYPORT:
5184                         is_dp = true;
5185                         break;
5186                 }
5187
5188                 num_connectors++;
5189         }
5190
5191         refclk = i9xx_get_refclk(crtc, num_connectors);
5192
5193         /*
5194          * Returns a set of divisors for the desired target clock with the given
5195          * refclk, or FALSE.  The returned values represent the clock equation:
5196          * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
5197          */
5198         limit = intel_limit(crtc, refclk);
5199         ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
5200                              &clock);
5201         if (!ok) {
5202                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
5203                 return -EINVAL;
5204         }
5205
5206         /* Ensure that the cursor is valid for the new mode before changing... */
5207         intel_crtc_update_cursor(crtc, true);
5208
5209         if (is_lvds && dev_priv->lvds_downclock_avail) {
5210                 /*
5211                  * Ensure we match the reduced clock's P to the target clock.
5212                  * If the clocks don't match, we can't switch the display clock
5213                  * by using the FP0/FP1. In that case we will disable the
5214                  * LVDS downclock feature.
5215                  */
5216                 has_reduced_clock = limit->find_pll(limit, crtc,
5217                                                     dev_priv->lvds_downclock,
5218                                                     refclk,
5219                                                     &clock,
5220                                                     &reduced_clock);
5221         }
5222
5223         if (is_sdvo && is_tv)
5224                 i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
5225
5226         i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
5227                                  &reduced_clock : NULL);
5228
5229         dpll = DPLL_VGA_MODE_DIS;
5230
5231         if (!IS_GEN2(dev)) {
5232                 if (is_lvds)
5233                         dpll |= DPLLB_MODE_LVDS;
5234                 else
5235                         dpll |= DPLLB_MODE_DAC_SERIAL;
5236                 if (is_sdvo) {
5237                         int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5238                         if (pixel_multiplier > 1) {
5239                                 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
5240                                         dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
5241                         }
5242                         dpll |= DPLL_DVO_HIGH_SPEED;
5243                 }
5244                 if (is_dp)
5245                         dpll |= DPLL_DVO_HIGH_SPEED;
5246
5247                 /* compute bitmask from p1 value */
5248                 if (IS_PINEVIEW(dev))
5249                         dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
5250                 else {
5251                         dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5252                         if (IS_G4X(dev) && has_reduced_clock)
5253                                 dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5254                 }
5255                 switch (clock.p2) {
5256                 case 5:
5257                         dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5258                         break;
5259                 case 7:
5260                         dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5261                         break;
5262                 case 10:
5263                         dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5264                         break;
5265                 case 14:
5266                         dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5267                         break;
5268                 }
5269                 if (INTEL_INFO(dev)->gen >= 4)
5270                         dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
5271         } else {
5272                 if (is_lvds) {
5273                         dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5274                 } else {
5275                         if (clock.p1 == 2)
5276                                 dpll |= PLL_P1_DIVIDE_BY_TWO;
5277                         else
5278                                 dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5279                         if (clock.p2 == 4)
5280                                 dpll |= PLL_P2_DIVIDE_BY_4;
5281                 }
5282         }
5283
5284         if (is_sdvo && is_tv)
5285                 dpll |= PLL_REF_INPUT_TVCLKINBC;
5286         else if (is_tv)
5287                 /* XXX: just matching BIOS for now */
5288                 /*      dpll |= PLL_REF_INPUT_TVCLKINBC; */
5289                 dpll |= 3;
5290         else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5291                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5292         else
5293                 dpll |= PLL_REF_INPUT_DREFCLK;
5294
5295         /* setup pipeconf */
5296         pipeconf = I915_READ(PIPECONF(pipe));
5297
5298         /* Set up the display plane register */
5299         dspcntr = DISPPLANE_GAMMA_ENABLE;
5300
5301         if (pipe == 0)
5302                 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
5303         else
5304                 dspcntr |= DISPPLANE_SEL_PIPE_B;
5305
5306         if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
5307                 /* Enable pixel doubling when the dot clock is > 90% of the (display)
5308                  * core speed.
5309                  *
5310                  * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
5311                  * pipe == 0 check?
5312                  */
5313                 if (mode->clock >
5314                     dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
5315                         pipeconf |= PIPECONF_DOUBLE_WIDE;
5316                 else
5317                         pipeconf &= ~PIPECONF_DOUBLE_WIDE;
5318         }
5319
5320         /* default to 8bpc */
5321         pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
5322         if (is_dp) {
5323                 if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
5324                         pipeconf |= PIPECONF_BPP_6 |
5325                                     PIPECONF_DITHER_EN |
5326                                     PIPECONF_DITHER_TYPE_SP;
5327                 }
5328         }
5329
5330         dpll |= DPLL_VCO_ENABLE;
5331
5332         DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
5333         drm_mode_debug_printmodeline(mode);
5334
5335         I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
5336
5337         POSTING_READ(DPLL(pipe));
5338         udelay(150);
5339
5340         /* The LVDS pin pair needs to be on before the DPLLs are enabled.
5341          * This is an exception to the general rule that mode_set doesn't turn
5342          * things on.
5343          */
5344         if (is_lvds) {
5345                 temp = I915_READ(LVDS);
5346                 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
5347                 if (pipe == 1) {
5348                         temp |= LVDS_PIPEB_SELECT;
5349                 } else {
5350                         temp &= ~LVDS_PIPEB_SELECT;
5351                 }
5352                 /* set the corresponding LVDS_BORDER bit */
5353                 temp |= dev_priv->lvds_border_bits;
5354                 /* Set the B0-B3 data pairs corresponding to whether we're going to
5355                  * set the DPLLs for dual-channel mode or not.
5356                  */
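                /*
                 * For LVDS, p2 == 7 corresponds to the dual-channel clock
                 * limits, so it also tells us whether the PLL was set up
                 * for dual-channel mode.
                 */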
5357                 if (clock.p2 == 7)
5358                         temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
5359                 else
5360                         temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
5361
5362                 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
5363                  * appropriately here, but we need to look more thoroughly into how
5364                  * panels behave in the two modes.
5365                  */
5366                 /* set the dithering flag on LVDS as needed */
5367                 if (INTEL_INFO(dev)->gen >= 4) {
5368                         if (dev_priv->lvds_dither)
5369                                 temp |= LVDS_ENABLE_DITHER;
5370                         else
5371                                 temp &= ~LVDS_ENABLE_DITHER;
5372                 }
5373                 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5374                         lvds_sync |= LVDS_HSYNC_POLARITY;
5375                 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
5376                         lvds_sync |= LVDS_VSYNC_POLARITY;
5377                 if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
5378                     != lvds_sync) {
5379                         char flags[2] = "-+";
5380                         DRM_INFO("Changing LVDS panel from "
5381                                  "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
5382                                  flags[!(temp & LVDS_HSYNC_POLARITY)],
5383                                  flags[!(temp & LVDS_VSYNC_POLARITY)],
5384                                  flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
5385                                  flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
5386                         temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5387                         temp |= lvds_sync;
5388                 }
5389                 I915_WRITE(LVDS, temp);
5390         }
5391
5392         if (is_dp) {
5393                 intel_dp_set_m_n(crtc, mode, adjusted_mode);
5394         }
5395
5396         I915_WRITE(DPLL(pipe), dpll);
5397
5398         /* Wait for the clocks to stabilize. */
5399         POSTING_READ(DPLL(pipe));
5400         udelay(150);
5401
5402         if (INTEL_INFO(dev)->gen >= 4) {
5403                 temp = 0;
5404                 if (is_sdvo) {
5405                         temp = intel_mode_get_pixel_multiplier(adjusted_mode);
5406                         if (temp > 1)
5407                                 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5408                         else
5409                                 temp = 0;
5410                 }
5411                 I915_WRITE(DPLL_MD(pipe), temp);
5412         } else {
5413                 /* The pixel multiplier can only be updated once the
5414                  * DPLL is enabled and the clocks are stable.
5415                  *
5416                  * So write it again.
5417                  */
5418                 I915_WRITE(DPLL(pipe), dpll);
5419         }
5420
5421         if (HAS_PIPE_CXSR(dev)) {
5422                 if (intel_crtc->lowfreq_avail) {
5423                         DRM_DEBUG_KMS("enabling CxSR downclocking\n");
5424                         pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
5425                 } else {
5426                         DRM_DEBUG_KMS("disabling CxSR downclocking\n");
5427                         pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
5428                 }
5429         }
5430
5431         pipeconf &= ~PIPECONF_INTERLACE_MASK;
5432         if (!IS_GEN2(dev) &&
5433             adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5434                 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
5435                 /* the chip adds 2 halflines automatically */
5436                 adjusted_mode->crtc_vtotal -= 1;
5437                 adjusted_mode->crtc_vblank_end -= 1;
5438                 vsyncshift = adjusted_mode->crtc_hsync_start
5439                              - adjusted_mode->crtc_htotal/2;
5440         } else {
5441                 pipeconf |= PIPECONF_PROGRESSIVE;
5442                 vsyncshift = 0;
5443         }
5444
5445         if (!IS_GEN3(dev))
5446                 I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
5447
5448         I915_WRITE(HTOTAL(pipe),
5449                    (adjusted_mode->crtc_hdisplay - 1) |
5450                    ((adjusted_mode->crtc_htotal - 1) << 16));
5451         I915_WRITE(HBLANK(pipe),
5452                    (adjusted_mode->crtc_hblank_start - 1) |
5453                    ((adjusted_mode->crtc_hblank_end - 1) << 16));
5454         I915_WRITE(HSYNC(pipe),
5455                    (adjusted_mode->crtc_hsync_start - 1) |
5456                    ((adjusted_mode->crtc_hsync_end - 1) << 16));
5457
5458         I915_WRITE(VTOTAL(pipe),
5459                    (adjusted_mode->crtc_vdisplay - 1) |
5460                    ((adjusted_mode->crtc_vtotal - 1) << 16));
5461         I915_WRITE(VBLANK(pipe),
5462                    (adjusted_mode->crtc_vblank_start - 1) |
5463                    ((adjusted_mode->crtc_vblank_end - 1) << 16));
5464         I915_WRITE(VSYNC(pipe),
5465                    (adjusted_mode->crtc_vsync_start - 1) |
5466                    ((adjusted_mode->crtc_vsync_end - 1) << 16));
5467
5468         /* pipesrc and dspsize control the size that is scaled from,
5469          * which should always be the user's requested size.
5470          */
5471         I915_WRITE(DSPSIZE(plane),
5472                    ((mode->vdisplay - 1) << 16) |
5473                    (mode->hdisplay - 1));
5474         I915_WRITE(DSPPOS(plane), 0);
5475         I915_WRITE(PIPESRC(pipe),
5476                    ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
5477
5478         I915_WRITE(PIPECONF(pipe), pipeconf);
5479         POSTING_READ(PIPECONF(pipe));
5480         intel_enable_pipe(dev_priv, pipe, false);
5481
5482         intel_wait_for_vblank(dev, pipe);
5483
5484         I915_WRITE(DSPCNTR(plane), dspcntr);
5485         POSTING_READ(DSPCNTR(plane));
5486         intel_enable_plane(dev_priv, plane, pipe);
5487
5488         ret = intel_pipe_set_base(crtc, x, y, old_fb);
5489
5490         intel_update_watermarks(dev);
5491
5492         return ret;
5493 }
5494
5495 /*
5496  * Initialize reference clocks when the driver loads
5497  */
5498 void ironlake_init_pch_refclk(struct drm_device *dev)
5499 {
5500         struct drm_i915_private *dev_priv = dev->dev_private;
5501         struct drm_mode_config *mode_config = &dev->mode_config;
5502         struct intel_encoder *encoder;
5503         u32 temp;
5504         bool has_lvds = false;
5505         bool has_cpu_edp = false;
5506         bool has_pch_edp = false;
5507         bool has_panel = false;
5508         bool has_ck505 = false;
5509         bool can_ssc = false;
5510
5511         /* We need to take the global config into account */
5512         list_for_each_entry(encoder, &mode_config->encoder_list,
5513                             base.head) {
5514                 switch (encoder->type) {
5515                 case INTEL_OUTPUT_LVDS:
5516                         has_panel = true;
5517                         has_lvds = true;
5518                         break;
5519                 case INTEL_OUTPUT_EDP:
5520                         has_panel = true;
5521                         if (intel_encoder_is_pch_edp(&encoder->base))
5522                                 has_pch_edp = true;
5523                         else
5524                                 has_cpu_edp = true;
5525                         break;
5526                 }
5527         }
5528
5529         if (HAS_PCH_IBX(dev)) {
5530                 has_ck505 = dev_priv->display_clock_mode;
5531                 can_ssc = has_ck505;
5532         } else {
5533                 has_ck505 = false;
5534                 can_ssc = true;
5535         }
5536
5537         DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
5538                       has_panel, has_lvds, has_pch_edp, has_cpu_edp,
5539                       has_ck505);
5540
5541         /* Ironlake: try to set up the display reference clock before
5542          * enabling the DPLL. This is only under the driver's control
5543          * after the PCH B stepping; earlier chipset steppings should
5544          * ignore this setting.
5545          */
5546         temp = I915_READ(PCH_DREF_CONTROL);
5547         /* Always enable nonspread source */
5548         temp &= ~DREF_NONSPREAD_SOURCE_MASK;
5549
5550         if (has_ck505)
5551                 temp |= DREF_NONSPREAD_CK505_ENABLE;
5552         else
5553                 temp |= DREF_NONSPREAD_SOURCE_ENABLE;
5554
5555         if (has_panel) {
5556                 temp &= ~DREF_SSC_SOURCE_MASK;
5557                 temp |= DREF_SSC_SOURCE_ENABLE;
5558
5559                 /* SSC must be turned on before enabling the CPU output */
5560                 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5561                         DRM_DEBUG_KMS("Using SSC on panel\n");
5562                         temp |= DREF_SSC1_ENABLE;
5563                 } else
5564                         temp &= ~DREF_SSC1_ENABLE;
5565
5566                 /* Get SSC going before enabling the outputs */
5567                 I915_WRITE(PCH_DREF_CONTROL, temp);
5568                 POSTING_READ(PCH_DREF_CONTROL);
5569                 udelay(200);
5570
5571                 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5572
5573                 /* Enable CPU source on CPU attached eDP */
5574                 if (has_cpu_edp) {
5575                         if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5576                                 DRM_DEBUG_KMS("Using SSC on eDP\n");
5577                                 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5578                         }
5579                         else
5580                                 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5581                 } else
5582                         temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5583
5584                 I915_WRITE(PCH_DREF_CONTROL, temp);
5585                 POSTING_READ(PCH_DREF_CONTROL);
5586                 udelay(200);
5587         } else {
5588                 DRM_DEBUG_KMS("Disabling SSC entirely\n");
5589
5590                 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5591
5592                 /* Turn off CPU output */
5593                 temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5594
5595                 I915_WRITE(PCH_DREF_CONTROL, temp);
5596                 POSTING_READ(PCH_DREF_CONTROL);
5597                 udelay(200);
5598
5599                 /* Turn off the SSC source */
5600                 temp &= ~DREF_SSC_SOURCE_MASK;
5601                 temp |= DREF_SSC_SOURCE_DISABLE;
5602
5603                 /* Turn off SSC1 */
5604                 temp &= ~DREF_SSC1_ENABLE;
5605
5606                 I915_WRITE(PCH_DREF_CONTROL, temp);
5607                 POSTING_READ(PCH_DREF_CONTROL);
5608                 udelay(200);
5609         }
5610 }
5611
5612 static int ironlake_get_refclk(struct drm_crtc *crtc)
5613 {
5614         struct drm_device *dev = crtc->dev;
5615         struct drm_i915_private *dev_priv = dev->dev_private;
5616         struct intel_encoder *encoder;
5617         struct drm_mode_config *mode_config = &dev->mode_config;
5618         struct intel_encoder *edp_encoder = NULL;
5619         int num_connectors = 0;
5620         bool is_lvds = false;
5621
5622         list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5623                 if (encoder->base.crtc != crtc)
5624                         continue;
5625
5626                 switch (encoder->type) {
5627                 case INTEL_OUTPUT_LVDS:
5628                         is_lvds = true;
5629                         break;
5630                 case INTEL_OUTPUT_EDP:
5631                         edp_encoder = encoder;
5632                         break;
5633                 }
5634                 num_connectors++;
5635         }
5636
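             /*
              * Everything here is in kHz: lvds_ssc_freq is held in MHz (see
              * the debug message below), hence the * 1000, and 120000 kHz is
              * the 120 MHz non-SSC reference clock.
              */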
5637         if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5638                 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
5639                               dev_priv->lvds_ssc_freq);
5640                 return dev_priv->lvds_ssc_freq * 1000;
5641         }
5642
5643         return 120000;
5644 }
5645
5646 static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5647                                   struct drm_display_mode *mode,
5648                                   struct drm_display_mode *adjusted_mode,
5649                                   int x, int y,
5650                                   struct drm_framebuffer *old_fb)
5651 {
5652         struct drm_device *dev = crtc->dev;
5653         struct drm_i915_private *dev_priv = dev->dev_private;
5654         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5655         int pipe = intel_crtc->pipe;
5656         int plane = intel_crtc->plane;
5657         int refclk, num_connectors = 0;
5658         intel_clock_t clock, reduced_clock;
5659         u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
5660         bool ok, has_reduced_clock = false, is_sdvo = false;
5661         bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
5662         struct intel_encoder *has_edp_encoder = NULL;
5663         struct drm_mode_config *mode_config = &dev->mode_config;
5664         struct intel_encoder *encoder;
5665         const intel_limit_t *limit;
5666         int ret;
5667         struct fdi_m_n m_n = {0};
5668         u32 temp;
5669         u32 lvds_sync = 0;
5670         int target_clock, pixel_multiplier, lane, link_bw, factor;
5671         unsigned int pipe_bpp;
5672         bool dither;
5673
5674         list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5675                 if (encoder->base.crtc != crtc)
5676                         continue;
5677
5678                 switch (encoder->type) {
5679                 case INTEL_OUTPUT_LVDS:
5680                         is_lvds = true;
5681                         break;
5682                 case INTEL_OUTPUT_SDVO:
5683                 case INTEL_OUTPUT_HDMI:
5684                         is_sdvo = true;
5685                         if (encoder->needs_tv_clock)
5686                                 is_tv = true;
5687                         break;
5688                 case INTEL_OUTPUT_TVOUT:
5689                         is_tv = true;
5690                         break;
5691                 case INTEL_OUTPUT_ANALOG:
5692                         is_crt = true;
5693                         break;
5694                 case INTEL_OUTPUT_DISPLAYPORT:
5695                         is_dp = true;
5696                         break;
5697                 case INTEL_OUTPUT_EDP:
5698                         has_edp_encoder = encoder;
5699                         break;
5700                 }
5701
5702                 num_connectors++;
5703         }
5704
5705         refclk = ironlake_get_refclk(crtc);
5706
5707         /*
5708          * Returns a set of divisors for the desired target clock with the given
5709          * refclk, or FALSE.  The returned values represent the clock equation:
5710          * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
5711          */
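             /*
              * For illustration only (divisors picked arbitrarily, not taken
              * from the limit tables): refclk = 120000 kHz with n = 3,
              * m1 = 12, m2 = 9, p1 = 2, p2 = 14 gives
              * 120000 * (5 * 14 + 11) / 5 / 2 / 14 ~= 69429 kHz.
              */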
5712         limit = intel_limit(crtc, refclk);
5713         ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
5714                              &clock);
5715         if (!ok) {
5716                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
5717                 return -EINVAL;
5718         }
5719
5720         /* Ensure that the cursor is valid for the new mode before changing... */
5721         intel_crtc_update_cursor(crtc, true);
5722
5723         if (is_lvds && dev_priv->lvds_downclock_avail) {
5724                 /*
5725                  * Ensure we match the reduced clock's P to the target clock.
5726                  * If the clocks don't match, we can't switch the display clock
5727                  * by using the FP0/FP1. In such case we will disable the LVDS
5728                  * downclock feature.
5729                  */
5730                 has_reduced_clock = limit->find_pll(limit, crtc,
5731                                                     dev_priv->lvds_downclock,
5732                                                     refclk,
5733                                                     &clock,
5734                                                     &reduced_clock);
5735         }
5736         /* SDVO TV has fixed PLL values that depend on its clock range,
5737            this mirrors the VBIOS setting. */
5738         if (is_sdvo && is_tv) {
5739                 if (adjusted_mode->clock >= 100000
5740                     && adjusted_mode->clock < 140500) {
5741                         clock.p1 = 2;
5742                         clock.p2 = 10;
5743                         clock.n = 3;
5744                         clock.m1 = 16;
5745                         clock.m2 = 8;
5746                 } else if (adjusted_mode->clock >= 140500
5747                            && adjusted_mode->clock <= 200000) {
5748                         clock.p1 = 1;
5749                         clock.p2 = 10;
5750                         clock.n = 6;
5751                         clock.m1 = 12;
5752                         clock.m2 = 8;
5753                 }
5754         }
5755
5756         /* FDI link */
5757         pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5758         lane = 0;
5759         /* CPU eDP doesn't require an FDI link, so just set DP M/N
5760            according to the current link config */
5761         if (has_edp_encoder &&
5762             !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5763                 target_clock = mode->clock;
5764                 intel_edp_link_config(has_edp_encoder,
5765                                       &lane, &link_bw);
5766         } else {
5767                 /* [e]DP over FDI requires target mode clock
5768                    instead of link clock */
5769                 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
5770                         target_clock = mode->clock;
5771                 else
5772                         target_clock = adjusted_mode->clock;
5773
5774                 /* FDI is a binary signal running at ~2.7 GHz, encoding
5775                  * each output octet as 10 bits. The actual frequency
5776                  * is stored as a divider into a 100 MHz clock, and the
5777                  * mode pixel clock is stored in units of 1 kHz.
5778                  * Hence the bandwidth of each lane in terms of the mode
5779                  * signal is:
5780                  */
5781                 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
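                     /*
                      * E.g. for the common 2.7 GHz FDI configuration, where
                      * intel_fdi_link_freq() evaluates to 27, link_bw =
                      * 27 * 100000000 / 1000 / 10 = 270000, i.e. 270 MHz per
                      * lane in the same kHz units as the mode clock.
                      */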
5782         }
5783
5784         /* determine panel color depth */
5785         temp = I915_READ(PIPECONF(pipe));
5786         temp &= ~PIPE_BPC_MASK;
5787         dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, adjusted_mode);
5788         switch (pipe_bpp) {
5789         case 18:
5790                 temp |= PIPE_6BPC;
5791                 break;
5792         case 24:
5793                 temp |= PIPE_8BPC;
5794                 break;
5795         case 30:
5796                 temp |= PIPE_10BPC;
5797                 break;
5798         case 36:
5799                 temp |= PIPE_12BPC;
5800                 break;
5801         default:
5802                 WARN(1, "intel_choose_pipe_bpp_dither returned invalid value %d\n",
5803                         pipe_bpp);
5804                 temp |= PIPE_8BPC;
5805                 pipe_bpp = 24;
5806                 break;
5807         }
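             /*
              * pipe_bpp counts total bits per pixel, so 18/24/30/36 map to
              * the 6/8/10/12 bits-per-channel PIPECONF encodings above.
              */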
5808
5809         intel_crtc->bpp = pipe_bpp;
5810         I915_WRITE(PIPECONF(pipe), temp);
5811
5812         if (!lane) {
5813                 /*
5814                  * Account for spread spectrum to avoid
5815                  * oversubscribing the link. Max center spread
5816                  * is 2.5%; use 5% for safety's sake.
5817                  */
5818                 u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
5819                 lane = bps / (link_bw * 8) + 1;
5820         }
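             /*
              * Rough example: a 154000 kHz mode at 24 bpp gives
              * bps = 154000 * 24 * 21 / 20 = 3880800, and with
              * link_bw = 270000 that is 3880800 / (270000 * 8) + 1 = 2 lanes.
              */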
5821
5822         intel_crtc->fdi_lanes = lane;
5823
5824         if (pixel_multiplier > 1)
5825                 link_bw *= pixel_multiplier;
5826         ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
5827                              &m_n);
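             /*
              * m_n now holds the data and link M/N ratios (roughly pixel
              * bandwidth versus FDI/DP link bandwidth) that are programmed
              * into the pipe M/N registers further down.
              */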
5828
5829         fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
5830         if (has_reduced_clock)
5831                 fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
5832                         reduced_clock.m2;
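             /*
              * fp packs N (bit 16), M1 (bit 8) and M2 (low bits) in the FP
              * register layout; fp2 is the same encoding for the reduced
              * LVDS downclock.
              */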
5833
5834         /* Enable autotuning of the PLL clock (if permissible) */
5835         factor = 21;
5836         if (is_lvds) {
5837                 if ((intel_panel_use_ssc(dev_priv) &&
5838                      dev_priv->lvds_ssc_freq == 100) ||
5839                     (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
5840                         factor = 25;
5841         } else if (is_sdvo && is_tv)
5842                 factor = 20;
5843
5844         if (clock.m < factor * clock.n)
5845                 fp |= FP_CB_TUNE;
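             /*
              * I.e. FP_CB_TUNE is set when the feedback ratio m/n is below
              * the per-output threshold chosen above (21 by default, 25 for
              * the listed LVDS cases, 20 for SDVO TV).
              */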
5846
5847         dpll = 0;
5848
5849         if (is_lvds)
5850                 dpll |= DPLLB_MODE_LVDS;
5851         else
5852                 dpll |= DPLLB_MODE_DAC_SERIAL;
5853         if (is_sdvo) {
5854                 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5855                 if (pixel_multiplier > 1) {
5856                         dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
5857                 }
5858                 dpll |= DPLL_DVO_HIGH_SPEED;
5859         }
5860         if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
5861                 dpll |= DPLL_DVO_HIGH_SPEED;
5862
5863         /* compute bitmask from p1 value */
5864         dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5865         /* also FPA1 */
5866         dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
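             /*
              * The P1 post divider is one-hot encoded: e.g. clock.p1 == 2
              * sets bit 1 in both the FPA0 and FPA1 P1 fields.
              */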
5867
5868         switch (clock.p2) {
5869         case 5:
5870                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5871                 break;
5872         case 7:
5873                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5874                 break;
5875         case 10:
5876                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5877                 break;
5878         case 14:
5879                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5880                 break;
5881         }
5882
5883         if (is_sdvo && is_tv)
5884                 dpll |= PLL_REF_INPUT_TVCLKINBC;
5885         else if (is_tv)
5886                 /* XXX: just matching BIOS for now */
5887                 /*      dpll |= PLL_REF_INPUT_TVCLKINBC; */
5888                 dpll |= 3;
5889         else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5890                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5891         else
5892                 dpll |= PLL_REF_INPUT_DREFCLK;
5893
5894         /* setup pipeconf */
5895         pipeconf = I915_READ(PIPECONF(pipe));
5896
5897         /* Set up the display plane register */
5898         dspcntr = DISPPLANE_GAMMA_ENABLE;
5899
5900         DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
5901         drm_mode_debug_printmodeline(mode);
5902
5903         /* PCH eDP needs FDI, but CPU eDP does not */
5904         if (!intel_crtc->no_pll) {
5905                 if (!has_edp_encoder ||
5906                     intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5907                         I915_WRITE(PCH_FP0(pipe), fp);
5908                         I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
5909
5910                         POSTING_READ(PCH_DPLL(pipe));
5911                         udelay(150);
5912                 }
5913         } else {
5914                 if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) &&
5915                     fp == I915_READ(PCH_FP0(0))) {
5916                         intel_crtc->use_pll_a = true;
5917                         DRM_DEBUG_KMS("using pipe a dpll\n");
5918                 } else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) &&
5919                            fp == I915_READ(PCH_FP0(1))) {
5920                         intel_crtc->use_pll_a = false;
5921                         DRM_DEBUG_KMS("using pipe b dpll\n");
5922                 } else {
5923                         DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
5924                         return -EINVAL;
5925                 }
5926         }
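             /*
              * A no_pll pipe has no dedicated PCH DPLL: it can only reuse
              * PLL A or B when the computed dpll value (with bit 31,
              * DPLL_VCO_ENABLE, masked off by 0x7fffffff) and the FP0 value
              * match what is already programmed; otherwise the mode set
              * fails.
              */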
5927
5928         /* The LVDS pin pair needs to be on before the DPLLs are enabled.
5929          * This is an exception to the general rule that mode_set doesn't turn
5930          * things on.
5931          */
5932         if (is_lvds) {
5933                 temp = I915_READ(PCH_LVDS);
5934                 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
5935                 if (HAS_PCH_CPT(dev)) {
5936                         temp &= ~PORT_TRANS_SEL_MASK;
5937                         temp |= PORT_TRANS_SEL_CPT(pipe);
5938                 } else {
5939                         if (pipe == 1)
5940                                 temp |= LVDS_PIPEB_SELECT;
5941                         else
5942                                 temp &= ~LVDS_PIPEB_SELECT;
5943                 }
5944
5945                 /* set the corresponding LVDS_BORDER bit */
5946                 temp |= dev_priv->lvds_border_bits;
5947                 /* Set the B0-B3 data pairs corresponding to whether we're going to
5948                  * set the DPLLs for dual-channel mode or not.
5949                  */
5950                 if (clock.p2 == 7)
5951                         temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
5952                 else
5953                         temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
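                     /*
                      * p2 == 7 is the dual-channel LVDS post divider
                      * (DPLLB_LVDS_P2_CLOCK_DIV_7 above); single channel
                      * uses p2 == 14 and leaves the second channel's B0-B3
                      * and CLKB pairs powered down.
                      */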
5954
5955                 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
5956                  * appropriately here, but we need to look more thoroughly into how
5957                  * panels behave in the two modes.
5958                  */
5959                 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5960                         lvds_sync |= LVDS_HSYNC_POLARITY;
5961                 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
5962                         lvds_sync |= LVDS_VSYNC_POLARITY;
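                     /*
                      * A set polarity bit means inverted (negative) sync, so
                      * the flags[] lookup below prints '-' for a set bit and
                      * '+' for a clear one when logging the change.
                      */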
5963                 if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
5964                     != lvds_sync) {
5965                         char flags[2] = "-+";
5966                         DRM_INFO("Changing LVDS panel from "
5967                                  "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
5968                                  flags[!(temp & LVDS_HSYNC_POLARITY)],
5969                                  flags[!(temp & LVDS_VSYNC_POLARITY)],
5970                                  flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
5971                                  flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
5972                         temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5973                         temp |= lvds_sync;
5974                 }
5975                 I915_WRITE(PCH_LVDS, temp);
5976         }
5977
5978         pipeconf &= ~PIPECONF_DITHER_EN;
5979         pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
5980         if ((is_lvds && de