drm/i915: Flush the pending flips on the CRTC before modification
[linux-2.6.git] drivers/gpu/drm/i915/intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/cpufreq.h>
28 #include <linux/module.h>
29 #include <linux/input.h>
30 #include <linux/i2c.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/vgaarb.h>
34 #include <drm/drm_edid.h>
35 #include "drmP.h"
36 #include "intel_drv.h"
37 #include "i915_drm.h"
38 #include "i915_drv.h"
39 #include "i915_trace.h"
40 #include "drm_dp_helper.h"
41 #include "drm_crtc_helper.h"
42 #include <linux/dma_remapping.h>
43
44 #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
45
46 bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
47 static void intel_update_watermarks(struct drm_device *dev);
48 static void intel_increase_pllclock(struct drm_crtc *crtc);
49 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
50
51 typedef struct {
52         /* given values */
53         int n;
54         int m1, m2;
55         int p1, p2;
56         /* derived values */
57         int     dot;
58         int     vco;
59         int     m;
60         int     p;
61 } intel_clock_t;
62
63 typedef struct {
64         int     min, max;
65 } intel_range_t;
66
67 typedef struct {
68         int     dot_limit;
69         int     p2_slow, p2_fast;
70 } intel_p2_t;
71
72 #define INTEL_P2_NUM                  2
73 typedef struct intel_limit intel_limit_t;
74 struct intel_limit {
75         intel_range_t   dot, vco, n, m, m1, m2, p, p1;
76         intel_p2_t          p2;
77         bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
78                         int, int, intel_clock_t *, intel_clock_t *);
79 };
80
81 /* FDI */
82 #define IRONLAKE_FDI_FREQ               2700000 /* in kHz for mode->clock */
83
84 static bool
85 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
86                     int target, int refclk, intel_clock_t *match_clock,
87                     intel_clock_t *best_clock);
88 static bool
89 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
90                         int target, int refclk, intel_clock_t *match_clock,
91                         intel_clock_t *best_clock);
92
93 static bool
94 intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
95                       int target, int refclk, intel_clock_t *match_clock,
96                       intel_clock_t *best_clock);
97 static bool
98 intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
99                            int target, int refclk, intel_clock_t *match_clock,
100                            intel_clock_t *best_clock);
101
102 static inline u32 /* units of 100MHz */
103 intel_fdi_link_freq(struct drm_device *dev)
104 {
105         if (IS_GEN5(dev)) {
106                 struct drm_i915_private *dev_priv = dev->dev_private;
107                 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
108         } else
109                 return 27;
110 }
111
112 static const intel_limit_t intel_limits_i8xx_dvo = {
113         .dot = { .min = 25000, .max = 350000 },
114         .vco = { .min = 930000, .max = 1400000 },
115         .n = { .min = 3, .max = 16 },
116         .m = { .min = 96, .max = 140 },
117         .m1 = { .min = 18, .max = 26 },
118         .m2 = { .min = 6, .max = 16 },
119         .p = { .min = 4, .max = 128 },
120         .p1 = { .min = 2, .max = 33 },
121         .p2 = { .dot_limit = 165000,
122                 .p2_slow = 4, .p2_fast = 2 },
123         .find_pll = intel_find_best_PLL,
124 };
125
126 static const intel_limit_t intel_limits_i8xx_lvds = {
127         .dot = { .min = 25000, .max = 350000 },
128         .vco = { .min = 930000, .max = 1400000 },
129         .n = { .min = 3, .max = 16 },
130         .m = { .min = 96, .max = 140 },
131         .m1 = { .min = 18, .max = 26 },
132         .m2 = { .min = 6, .max = 16 },
133         .p = { .min = 4, .max = 128 },
134         .p1 = { .min = 1, .max = 6 },
135         .p2 = { .dot_limit = 165000,
136                 .p2_slow = 14, .p2_fast = 7 },
137         .find_pll = intel_find_best_PLL,
138 };
139
140 static const intel_limit_t intel_limits_i9xx_sdvo = {
141         .dot = { .min = 20000, .max = 400000 },
142         .vco = { .min = 1400000, .max = 2800000 },
143         .n = { .min = 1, .max = 6 },
144         .m = { .min = 70, .max = 120 },
145         .m1 = { .min = 10, .max = 22 },
146         .m2 = { .min = 5, .max = 9 },
147         .p = { .min = 5, .max = 80 },
148         .p1 = { .min = 1, .max = 8 },
149         .p2 = { .dot_limit = 200000,
150                 .p2_slow = 10, .p2_fast = 5 },
151         .find_pll = intel_find_best_PLL,
152 };
153
154 static const intel_limit_t intel_limits_i9xx_lvds = {
155         .dot = { .min = 20000, .max = 400000 },
156         .vco = { .min = 1400000, .max = 2800000 },
157         .n = { .min = 1, .max = 6 },
158         .m = { .min = 70, .max = 120 },
159         .m1 = { .min = 10, .max = 22 },
160         .m2 = { .min = 5, .max = 9 },
161         .p = { .min = 7, .max = 98 },
162         .p1 = { .min = 1, .max = 8 },
163         .p2 = { .dot_limit = 112000,
164                 .p2_slow = 14, .p2_fast = 7 },
165         .find_pll = intel_find_best_PLL,
166 };
167
168
169 static const intel_limit_t intel_limits_g4x_sdvo = {
170         .dot = { .min = 25000, .max = 270000 },
171         .vco = { .min = 1750000, .max = 3500000},
172         .n = { .min = 1, .max = 4 },
173         .m = { .min = 104, .max = 138 },
174         .m1 = { .min = 17, .max = 23 },
175         .m2 = { .min = 5, .max = 11 },
176         .p = { .min = 10, .max = 30 },
177         .p1 = { .min = 1, .max = 3},
178         .p2 = { .dot_limit = 270000,
179                 .p2_slow = 10,
180                 .p2_fast = 10
181         },
182         .find_pll = intel_g4x_find_best_PLL,
183 };
184
185 static const intel_limit_t intel_limits_g4x_hdmi = {
186         .dot = { .min = 22000, .max = 400000 },
187         .vco = { .min = 1750000, .max = 3500000},
188         .n = { .min = 1, .max = 4 },
189         .m = { .min = 104, .max = 138 },
190         .m1 = { .min = 16, .max = 23 },
191         .m2 = { .min = 5, .max = 11 },
192         .p = { .min = 5, .max = 80 },
193         .p1 = { .min = 1, .max = 8},
194         .p2 = { .dot_limit = 165000,
195                 .p2_slow = 10, .p2_fast = 5 },
196         .find_pll = intel_g4x_find_best_PLL,
197 };
198
199 static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
200         .dot = { .min = 20000, .max = 115000 },
201         .vco = { .min = 1750000, .max = 3500000 },
202         .n = { .min = 1, .max = 3 },
203         .m = { .min = 104, .max = 138 },
204         .m1 = { .min = 17, .max = 23 },
205         .m2 = { .min = 5, .max = 11 },
206         .p = { .min = 28, .max = 112 },
207         .p1 = { .min = 2, .max = 8 },
208         .p2 = { .dot_limit = 0,
209                 .p2_slow = 14, .p2_fast = 14
210         },
211         .find_pll = intel_g4x_find_best_PLL,
212 };
213
214 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
215         .dot = { .min = 80000, .max = 224000 },
216         .vco = { .min = 1750000, .max = 3500000 },
217         .n = { .min = 1, .max = 3 },
218         .m = { .min = 104, .max = 138 },
219         .m1 = { .min = 17, .max = 23 },
220         .m2 = { .min = 5, .max = 11 },
221         .p = { .min = 14, .max = 42 },
222         .p1 = { .min = 2, .max = 6 },
223         .p2 = { .dot_limit = 0,
224                 .p2_slow = 7, .p2_fast = 7
225         },
226         .find_pll = intel_g4x_find_best_PLL,
227 };
228
229 static const intel_limit_t intel_limits_g4x_display_port = {
230         .dot = { .min = 161670, .max = 227000 },
231         .vco = { .min = 1750000, .max = 3500000},
232         .n = { .min = 1, .max = 2 },
233         .m = { .min = 97, .max = 108 },
234         .m1 = { .min = 0x10, .max = 0x12 },
235         .m2 = { .min = 0x05, .max = 0x06 },
236         .p = { .min = 10, .max = 20 },
237         .p1 = { .min = 1, .max = 2},
238         .p2 = { .dot_limit = 0,
239                 .p2_slow = 10, .p2_fast = 10 },
240         .find_pll = intel_find_pll_g4x_dp,
241 };
242
243 static const intel_limit_t intel_limits_pineview_sdvo = {
244         .dot = { .min = 20000, .max = 400000},
245         .vco = { .min = 1700000, .max = 3500000 },
246         /* Pineview's N counter is a ring counter */
247         .n = { .min = 3, .max = 6 },
248         .m = { .min = 2, .max = 256 },
249         /* Pineview only has one combined m divider, which we treat as m2. */
250         .m1 = { .min = 0, .max = 0 },
251         .m2 = { .min = 0, .max = 254 },
252         .p = { .min = 5, .max = 80 },
253         .p1 = { .min = 1, .max = 8 },
254         .p2 = { .dot_limit = 200000,
255                 .p2_slow = 10, .p2_fast = 5 },
256         .find_pll = intel_find_best_PLL,
257 };
258
259 static const intel_limit_t intel_limits_pineview_lvds = {
260         .dot = { .min = 20000, .max = 400000 },
261         .vco = { .min = 1700000, .max = 3500000 },
262         .n = { .min = 3, .max = 6 },
263         .m = { .min = 2, .max = 256 },
264         .m1 = { .min = 0, .max = 0 },
265         .m2 = { .min = 0, .max = 254 },
266         .p = { .min = 7, .max = 112 },
267         .p1 = { .min = 1, .max = 8 },
268         .p2 = { .dot_limit = 112000,
269                 .p2_slow = 14, .p2_fast = 14 },
270         .find_pll = intel_find_best_PLL,
271 };
272
273 /* Ironlake / Sandybridge
274  *
275  * We calculate clock using (register_value + 2) for N/M1/M2, so here
276  * the range value for them is (actual_value - 2).
277  */
278 static const intel_limit_t intel_limits_ironlake_dac = {
279         .dot = { .min = 25000, .max = 350000 },
280         .vco = { .min = 1760000, .max = 3510000 },
281         .n = { .min = 1, .max = 5 },
282         .m = { .min = 79, .max = 127 },
283         .m1 = { .min = 12, .max = 22 },
284         .m2 = { .min = 5, .max = 9 },
285         .p = { .min = 5, .max = 80 },
286         .p1 = { .min = 1, .max = 8 },
287         .p2 = { .dot_limit = 225000,
288                 .p2_slow = 10, .p2_fast = 5 },
289         .find_pll = intel_g4x_find_best_PLL,
290 };
291
292 static const intel_limit_t intel_limits_ironlake_single_lvds = {
293         .dot = { .min = 25000, .max = 350000 },
294         .vco = { .min = 1760000, .max = 3510000 },
295         .n = { .min = 1, .max = 3 },
296         .m = { .min = 79, .max = 118 },
297         .m1 = { .min = 12, .max = 22 },
298         .m2 = { .min = 5, .max = 9 },
299         .p = { .min = 28, .max = 112 },
300         .p1 = { .min = 2, .max = 8 },
301         .p2 = { .dot_limit = 225000,
302                 .p2_slow = 14, .p2_fast = 14 },
303         .find_pll = intel_g4x_find_best_PLL,
304 };
305
306 static const intel_limit_t intel_limits_ironlake_dual_lvds = {
307         .dot = { .min = 25000, .max = 350000 },
308         .vco = { .min = 1760000, .max = 3510000 },
309         .n = { .min = 1, .max = 3 },
310         .m = { .min = 79, .max = 127 },
311         .m1 = { .min = 12, .max = 22 },
312         .m2 = { .min = 5, .max = 9 },
313         .p = { .min = 14, .max = 56 },
314         .p1 = { .min = 2, .max = 8 },
315         .p2 = { .dot_limit = 225000,
316                 .p2_slow = 7, .p2_fast = 7 },
317         .find_pll = intel_g4x_find_best_PLL,
318 };
319
320 /* LVDS 100MHz refclk limits. */
321 static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
322         .dot = { .min = 25000, .max = 350000 },
323         .vco = { .min = 1760000, .max = 3510000 },
324         .n = { .min = 1, .max = 2 },
325         .m = { .min = 79, .max = 126 },
326         .m1 = { .min = 12, .max = 22 },
327         .m2 = { .min = 5, .max = 9 },
328         .p = { .min = 28, .max = 112 },
329         .p1 = { .min = 2, .max = 8 },
330         .p2 = { .dot_limit = 225000,
331                 .p2_slow = 14, .p2_fast = 14 },
332         .find_pll = intel_g4x_find_best_PLL,
333 };
334
335 static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
336         .dot = { .min = 25000, .max = 350000 },
337         .vco = { .min = 1760000, .max = 3510000 },
338         .n = { .min = 1, .max = 3 },
339         .m = { .min = 79, .max = 126 },
340         .m1 = { .min = 12, .max = 22 },
341         .m2 = { .min = 5, .max = 9 },
342         .p = { .min = 14, .max = 42 },
343         .p1 = { .min = 2, .max = 6 },
344         .p2 = { .dot_limit = 225000,
345                 .p2_slow = 7, .p2_fast = 7 },
346         .find_pll = intel_g4x_find_best_PLL,
347 };
348
349 static const intel_limit_t intel_limits_ironlake_display_port = {
350         .dot = { .min = 25000, .max = 350000 },
351         .vco = { .min = 1760000, .max = 3510000},
352         .n = { .min = 1, .max = 2 },
353         .m = { .min = 81, .max = 90 },
354         .m1 = { .min = 12, .max = 22 },
355         .m2 = { .min = 5, .max = 9 },
356         .p = { .min = 10, .max = 20 },
357         .p1 = { .min = 1, .max = 2},
358         .p2 = { .dot_limit = 0,
359                 .p2_slow = 10, .p2_fast = 10 },
360         .find_pll = intel_find_pll_ironlake_dp,
361 };
362
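/* Report whether the LVDS port is configured for dual-channel operation,
 * i.e. whether the channel B clock is powered up.
 */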
363 static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
364                               unsigned int reg)
365 {
366         unsigned int val;
367
368         if (dev_priv->lvds_val)
369                 val = dev_priv->lvds_val;
370         else {
371                 /* BIOS should set the proper LVDS register value at boot, but
372                  * in reality, it doesn't set the value when the lid is closed;
373                  * we need to check "the value to be set" in VBT when LVDS
374                  * register is uninitialized.
375                  */
376                 val = I915_READ(reg);
377                 if (!(val & ~LVDS_DETECTED))
378                         val = dev_priv->bios_lvds_val;
379                 dev_priv->lvds_val = val;
380         }
381         return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
382 }
383
384 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
385                                                 int refclk)
386 {
387         struct drm_device *dev = crtc->dev;
388         struct drm_i915_private *dev_priv = dev->dev_private;
389         const intel_limit_t *limit;
390
391         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
392                 if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
393                         /* LVDS dual channel */
394                         if (refclk == 100000)
395                                 limit = &intel_limits_ironlake_dual_lvds_100m;
396                         else
397                                 limit = &intel_limits_ironlake_dual_lvds;
398                 } else {
399                         if (refclk == 100000)
400                                 limit = &intel_limits_ironlake_single_lvds_100m;
401                         else
402                                 limit = &intel_limits_ironlake_single_lvds;
403                 }
404         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
405                         HAS_eDP)
406                 limit = &intel_limits_ironlake_display_port;
407         else
408                 limit = &intel_limits_ironlake_dac;
409
410         return limit;
411 }
412
413 static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
414 {
415         struct drm_device *dev = crtc->dev;
416         struct drm_i915_private *dev_priv = dev->dev_private;
417         const intel_limit_t *limit;
418
419         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
420                 if (is_dual_link_lvds(dev_priv, LVDS))
421                         /* LVDS with dual channel */
422                         limit = &intel_limits_g4x_dual_channel_lvds;
423                 else
424                         /* LVDS with a single channel */
425                         limit = &intel_limits_g4x_single_channel_lvds;
426         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
427                    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
428                 limit = &intel_limits_g4x_hdmi;
429         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
430                 limit = &intel_limits_g4x_sdvo;
431         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
432                 limit = &intel_limits_g4x_display_port;
433         } else /* Fallback for any other output type */
434                 limit = &intel_limits_i9xx_sdvo;
435
436         return limit;
437 }
438
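/* Pick the DPLL limit table that matches this chip family and the output
 * type currently attached to @crtc.
 */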
439 static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
440 {
441         struct drm_device *dev = crtc->dev;
442         const intel_limit_t *limit;
443
444         if (HAS_PCH_SPLIT(dev))
445                 limit = intel_ironlake_limit(crtc, refclk);
446         else if (IS_G4X(dev)) {
447                 limit = intel_g4x_limit(crtc);
448         } else if (IS_PINEVIEW(dev)) {
449                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
450                         limit = &intel_limits_pineview_lvds;
451                 else
452                         limit = &intel_limits_pineview_sdvo;
453         } else if (!IS_GEN2(dev)) {
454                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
455                         limit = &intel_limits_i9xx_lvds;
456                 else
457                         limit = &intel_limits_i9xx_sdvo;
458         } else {
459                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
460                         limit = &intel_limits_i8xx_lvds;
461                 else
462                         limit = &intel_limits_i8xx_dvo;
463         }
464         return limit;
465 }
466
467 /* m1 is reserved as 0 in Pineview, n is a ring counter */
468 static void pineview_clock(int refclk, intel_clock_t *clock)
469 {
470         clock->m = clock->m2 + 2;
471         clock->p = clock->p1 * clock->p2;
472         clock->vco = refclk * clock->m / clock->n;
473         clock->dot = clock->vco / clock->p;
474 }
475
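/* Derive the effective dividers and clocks from the raw values:
 * m = 5 * (m1 + 2) + (m2 + 2), vco = refclk * m / (n + 2), dot = vco / p.
 * Pineview uses its own encoding and is handled by pineview_clock().
 */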
476 static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
477 {
478         if (IS_PINEVIEW(dev)) {
479                 pineview_clock(refclk, clock);
480                 return;
481         }
482         clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
483         clock->p = clock->p1 * clock->p2;
484         clock->vco = refclk * clock->m / (clock->n + 2);
485         clock->dot = clock->vco / clock->p;
486 }
487
488 /**
489  * Returns whether any output on the specified pipe is of the specified type
490  */
491 bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
492 {
493         struct drm_device *dev = crtc->dev;
494         struct drm_mode_config *mode_config = &dev->mode_config;
495         struct intel_encoder *encoder;
496
497         list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
498                 if (encoder->base.crtc == crtc && encoder->type == type)
499                         return true;
500
501         return false;
502 }
503
504 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
505 /**
506  * Returns whether the given set of divisors are valid for a given refclk with
507  * the given connectors.
508  */
509
510 static bool intel_PLL_is_valid(struct drm_device *dev,
511                                const intel_limit_t *limit,
512                                const intel_clock_t *clock)
513 {
514         if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
515                 INTELPllInvalid("p1 out of range\n");
516         if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
517                 INTELPllInvalid("p out of range\n");
518         if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
519                 INTELPllInvalid("m2 out of range\n");
520         if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
521                 INTELPllInvalid("m1 out of range\n");
522         if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
523                 INTELPllInvalid("m1 <= m2\n");
524         if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
525                 INTELPllInvalid("m out of range\n");
526         if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
527                 INTELPllInvalid("n out of range\n");
528         if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
529                 INTELPllInvalid("vco out of range\n");
530         /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
531          * connector, etc., rather than just a single range.
532          */
533         if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
534                 INTELPllInvalid("dot out of range\n");
535
536         return true;
537 }
538
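/* Exhaustively walk the divisor ranges in @limit and keep the combination
 * whose resulting dot clock lands closest to @target.
 */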
539 static bool
540 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
541                     int target, int refclk, intel_clock_t *match_clock,
542                     intel_clock_t *best_clock)
543
544 {
545         struct drm_device *dev = crtc->dev;
546         struct drm_i915_private *dev_priv = dev->dev_private;
547         intel_clock_t clock;
548         int err = target;
549
550         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
551             (I915_READ(LVDS)) != 0) {
552                 /*
553                  * For LVDS, if the panel is on, just rely on its current
554                  * settings for dual-channel.  We haven't figured out how to
555                  * reliably set up different single/dual channel state, if we
556                  * even can.
557                  */
558                 if (is_dual_link_lvds(dev_priv, LVDS))
559                         clock.p2 = limit->p2.p2_fast;
560                 else
561                         clock.p2 = limit->p2.p2_slow;
562         } else {
563                 if (target < limit->p2.dot_limit)
564                         clock.p2 = limit->p2.p2_slow;
565                 else
566                         clock.p2 = limit->p2.p2_fast;
567         }
568
569         memset(best_clock, 0, sizeof(*best_clock));
570
571         for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
572              clock.m1++) {
573                 for (clock.m2 = limit->m2.min;
574                      clock.m2 <= limit->m2.max; clock.m2++) {
575                         /* m1 is always 0 in Pineview */
576                         if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
577                                 break;
578                         for (clock.n = limit->n.min;
579                              clock.n <= limit->n.max; clock.n++) {
580                                 for (clock.p1 = limit->p1.min;
581                                         clock.p1 <= limit->p1.max; clock.p1++) {
582                                         int this_err;
583
584                                         intel_clock(dev, refclk, &clock);
585                                         if (!intel_PLL_is_valid(dev, limit,
586                                                                 &clock))
587                                                 continue;
588                                         if (match_clock &&
589                                             clock.p != match_clock->p)
590                                                 continue;
591
592                                         this_err = abs(clock.dot - target);
593                                         if (this_err < err) {
594                                                 *best_clock = clock;
595                                                 err = this_err;
596                                         }
597                                 }
598                         }
599                 }
600         }
601
602         return (err != target);
603 }
604
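/* G4x/PCH variant of the search above: per the hardware preference, try
 * smaller n and larger m1/m2/p1 first, and keep the best candidate that
 * lands within roughly 0.6% of @target.
 */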
605 static bool
606 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
607                         int target, int refclk, intel_clock_t *match_clock,
608                         intel_clock_t *best_clock)
609 {
610         struct drm_device *dev = crtc->dev;
611         struct drm_i915_private *dev_priv = dev->dev_private;
612         intel_clock_t clock;
613         int max_n;
614         bool found;
615         /* approximately equals target * 0.00585 */
616         int err_most = (target >> 8) + (target >> 9);
617         found = false;
618
619         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
620                 int lvds_reg;
621
622                 if (HAS_PCH_SPLIT(dev))
623                         lvds_reg = PCH_LVDS;
624                 else
625                         lvds_reg = LVDS;
626                 if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
627                     LVDS_CLKB_POWER_UP)
628                         clock.p2 = limit->p2.p2_fast;
629                 else
630                         clock.p2 = limit->p2.p2_slow;
631         } else {
632                 if (target < limit->p2.dot_limit)
633                         clock.p2 = limit->p2.p2_slow;
634                 else
635                         clock.p2 = limit->p2.p2_fast;
636         }
637
638         memset(best_clock, 0, sizeof(*best_clock));
639         max_n = limit->n.max;
640         /* based on hardware requirement, prefer smaller n for better precision */
641         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
642                 /* based on hardware requirement, prefer larger m1, m2 */
643                 for (clock.m1 = limit->m1.max;
644                      clock.m1 >= limit->m1.min; clock.m1--) {
645                         for (clock.m2 = limit->m2.max;
646                              clock.m2 >= limit->m2.min; clock.m2--) {
647                                 for (clock.p1 = limit->p1.max;
648                                      clock.p1 >= limit->p1.min; clock.p1--) {
649                                         int this_err;
650
651                                         intel_clock(dev, refclk, &clock);
652                                         if (!intel_PLL_is_valid(dev, limit,
653                                                                 &clock))
654                                                 continue;
655                                         if (match_clock &&
656                                             clock.p != match_clock->p)
657                                                 continue;
658
659                                         this_err = abs(clock.dot - target);
660                                         if (this_err < err_most) {
661                                                 *best_clock = clock;
662                                                 err_most = this_err;
663                                                 max_n = clock.n;
664                                                 found = true;
665                                         }
666                                 }
667                         }
668                 }
669         }
670         return found;
671 }
672
673 static bool
674 intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
675                            int target, int refclk, intel_clock_t *match_clock,
676                            intel_clock_t *best_clock)
677 {
678         struct drm_device *dev = crtc->dev;
679         intel_clock_t clock;
680
681         if (target < 200000) {
682                 clock.n = 1;
683                 clock.p1 = 2;
684                 clock.p2 = 10;
685                 clock.m1 = 12;
686                 clock.m2 = 9;
687         } else {
688                 clock.n = 2;
689                 clock.p1 = 1;
690                 clock.p2 = 10;
691                 clock.m1 = 14;
692                 clock.m2 = 8;
693         }
694         intel_clock(dev, refclk, &clock);
695         memcpy(best_clock, &clock, sizeof(intel_clock_t));
696         return true;
697 }
698
699 /* DisplayPort has only two frequencies, 162MHz and 270MHz */
700 static bool
701 intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
702                       int target, int refclk, intel_clock_t *match_clock,
703                       intel_clock_t *best_clock)
704 {
705         intel_clock_t clock;
706         if (target < 200000) {
707                 clock.p1 = 2;
708                 clock.p2 = 10;
709                 clock.n = 2;
710                 clock.m1 = 23;
711                 clock.m2 = 8;
712         } else {
713                 clock.p1 = 1;
714                 clock.p2 = 10;
715                 clock.n = 1;
716                 clock.m1 = 14;
717                 clock.m2 = 2;
718         }
719         clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
720         clock.p = (clock.p1 * clock.p2);
721         clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
722         clock.vco = 0;
723         memcpy(best_clock, &clock, sizeof(intel_clock_t));
724         return true;
725 }
726
727 /**
728  * intel_wait_for_vblank - wait for vblank on a given pipe
729  * @dev: drm device
730  * @pipe: pipe to wait for
731  *
732  * Wait for vblank to occur on a given pipe.  Needed for various bits of
733  * mode setting code.
734  */
735 void intel_wait_for_vblank(struct drm_device *dev, int pipe)
736 {
737         struct drm_i915_private *dev_priv = dev->dev_private;
738         int pipestat_reg = PIPESTAT(pipe);
739
740         /* Clear existing vblank status. Note this will clear any other
741          * sticky status fields as well.
742          *
743          * This races with i915_driver_irq_handler() with the result
744          * that either function could miss a vblank event.  Here it is not
745          * fatal, as we will either wait upon the next vblank interrupt or
746          * timeout.  Generally speaking intel_wait_for_vblank() is only
747          * called during modeset at which time the GPU should be idle and
748          * should *not* be performing page flips and thus not waiting on
749          * vblanks...
750          * Currently, the result of us stealing a vblank from the irq
751          * handler is that a single frame will be skipped during swapbuffers.
752          */
753         I915_WRITE(pipestat_reg,
754                    I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
755
756         /* Wait for vblank interrupt bit to set */
757         if (wait_for(I915_READ(pipestat_reg) &
758                      PIPE_VBLANK_INTERRUPT_STATUS,
759                      50))
760                 DRM_DEBUG_KMS("vblank wait timed out\n");
761 }
762
763 /*
764  * intel_wait_for_pipe_off - wait for pipe to turn off
765  * @dev: drm device
766  * @pipe: pipe to wait for
767  *
768  * After disabling a pipe, we can't wait for vblank in the usual way,
769  * spinning on the vblank interrupt status bit, since we won't actually
770  * see an interrupt when the pipe is disabled.
771  *
772  * On Gen4 and above:
773  *   wait for the pipe register state bit to turn off
774  *
775  * Otherwise:
776  *   wait for the display line value to settle (it usually
777  *   ends up stopping at the start of the next frame).
778  *
779  */
780 void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
781 {
782         struct drm_i915_private *dev_priv = dev->dev_private;
783
784         if (INTEL_INFO(dev)->gen >= 4) {
785                 int reg = PIPECONF(pipe);
786
787                 /* Wait for the Pipe State to go off */
788                 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
789                              100))
790                         DRM_DEBUG_KMS("pipe_off wait timed out\n");
791         } else {
792                 u32 last_line;
793                 int reg = PIPEDSL(pipe);
794                 unsigned long timeout = jiffies + msecs_to_jiffies(100);
795
796                 /* Wait for the display line to settle */
797                 do {
798                         last_line = I915_READ(reg) & DSL_LINEMASK;
799                         mdelay(5);
800                 } while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
801                          time_after(timeout, jiffies));
802                 if (time_after(jiffies, timeout))
803                         DRM_DEBUG_KMS("pipe_off wait timed out\n");
804         }
805 }
806
807 static const char *state_string(bool enabled)
808 {
809         return enabled ? "on" : "off";
810 }
811
812 /* Only for pre-ILK configs */
813 static void assert_pll(struct drm_i915_private *dev_priv,
814                        enum pipe pipe, bool state)
815 {
816         int reg;
817         u32 val;
818         bool cur_state;
819
820         reg = DPLL(pipe);
821         val = I915_READ(reg);
822         cur_state = !!(val & DPLL_VCO_ENABLE);
823         WARN(cur_state != state,
824              "PLL state assertion failure (expected %s, current %s)\n",
825              state_string(state), state_string(cur_state));
826 }
827 #define assert_pll_enabled(d, p) assert_pll(d, p, true)
828 #define assert_pll_disabled(d, p) assert_pll(d, p, false)
829
830 /* For ILK+ */
831 static void assert_pch_pll(struct drm_i915_private *dev_priv,
832                            enum pipe pipe, bool state)
833 {
834         int reg;
835         u32 val;
836         bool cur_state;
837
838         if (HAS_PCH_CPT(dev_priv->dev)) {
839                 u32 pch_dpll;
840
841                 pch_dpll = I915_READ(PCH_DPLL_SEL);
842
843                 /* Make sure the selected PLL is enabled to the transcoder */
844                 WARN(!((pch_dpll >> (4 * pipe)) & 8),
845                      "transcoder %d PLL not enabled\n", pipe);
846
847                 /* Convert the transcoder pipe number to a pll pipe number */
848                 pipe = (pch_dpll >> (4 * pipe)) & 1;
849         }
850
851         reg = PCH_DPLL(pipe);
852         val = I915_READ(reg);
853         cur_state = !!(val & DPLL_VCO_ENABLE);
854         WARN(cur_state != state,
855              "PCH PLL state assertion failure (expected %s, current %s)\n",
856              state_string(state), state_string(cur_state));
857 }
858 #define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
859 #define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
860
861 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
862                           enum pipe pipe, bool state)
863 {
864         int reg;
865         u32 val;
866         bool cur_state;
867
868         reg = FDI_TX_CTL(pipe);
869         val = I915_READ(reg);
870         cur_state = !!(val & FDI_TX_ENABLE);
871         WARN(cur_state != state,
872              "FDI TX state assertion failure (expected %s, current %s)\n",
873              state_string(state), state_string(cur_state));
874 }
875 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
876 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
877
878 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
879                           enum pipe pipe, bool state)
880 {
881         int reg;
882         u32 val;
883         bool cur_state;
884
885         reg = FDI_RX_CTL(pipe);
886         val = I915_READ(reg);
887         cur_state = !!(val & FDI_RX_ENABLE);
888         WARN(cur_state != state,
889              "FDI RX state assertion failure (expected %s, current %s)\n",
890              state_string(state), state_string(cur_state));
891 }
892 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
893 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
894
895 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
896                                       enum pipe pipe)
897 {
898         int reg;
899         u32 val;
900
901         /* ILK FDI PLL is always enabled */
902         if (dev_priv->info->gen == 5)
903                 return;
904
905         reg = FDI_TX_CTL(pipe);
906         val = I915_READ(reg);
907         WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
908 }
909
910 static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
911                                       enum pipe pipe)
912 {
913         int reg;
914         u32 val;
915
916         reg = FDI_RX_CTL(pipe);
917         val = I915_READ(reg);
918         WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
919 }
920
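/* WARN if @pipe drives the panel while the panel is powered on and the panel
 * power sequencer registers are still write-protected.
 */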
921 static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
922                                   enum pipe pipe)
923 {
924         int pp_reg, lvds_reg;
925         u32 val;
926         enum pipe panel_pipe = PIPE_A;
927         bool locked = true;
928
929         if (HAS_PCH_SPLIT(dev_priv->dev)) {
930                 pp_reg = PCH_PP_CONTROL;
931                 lvds_reg = PCH_LVDS;
932         } else {
933                 pp_reg = PP_CONTROL;
934                 lvds_reg = LVDS;
935         }
936
937         val = I915_READ(pp_reg);
938         if (!(val & PANEL_POWER_ON) ||
939             ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
940                 locked = false;
941
942         if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
943                 panel_pipe = PIPE_B;
944
945         WARN(panel_pipe == pipe && locked,
946              "panel assertion failure, pipe %c regs locked\n",
947              pipe_name(pipe));
948 }
949
950 void assert_pipe(struct drm_i915_private *dev_priv,
951                  enum pipe pipe, bool state)
952 {
953         int reg;
954         u32 val;
955         bool cur_state;
956
957         /* if we need the pipe A quirk it must be always on */
958         if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
959                 state = true;
960
961         reg = PIPECONF(pipe);
962         val = I915_READ(reg);
963         cur_state = !!(val & PIPECONF_ENABLE);
964         WARN(cur_state != state,
965              "pipe %c assertion failure (expected %s, current %s)\n",
966              pipe_name(pipe), state_string(state), state_string(cur_state));
967 }
968
969 static void assert_plane(struct drm_i915_private *dev_priv,
970                          enum plane plane, bool state)
971 {
972         int reg;
973         u32 val;
974         bool cur_state;
975
976         reg = DSPCNTR(plane);
977         val = I915_READ(reg);
978         cur_state = !!(val & DISPLAY_PLANE_ENABLE);
979         WARN(cur_state != state,
980              "plane %c assertion failure (expected %s, current %s)\n",
981              plane_name(plane), state_string(state), state_string(cur_state));
982 }
983
984 #define assert_plane_enabled(d, p) assert_plane(d, p, true)
985 #define assert_plane_disabled(d, p) assert_plane(d, p, false)
986
987 static void assert_planes_disabled(struct drm_i915_private *dev_priv,
988                                    enum pipe pipe)
989 {
990         int reg, i;
991         u32 val;
992         int cur_pipe;
993
994         /* Planes are fixed to pipes on ILK+ */
995         if (HAS_PCH_SPLIT(dev_priv->dev)) {
996                 reg = DSPCNTR(pipe);
997                 val = I915_READ(reg);
998                 WARN((val & DISPLAY_PLANE_ENABLE),
999                      "plane %c assertion failure, should be disabled but is still enabled\n",
1000                      plane_name(pipe));
1001                 return;
1002         }
1003
1004         /* Need to check both planes against the pipe */
1005         for (i = 0; i < 2; i++) {
1006                 reg = DSPCNTR(i);
1007                 val = I915_READ(reg);
1008                 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1009                         DISPPLANE_SEL_PIPE_SHIFT;
1010                 WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1011                      "plane %c assertion failure, should be off on pipe %c but is still active\n",
1012                      plane_name(i), pipe_name(pipe));
1013         }
1014 }
1015
1016 static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1017 {
1018         u32 val;
1019         bool enabled;
1020
1021         val = I915_READ(PCH_DREF_CONTROL);
1022         enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1023                             DREF_SUPERSPREAD_SOURCE_MASK));
1024         WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1025 }
1026
1027 static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
1028                                        enum pipe pipe)
1029 {
1030         int reg;
1031         u32 val;
1032         bool enabled;
1033
1034         reg = TRANSCONF(pipe);
1035         val = I915_READ(reg);
1036         enabled = !!(val & TRANS_ENABLE);
1037         WARN(enabled,
1038              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1039              pipe_name(pipe));
1040 }
1041
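/* The *_pipe_enabled() helpers below decode a port control value and report
 * whether that port is enabled and routed to @pipe (via the transcoder select
 * field on CPT, via the pipe select bits otherwise).
 */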
1042 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1043                             enum pipe pipe, u32 port_sel, u32 val)
1044 {
1045         if ((val & DP_PORT_EN) == 0)
1046                 return false;
1047
1048         if (HAS_PCH_CPT(dev_priv->dev)) {
1049                 u32     trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1050                 u32     trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1051                 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1052                         return false;
1053         } else {
1054                 if ((val & DP_PIPE_MASK) != (pipe << 30))
1055                         return false;
1056         }
1057         return true;
1058 }
1059
1060 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1061                               enum pipe pipe, u32 val)
1062 {
1063         if ((val & PORT_ENABLE) == 0)
1064                 return false;
1065
1066         if (HAS_PCH_CPT(dev_priv->dev)) {
1067                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1068                         return false;
1069         } else {
1070                 if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
1071                         return false;
1072         }
1073         return true;
1074 }
1075
1076 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1077                               enum pipe pipe, u32 val)
1078 {
1079         if ((val & LVDS_PORT_EN) == 0)
1080                 return false;
1081
1082         if (HAS_PCH_CPT(dev_priv->dev)) {
1083                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1084                         return false;
1085         } else {
1086                 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1087                         return false;
1088         }
1089         return true;
1090 }
1091
1092 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1093                               enum pipe pipe, u32 val)
1094 {
1095         if ((val & ADPA_DAC_ENABLE) == 0)
1096                 return false;
1097         if (HAS_PCH_CPT(dev_priv->dev)) {
1098                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1099                         return false;
1100         } else {
1101                 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1102                         return false;
1103         }
1104         return true;
1105 }
1106
1107 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1108                                    enum pipe pipe, int reg, u32 port_sel)
1109 {
1110         u32 val = I915_READ(reg);
1111         WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1112              "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1113              reg, pipe_name(pipe));
1114 }
1115
1116 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1117                                      enum pipe pipe, int reg)
1118 {
1119         u32 val = I915_READ(reg);
1120         WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1121              "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1122              reg, pipe_name(pipe));
1123 }
1124
1125 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1126                                       enum pipe pipe)
1127 {
1128         int reg;
1129         u32 val;
1130
1131         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1132         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1133         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1134
1135         reg = PCH_ADPA;
1136         val = I915_READ(reg);
1137         WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1138              "PCH VGA enabled on transcoder %c, should be disabled\n",
1139              pipe_name(pipe));
1140
1141         reg = PCH_LVDS;
1142         val = I915_READ(reg);
1143         WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1144              "PCH LVDS enabled on transcoder %c, should be disabled\n",
1145              pipe_name(pipe));
1146
1147         assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
1148         assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
1149         assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
1150 }
1151
1152 /**
1153  * intel_enable_pll - enable a PLL
1154  * @dev_priv: i915 private structure
1155  * @pipe: pipe PLL to enable
1156  *
1157  * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
1158  * make sure the PLL reg is writable first though, since the panel write
1159  * protect mechanism may be enabled.
1160  *
1161  * Note!  This is for pre-ILK only.
1162  */
1163 static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1164 {
1165         int reg;
1166         u32 val;
1167
1168         /* No really, not for ILK+ */
1169         BUG_ON(dev_priv->info->gen >= 5);
1170
1171         /* PLL is protected by panel, make sure we can write it */
1172         if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
1173                 assert_panel_unlocked(dev_priv, pipe);
1174
1175         reg = DPLL(pipe);
1176         val = I915_READ(reg);
1177         val |= DPLL_VCO_ENABLE;
1178
1179         /* We do this three times for luck */
1180         I915_WRITE(reg, val);
1181         POSTING_READ(reg);
1182         udelay(150); /* wait for warmup */
1183         I915_WRITE(reg, val);
1184         POSTING_READ(reg);
1185         udelay(150); /* wait for warmup */
1186         I915_WRITE(reg, val);
1187         POSTING_READ(reg);
1188         udelay(150); /* wait for warmup */
1189 }
1190
1191 /**
1192  * intel_disable_pll - disable a PLL
1193  * @dev_priv: i915 private structure
1194  * @pipe: pipe PLL to disable
1195  *
1196  * Disable the PLL for @pipe, making sure the pipe is off first.
1197  *
1198  * Note!  This is for pre-ILK only.
1199  */
1200 static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1201 {
1202         int reg;
1203         u32 val;
1204
1205         /* Don't disable pipe A or the pipe A PLL if the pipe A force quirk needs them on */
1206         if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1207                 return;
1208
1209         /* Make sure the pipe isn't still relying on us */
1210         assert_pipe_disabled(dev_priv, pipe);
1211
1212         reg = DPLL(pipe);
1213         val = I915_READ(reg);
1214         val &= ~DPLL_VCO_ENABLE;
1215         I915_WRITE(reg, val);
1216         POSTING_READ(reg);
1217 }
1218
1219 /**
1220  * intel_enable_pch_pll - enable PCH PLL
1221  * @dev_priv: i915 private structure
1222  * @pipe: pipe PLL to enable
1223  *
1224  * The PCH PLL needs to be enabled before the PCH transcoder, since it
1225  * drives the transcoder clock.
1226  */
1227 static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
1228                                  enum pipe pipe)
1229 {
1230         int reg;
1231         u32 val;
1232
1233         if (pipe > 1)
1234                 return;
1235
1236         /* PCH only available on ILK+ */
1237         BUG_ON(dev_priv->info->gen < 5);
1238
1239         /* PCH refclock must be enabled first */
1240         assert_pch_refclk_enabled(dev_priv);
1241
1242         reg = PCH_DPLL(pipe);
1243         val = I915_READ(reg);
1244         val |= DPLL_VCO_ENABLE;
1245         I915_WRITE(reg, val);
1246         POSTING_READ(reg);
1247         udelay(200);
1248 }
1249
1250 static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
1251                                   enum pipe pipe)
1252 {
1253         int reg;
1254         u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
1255                 pll_sel = TRANSC_DPLL_ENABLE;
1256
1257         if (pipe > 1)
1258                 return;
1259
1260         /* PCH only available on ILK+ */
1261         BUG_ON(dev_priv->info->gen < 5);
1262
1263         /* Make sure transcoder isn't still depending on us */
1264         assert_transcoder_disabled(dev_priv, pipe);
1265
1266         if (pipe == 0)
1267                 pll_sel |= TRANSC_DPLLA_SEL;
1268         else if (pipe == 1)
1269                 pll_sel |= TRANSC_DPLLB_SEL;
1270
1271
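        /* Leave the PLL running if transcoder C still has it selected and enabled */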
1272         if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
1273                 return;
1274
1275         reg = PCH_DPLL(pipe);
1276         val = I915_READ(reg);
1277         val &= ~DPLL_VCO_ENABLE;
1278         I915_WRITE(reg, val);
1279         POSTING_READ(reg);
1280         udelay(200);
1281 }
1282
1283 static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1284                                     enum pipe pipe)
1285 {
1286         int reg;
1287         u32 val, pipeconf_val;
1288         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1289
1290         /* PCH only available on ILK+ */
1291         BUG_ON(dev_priv->info->gen < 5);
1292
1293         /* Make sure PCH DPLL is enabled */
1294         assert_pch_pll_enabled(dev_priv, pipe);
1295
1296         /* FDI must be feeding us bits for PCH ports */
1297         assert_fdi_tx_enabled(dev_priv, pipe);
1298         assert_fdi_rx_enabled(dev_priv, pipe);
1299
1300         reg = TRANSCONF(pipe);
1301         val = I915_READ(reg);
1302         pipeconf_val = I915_READ(PIPECONF(pipe));
1303
1304         if (HAS_PCH_IBX(dev_priv->dev)) {
1305                 /*
1306                  * make the BPC in the transcoder consistent with
1307                  * that in the pipeconf register.
1308                  */
1309                 val &= ~PIPE_BPC_MASK;
1310                 val |= pipeconf_val & PIPE_BPC_MASK;
1311         }
1312
1313         val &= ~TRANS_INTERLACE_MASK;
1314         if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1315                 if (HAS_PCH_IBX(dev_priv->dev) &&
1316                     intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
1317                         val |= TRANS_LEGACY_INTERLACED_ILK;
1318                 else
1319                         val |= TRANS_INTERLACED;
1320         else
1321                 val |= TRANS_PROGRESSIVE;
1322
1323         I915_WRITE(reg, val | TRANS_ENABLE);
1324         if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1325                 DRM_ERROR("failed to enable transcoder %d\n", pipe);
1326 }
1327
1328 static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
1329                                      enum pipe pipe)
1330 {
1331         int reg;
1332         u32 val;
1333
1334         /* FDI relies on the transcoder */
1335         assert_fdi_tx_disabled(dev_priv, pipe);
1336         assert_fdi_rx_disabled(dev_priv, pipe);
1337
1338         /* Ports must be off as well */
1339         assert_pch_ports_disabled(dev_priv, pipe);
1340
1341         reg = TRANSCONF(pipe);
1342         val = I915_READ(reg);
1343         val &= ~TRANS_ENABLE;
1344         I915_WRITE(reg, val);
1345         /* wait for PCH transcoder off, transcoder state */
1346         if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1347                 DRM_ERROR("failed to disable transcoder %d\n", pipe);
1348 }
1349
1350 /**
1351  * intel_enable_pipe - enable a pipe, asserting requirements
1352  * @dev_priv: i915 private structure
1353  * @pipe: pipe to enable
1354  * @pch_port: on ILK+, is this pipe driving a PCH port or not
1355  *
1356  * Enable @pipe, making sure that various hardware specific requirements
1357  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1358  *
1359  * @pipe should be %PIPE_A or %PIPE_B.
1360  *
1361  * Will wait until the pipe is actually running (i.e. first vblank) before
1362  * returning.
1363  */
1364 static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1365                               bool pch_port)
1366 {
1367         int reg;
1368         u32 val;
1369
1370         /*
1371          * A pipe without a PLL won't actually be able to drive bits from
1372          * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1373          * need the check.
1374          */
1375         if (!HAS_PCH_SPLIT(dev_priv->dev))
1376                 assert_pll_enabled(dev_priv, pipe);
1377         else {
1378                 if (pch_port) {
1379                         /* if driving the PCH, we need FDI enabled */
1380                         assert_fdi_rx_pll_enabled(dev_priv, pipe);
1381                         assert_fdi_tx_pll_enabled(dev_priv, pipe);
1382                 }
1383                 /* FIXME: assert CPU port conditions for SNB+ */
1384         }
1385
1386         reg = PIPECONF(pipe);
1387         val = I915_READ(reg);
1388         if (val & PIPECONF_ENABLE)
1389                 return;
1390
1391         I915_WRITE(reg, val | PIPECONF_ENABLE);
1392         intel_wait_for_vblank(dev_priv->dev, pipe);
1393 }
1394
1395 /**
1396  * intel_disable_pipe - disable a pipe, asserting requirements
1397  * @dev_priv: i915 private structure
1398  * @pipe: pipe to disable
1399  *
1400  * Disable @pipe, making sure that various hardware specific requirements
1401  * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1402  *
1403  * @pipe should be %PIPE_A or %PIPE_B.
1404  *
1405  * Will wait until the pipe has shut down before returning.
1406  */
1407 static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1408                                enum pipe pipe)
1409 {
1410         int reg;
1411         u32 val;
1412
1413         /*
1414          * Make sure planes won't keep trying to pump pixels to us,
1415          * or we might hang the display.
1416          */
1417         assert_planes_disabled(dev_priv, pipe);
1418
1419         /* Don't disable pipe A or the pipe A PLL if the pipe A force quirk needs them on */
1420         if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1421                 return;
1422
1423         reg = PIPECONF(pipe);
1424         val = I915_READ(reg);
1425         if ((val & PIPECONF_ENABLE) == 0)
1426                 return;
1427
1428         I915_WRITE(reg, val & ~PIPECONF_ENABLE);
1429         intel_wait_for_pipe_off(dev_priv->dev, pipe);
1430 }
1431
1432 /*
1433  * Plane regs are double buffered, going from enabled->disabled needs a
1434  * trigger in order to latch.  The display address reg provides this.
1435  */
1436 static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
1437                                       enum plane plane)
1438 {
1439         I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1440         I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1441 }
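/*
 * The latch trigger above is simply a write-back of the current value, i.e.
 * the idiom I915_WRITE(reg, I915_READ(reg)); no bits change, but the write
 * arms the double-buffered update for the next vblank.
 */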
1442
1443 /**
1444  * intel_enable_plane - enable a display plane on a given pipe
1445  * @dev_priv: i915 private structure
1446  * @plane: plane to enable
1447  * @pipe: pipe being fed
1448  *
1449  * Enable @plane on @pipe, making sure that @pipe is running first.
1450  */
1451 static void intel_enable_plane(struct drm_i915_private *dev_priv,
1452                                enum plane plane, enum pipe pipe)
1453 {
1454         int reg;
1455         u32 val;
1456
1457         /* If the pipe isn't enabled, we can't pump pixels and may hang */
1458         assert_pipe_enabled(dev_priv, pipe);
1459
1460         reg = DSPCNTR(plane);
1461         val = I915_READ(reg);
1462         if (val & DISPLAY_PLANE_ENABLE)
1463                 return;
1464
1465         I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
1466         intel_flush_display_plane(dev_priv, plane);
1467         intel_wait_for_vblank(dev_priv->dev, pipe);
1468 }
1469
1470 /**
1471  * intel_disable_plane - disable a display plane
1472  * @dev_priv: i915 private structure
1473  * @plane: plane to disable
1474  * @pipe: pipe consuming the data
1475  *
1476  * Disable @plane; should be an independent operation.
1477  */
1478 static void intel_disable_plane(struct drm_i915_private *dev_priv,
1479                                 enum plane plane, enum pipe pipe)
1480 {
1481         int reg;
1482         u32 val;
1483
1484         reg = DSPCNTR(plane);
1485         val = I915_READ(reg);
1486         if ((val & DISPLAY_PLANE_ENABLE) == 0)
1487                 return;
1488
1489         I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
1490         intel_flush_display_plane(dev_priv, plane);
1491         intel_wait_for_vblank(dev_priv->dev, pipe);
1492 }
1493
1494 static void disable_pch_dp(struct drm_i915_private *dev_priv,
1495                            enum pipe pipe, int reg, u32 port_sel)
1496 {
1497         u32 val = I915_READ(reg);
1498         if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
1499                 DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
1500                 I915_WRITE(reg, val & ~DP_PORT_EN);
1501         }
1502 }
1503
1504 static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1505                              enum pipe pipe, int reg)
1506 {
1507         u32 val = I915_READ(reg);
1508         if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
1509                 DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
1510                               reg, pipe);
1511                 I915_WRITE(reg, val & ~PORT_ENABLE);
1512         }
1513 }
1514
1515 /* Disable any ports connected to this transcoder */
1516 static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1517                                     enum pipe pipe)
1518 {
1519         u32 reg, val;
1520
1521         val = I915_READ(PCH_PP_CONTROL);
1522         I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
1523
1524         disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1525         disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1526         disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1527
1528         reg = PCH_ADPA;
1529         val = I915_READ(reg);
1530         if (adpa_pipe_enabled(dev_priv, pipe, val))
1531                 I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
1532
1533         reg = PCH_LVDS;
1534         val = I915_READ(reg);
1535         if (lvds_pipe_enabled(dev_priv, pipe, val)) {
1536                 DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
1537                 I915_WRITE(reg, val & ~LVDS_PORT_EN);
1538                 POSTING_READ(reg);
1539                 udelay(100);
1540         }
1541
1542         disable_pch_hdmi(dev_priv, pipe, HDMIB);
1543         disable_pch_hdmi(dev_priv, pipe, HDMIC);
1544         disable_pch_hdmi(dev_priv, pipe, HDMID);
1545 }
1546
1547 static void i8xx_disable_fbc(struct drm_device *dev)
1548 {
1549         struct drm_i915_private *dev_priv = dev->dev_private;
1550         u32 fbc_ctl;
1551
1552         /* Disable compression */
1553         fbc_ctl = I915_READ(FBC_CONTROL);
1554         if ((fbc_ctl & FBC_CTL_EN) == 0)
1555                 return;
1556
1557         fbc_ctl &= ~FBC_CTL_EN;
1558         I915_WRITE(FBC_CONTROL, fbc_ctl);
1559
1560         /* Wait for compressing bit to clear */
1561         if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
1562                 DRM_DEBUG_KMS("FBC idle timed out\n");
1563                 return;
1564         }
1565
1566         DRM_DEBUG_KMS("disabled FBC\n");
1567 }
1568
1569 static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1570 {
1571         struct drm_device *dev = crtc->dev;
1572         struct drm_i915_private *dev_priv = dev->dev_private;
1573         struct drm_framebuffer *fb = crtc->fb;
1574         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1575         struct drm_i915_gem_object *obj = intel_fb->obj;
1576         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1577         int cfb_pitch;
1578         int plane, i;
1579         u32 fbc_ctl, fbc_ctl2;
1580
1581         cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
1582         if (fb->pitches[0] < cfb_pitch)
1583                 cfb_pitch = fb->pitches[0];
1584
1585         /* FBC_CTL wants 64B units */
1586         cfb_pitch = (cfb_pitch / 64) - 1;
1587         plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
1588
1589         /* Clear old tags */
1590         for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
1591                 I915_WRITE(FBC_TAG + (i * 4), 0);
1592
1593         /* Set it up... */
1594         fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
1595         fbc_ctl2 |= plane;
1596         I915_WRITE(FBC_CONTROL2, fbc_ctl2);
1597         I915_WRITE(FBC_FENCE_OFF, crtc->y);
1598
1599         /* enable it... */
1600         fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
1601         if (IS_I945GM(dev))
1602                 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
1603         fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
1604         fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
1605         fbc_ctl |= obj->fence_reg;
1606         I915_WRITE(FBC_CONTROL, fbc_ctl);
1607
1608         DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
1609                       cfb_pitch, crtc->y, intel_crtc->plane);
1610 }
1611
1612 static bool i8xx_fbc_enabled(struct drm_device *dev)
1613 {
1614         struct drm_i915_private *dev_priv = dev->dev_private;
1615
1616         return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
1617 }
1618
1619 static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1620 {
1621         struct drm_device *dev = crtc->dev;
1622         struct drm_i915_private *dev_priv = dev->dev_private;
1623         struct drm_framebuffer *fb = crtc->fb;
1624         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1625         struct drm_i915_gem_object *obj = intel_fb->obj;
1626         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1627         int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1628         unsigned long stall_watermark = 200;
1629         u32 dpfc_ctl;
1630
1631         dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
1632         dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
1633         I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
1634
1635         I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1636                    (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1637                    (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1638         I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
1639
1640         /* enable it... */
1641         I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
1642
1643         DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1644 }
1645
1646 static void g4x_disable_fbc(struct drm_device *dev)
1647 {
1648         struct drm_i915_private *dev_priv = dev->dev_private;
1649         u32 dpfc_ctl;
1650
1651         /* Disable compression */
1652         dpfc_ctl = I915_READ(DPFC_CONTROL);
1653         if (dpfc_ctl & DPFC_CTL_EN) {
1654                 dpfc_ctl &= ~DPFC_CTL_EN;
1655                 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
1656
1657                 DRM_DEBUG_KMS("disabled FBC\n");
1658         }
1659 }
1660
1661 static bool g4x_fbc_enabled(struct drm_device *dev)
1662 {
1663         struct drm_i915_private *dev_priv = dev->dev_private;
1664
1665         return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
1666 }
1667
1668 static void sandybridge_blit_fbc_update(struct drm_device *dev)
1669 {
1670         struct drm_i915_private *dev_priv = dev->dev_private;
1671         u32 blt_ecoskpd;
1672
1673         /* Make sure blitter notifies FBC of writes */
1674         gen6_gt_force_wake_get(dev_priv);
1675         blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
1676         blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
1677                 GEN6_BLITTER_LOCK_SHIFT;
1678         I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1679         blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
1680         I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1681         blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
1682                          GEN6_BLITTER_LOCK_SHIFT);
1683         I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1684         POSTING_READ(GEN6_BLITTER_ECOSKPD);
1685         gen6_gt_force_wake_put(dev_priv);
1686 }
1687
1688 static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1689 {
1690         struct drm_device *dev = crtc->dev;
1691         struct drm_i915_private *dev_priv = dev->dev_private;
1692         struct drm_framebuffer *fb = crtc->fb;
1693         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1694         struct drm_i915_gem_object *obj = intel_fb->obj;
1695         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1696         int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1697         unsigned long stall_watermark = 200;
1698         u32 dpfc_ctl;
1699
1700         dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1701         dpfc_ctl &= DPFC_RESERVED;
1702         dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
1703         /* Set persistent mode for front-buffer rendering, ala X. */
1704         dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
1705         dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
1706         I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
1707
1708         I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1709                    (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1710                    (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1711         I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
1712         I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
1713         /* enable it... */
1714         I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
1715
1716         if (IS_GEN6(dev)) {
1717                 I915_WRITE(SNB_DPFC_CTL_SA,
1718                            SNB_CPU_FENCE_ENABLE | obj->fence_reg);
1719                 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
1720                 sandybridge_blit_fbc_update(dev);
1721         }
1722
1723         DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1724 }
1725
1726 static void ironlake_disable_fbc(struct drm_device *dev)
1727 {
1728         struct drm_i915_private *dev_priv = dev->dev_private;
1729         u32 dpfc_ctl;
1730
1731         /* Disable compression */
1732         dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1733         if (dpfc_ctl & DPFC_CTL_EN) {
1734                 dpfc_ctl &= ~DPFC_CTL_EN;
1735                 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
1736
1737                 DRM_DEBUG_KMS("disabled FBC\n");
1738         }
1739 }
1740
1741 static bool ironlake_fbc_enabled(struct drm_device *dev)
1742 {
1743         struct drm_i915_private *dev_priv = dev->dev_private;
1744
1745         return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
1746 }
1747
1748 bool intel_fbc_enabled(struct drm_device *dev)
1749 {
1750         struct drm_i915_private *dev_priv = dev->dev_private;
1751
1752         if (!dev_priv->display.fbc_enabled)
1753                 return false;
1754
1755         return dev_priv->display.fbc_enabled(dev);
1756 }
1757
1758 static void intel_fbc_work_fn(struct work_struct *__work)
1759 {
1760         struct intel_fbc_work *work =
1761                 container_of(to_delayed_work(__work),
1762                              struct intel_fbc_work, work);
1763         struct drm_device *dev = work->crtc->dev;
1764         struct drm_i915_private *dev_priv = dev->dev_private;
1765
1766         mutex_lock(&dev->struct_mutex);
1767         if (work == dev_priv->fbc_work) {
1768                 /* Double check that we haven't switched fb without cancelling
1769                  * the prior work.
1770                  */
1771                 if (work->crtc->fb == work->fb) {
1772                         dev_priv->display.enable_fbc(work->crtc,
1773                                                      work->interval);
1774
1775                         dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
1776                         dev_priv->cfb_fb = work->crtc->fb->base.id;
1777                         dev_priv->cfb_y = work->crtc->y;
1778                 }
1779
1780                 dev_priv->fbc_work = NULL;
1781         }
1782         mutex_unlock(&dev->struct_mutex);
1783
1784         kfree(work);
1785 }
1786
1787 static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
1788 {
1789         if (dev_priv->fbc_work == NULL)
1790                 return;
1791
1792         DRM_DEBUG_KMS("cancelling pending FBC enable\n");
1793
1794         /* Synchronisation is provided by struct_mutex and checking of
1795          * dev_priv->fbc_work, so we can perform the cancellation
1796          * entirely asynchronously.
1797          */
1798         if (cancel_delayed_work(&dev_priv->fbc_work->work))
1799                 /* the delayed work was cancelled before it ran, clean up */
1800                 kfree(dev_priv->fbc_work);
1801
1802         /* Mark the work as no longer wanted so that if it does
1803          * wake-up (because the work was already running and waiting
1804          * for our mutex), it will discover that it is no longer
1805          * necessary to run.
1806          */
1807         dev_priv->fbc_work = NULL;
1808 }
1809
1810 static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1811 {
1812         struct intel_fbc_work *work;
1813         struct drm_device *dev = crtc->dev;
1814         struct drm_i915_private *dev_priv = dev->dev_private;
1815
1816         if (!dev_priv->display.enable_fbc)
1817                 return;
1818
1819         intel_cancel_fbc_work(dev_priv);
1820
1821         work = kzalloc(sizeof *work, GFP_KERNEL);
1822         if (work == NULL) {
1823                 dev_priv->display.enable_fbc(crtc, interval);
1824                 return;
1825         }
1826
1827         work->crtc = crtc;
1828         work->fb = crtc->fb;
1829         work->interval = interval;
1830         INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
1831
1832         dev_priv->fbc_work = work;
1833
1834         DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
1835
1836         /* Delay the actual enabling to let pageflipping cease and the
1837          * display to settle before starting the compression. Note that
1838          * this delay also serves a second purpose: it allows for a
1839          * vblank to pass after disabling the FBC before we attempt
1840          * to modify the control registers.
1841          *
1842          * A more complicated solution would involve tracking vblanks
1843          * following the termination of the page-flipping sequence
1844          * and indeed performing the enable as a co-routine and not
1845          * waiting synchronously upon the vblank.
1846          */
1847         schedule_delayed_work(&work->work, msecs_to_jiffies(50));
1848 }
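/*
 * Hedged usage note: callers are not expected to invoke the hardware
 * enable_fbc hooks directly; they go through intel_update_fbc() below, which
 * re-checks the configuration and ends up here, deferring the actual register
 * writes by ~50ms as explained above. A rough sketch of the typical flow:
 *
 *	intel_update_fbc(dev);             // after a modeset or plane update
 *	  -> intel_disable_fbc(dev);       // if FBC was active with stale params
 *	  -> intel_enable_fbc(crtc, 500);  // schedules the delayed work
 */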
1849
1850 void intel_disable_fbc(struct drm_device *dev)
1851 {
1852         struct drm_i915_private *dev_priv = dev->dev_private;
1853
1854         intel_cancel_fbc_work(dev_priv);
1855
1856         if (!dev_priv->display.disable_fbc)
1857                 return;
1858
1859         dev_priv->display.disable_fbc(dev);
1860         dev_priv->cfb_plane = -1;
1861 }
1862
1863 /**
1864  * intel_update_fbc - enable/disable FBC as needed
1865  * @dev: the drm_device
1866  *
1867  * Set up the framebuffer compression hardware at mode set time.  We
1868  * enable it if possible:
1869  *   - plane A only (on pre-965)
1870  *   - no pixel multiply/line duplication
1871  *   - no alpha buffer discard
1872  *   - no dual wide
1873  *   - framebuffer <= 2048 in width, 1536 in height
1874  *
1875  * We can't assume that any compression will take place (worst case),
1876  * so the compressed buffer has to be the same size as the uncompressed
1877  * one.  It also must reside (along with the line length buffer) in
1878  * stolen memory.
1879  *
1880  * We need to enable/disable FBC on a global basis.
1881  */
1882 static void intel_update_fbc(struct drm_device *dev)
1883 {
1884         struct drm_i915_private *dev_priv = dev->dev_private;
1885         struct drm_crtc *crtc = NULL, *tmp_crtc;
1886         struct intel_crtc *intel_crtc;
1887         struct drm_framebuffer *fb;
1888         struct intel_framebuffer *intel_fb;
1889         struct drm_i915_gem_object *obj;
1890         int enable_fbc;
1891
1892         DRM_DEBUG_KMS("\n");
1893
1894         if (!i915_powersave)
1895                 return;
1896
1897         if (!I915_HAS_FBC(dev))
1898                 return;
1899
1900         /*
1901          * If FBC is already on, we just have to verify that we can
1902          * keep it that way...
1903          * Need to disable if:
1904          *   - more than one pipe is active
1905          *   - changing FBC params (stride, fence, mode)
1906          *   - new fb is too large to fit in compressed buffer
1907          *   - going to an unsupported config (interlace, pixel multiply, etc.)
1908          */
1909         list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
1910                 if (tmp_crtc->enabled && tmp_crtc->fb) {
1911                         if (crtc) {
1912                                 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
1913                                 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
1914                                 goto out_disable;
1915                         }
1916                         crtc = tmp_crtc;
1917                 }
1918         }
1919
1920         if (!crtc || crtc->fb == NULL) {
1921                 DRM_DEBUG_KMS("no output, disabling\n");
1922                 dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
1923                 goto out_disable;
1924         }
1925
1926         intel_crtc = to_intel_crtc(crtc);
1927         fb = crtc->fb;
1928         intel_fb = to_intel_framebuffer(fb);
1929         obj = intel_fb->obj;
1930
1931         enable_fbc = i915_enable_fbc;
1932         if (enable_fbc < 0) {
1933                 DRM_DEBUG_KMS("fbc set to per-chip default\n");
1934                 enable_fbc = 1;
1935                 if (INTEL_INFO(dev)->gen <= 6)
1936                         enable_fbc = 0;
1937         }
1938         if (!enable_fbc) {
1939                 DRM_DEBUG_KMS("fbc disabled per module param\n");
1940                 dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
1941                 goto out_disable;
1942         }
1943         if (intel_fb->obj->base.size > dev_priv->cfb_size) {
1944                 DRM_DEBUG_KMS("framebuffer too large, disabling "
1945                               "compression\n");
1946                 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1947                 goto out_disable;
1948         }
1949         if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
1950             (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
1951                 DRM_DEBUG_KMS("mode incompatible with compression, "
1952                               "disabling\n");
1953                 dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
1954                 goto out_disable;
1955         }
1956         if ((crtc->mode.hdisplay > 2048) ||
1957             (crtc->mode.vdisplay > 1536)) {
1958                 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
1959                 dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
1960                 goto out_disable;
1961         }
1962         if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
1963                 DRM_DEBUG_KMS("plane not 0, disabling compression\n");
1964                 dev_priv->no_fbc_reason = FBC_BAD_PLANE;
1965                 goto out_disable;
1966         }
1967
1968         /* The use of a CPU fence is mandatory in order to detect writes
1969          * by the CPU to the scanout and trigger updates to the FBC.
1970          */
1971         if (obj->tiling_mode != I915_TILING_X ||
1972             obj->fence_reg == I915_FENCE_REG_NONE) {
1973                 DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
1974                 dev_priv->no_fbc_reason = FBC_NOT_TILED;
1975                 goto out_disable;
1976         }
1977
1978         /* If the kernel debugger is active, always disable compression */
1979         if (in_dbg_master())
1980                 goto out_disable;
1981
1982         /* If the scanout has not changed, don't modify the FBC settings.
1983          * Note that we make the fundamental assumption that the fb->obj
1984          * cannot be unpinned (and have its GTT offset and fence revoked)
1985          * without first being decoupled from the scanout and FBC disabled.
1986          */
1987         if (dev_priv->cfb_plane == intel_crtc->plane &&
1988             dev_priv->cfb_fb == fb->base.id &&
1989             dev_priv->cfb_y == crtc->y)
1990                 return;
1991
1992         if (intel_fbc_enabled(dev)) {
1993                 /* We update FBC along two paths, after changing fb/crtc
1994                  * configuration (modeswitching) and after page-flipping
1995                  * finishes. For the latter, we know that not only did
1996                  * we disable the FBC at the start of the page-flip
1997                  * sequence, but also more than one vblank has passed.
1998                  *
1999                  * For the former case of modeswitching, it is possible
2000                  * to switch between two FBC valid configurations
2001                  * instantaneously so we do need to disable the FBC
2002                  * before we can modify its control registers. We also
2003                  * have to wait for the next vblank for that to take
2004                  * effect. However, since we delay enabling FBC we can
2005                  * assume that a vblank has passed since disabling and
2006                  * that we can safely alter the registers in the deferred
2007                  * callback.
2008                  *
2009                  * In the scenario that we go from a valid to invalid
2010                  * and then back to valid FBC configuration we have
2011                  * no strict enforcement that a vblank occurred since
2012                  * disabling the FBC. However, along all current pipe
2013                  * disabling paths we do need to wait for a vblank at
2014                  * some point. And we wait before enabling FBC anyway.
2015                  */
2016                 DRM_DEBUG_KMS("disabling active FBC for update\n");
2017                 intel_disable_fbc(dev);
2018         }
2019
2020         intel_enable_fbc(crtc, 500);
2021         return;
2022
2023 out_disable:
2024         /* Multiple disables should be harmless */
2025         if (intel_fbc_enabled(dev)) {
2026                 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
2027                 intel_disable_fbc(dev);
2028         }
2029 }
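/*
 * Hedged usage sketch: the base-pointer update path in this file calls
 * intel_update_fbc() right after flipping the plane registers, e.g. (from
 * intel_pipe_set_base_atomic() below):
 *
 *	ret = dev_priv->display.update_plane(crtc, fb, x, y);
 *	if (ret)
 *		return ret;
 *	intel_update_fbc(dev);
 *
 * so FBC is re-evaluated whenever the scanout configuration changes.
 */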
2030
2031 int
2032 intel_pin_and_fence_fb_obj(struct drm_device *dev,
2033                            struct drm_i915_gem_object *obj,
2034                            struct intel_ring_buffer *pipelined)
2035 {
2036         struct drm_i915_private *dev_priv = dev->dev_private;
2037         u32 alignment;
2038         int ret;
2039
2040         switch (obj->tiling_mode) {
2041         case I915_TILING_NONE:
2042                 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
2043                         alignment = 128 * 1024;
2044                 else if (INTEL_INFO(dev)->gen >= 4)
2045                         alignment = 4 * 1024;
2046                 else
2047                         alignment = 64 * 1024;
2048                 break;
2049         case I915_TILING_X:
2050                 /* pin() will align the object as required by fence */
2051                 alignment = 0;
2052                 break;
2053         case I915_TILING_Y:
2054                 /* FIXME: Is this true? */
2055                 DRM_ERROR("Y tiled not allowed for scan out buffers\n");
2056                 return -EINVAL;
2057         default:
2058                 BUG();
2059         }
2060
2061         dev_priv->mm.interruptible = false;
2062         ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
2063         if (ret)
2064                 goto err_interruptible;
2065
2066         /* Install a fence for tiled scan-out. Pre-i965 always needs a
2067          * fence, whereas 965+ only requires a fence if using
2068          * framebuffer compression.  For simplicity, we always install
2069          * a fence as the cost is not that onerous.
2070          */
2071         if (obj->tiling_mode != I915_TILING_NONE) {
2072                 ret = i915_gem_object_get_fence(obj, pipelined);
2073                 if (ret)
2074                         goto err_unpin;
2075
2076                 i915_gem_object_pin_fence(obj);
2077         }
2078
2079         dev_priv->mm.interruptible = true;
2080         return 0;
2081
2082 err_unpin:
2083         i915_gem_object_unpin(obj);
2084 err_interruptible:
2085         dev_priv->mm.interruptible = true;
2086         return ret;
2087 }
2088
2089 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
2090 {
2091         i915_gem_object_unpin_fence(obj);
2092         i915_gem_object_unpin(obj);
2093 }
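/*
 * Hedged pairing sketch: intel_pin_and_fence_fb_obj() and intel_unpin_fb_obj()
 * bracket the lifetime of a scanout buffer, under struct_mutex, e.g. (see
 * intel_pipe_set_base() below):
 *
 *	mutex_lock(&dev->struct_mutex);
 *	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
 *	...
 *	intel_unpin_fb_obj(obj);	// on error, or for the old fb
 *	mutex_unlock(&dev->struct_mutex);
 */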
2094
2095 static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2096                              int x, int y)
2097 {
2098         struct drm_device *dev = crtc->dev;
2099         struct drm_i915_private *dev_priv = dev->dev_private;
2100         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2101         struct intel_framebuffer *intel_fb;
2102         struct drm_i915_gem_object *obj;
2103         int plane = intel_crtc->plane;
2104         unsigned long Start, Offset;
2105         u32 dspcntr;
2106         u32 reg;
2107
2108         switch (plane) {
2109         case 0:
2110         case 1:
2111                 break;
2112         default:
2113                 DRM_ERROR("Can't update plane %d in SAREA\n", plane);
2114                 return -EINVAL;
2115         }
2116
2117         intel_fb = to_intel_framebuffer(fb);
2118         obj = intel_fb->obj;
2119
2120         reg = DSPCNTR(plane);
2121         dspcntr = I915_READ(reg);
2122         /* Mask out pixel format bits in case we change it */
2123         dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2124         switch (fb->bits_per_pixel) {
2125         case 8:
2126                 dspcntr |= DISPPLANE_8BPP;
2127                 break;
2128         case 16:
2129                 if (fb->depth == 15)
2130                         dspcntr |= DISPPLANE_15_16BPP;
2131                 else
2132                         dspcntr |= DISPPLANE_16BPP;
2133                 break;
2134         case 24:
2135         case 32:
2136                 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
2137                 break;
2138         default:
2139                 DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
2140                 return -EINVAL;
2141         }
2142         if (INTEL_INFO(dev)->gen >= 4) {
2143                 if (obj->tiling_mode != I915_TILING_NONE)
2144                         dspcntr |= DISPPLANE_TILED;
2145                 else
2146                         dspcntr &= ~DISPPLANE_TILED;
2147         }
2148
2149         I915_WRITE(reg, dspcntr);
2150
2151         Start = obj->gtt_offset;
2152         Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2153
2154         DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2155                       Start, Offset, x, y, fb->pitches[0]);
2156         I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2157         if (INTEL_INFO(dev)->gen >= 4) {
2158                 I915_WRITE(DSPSURF(plane), Start);
2159                 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2160                 I915_WRITE(DSPADDR(plane), Offset);
2161         } else
2162                 I915_WRITE(DSPADDR(plane), Start + Offset);
2163         POSTING_READ(reg);
2164
2165         return 0;
2166 }
2167
2168 static int ironlake_update_plane(struct drm_crtc *crtc,
2169                                  struct drm_framebuffer *fb, int x, int y)
2170 {
2171         struct drm_device *dev = crtc->dev;
2172         struct drm_i915_private *dev_priv = dev->dev_private;
2173         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2174         struct intel_framebuffer *intel_fb;
2175         struct drm_i915_gem_object *obj;
2176         int plane = intel_crtc->plane;
2177         unsigned long Start, Offset;
2178         u32 dspcntr;
2179         u32 reg;
2180
2181         switch (plane) {
2182         case 0:
2183         case 1:
2184         case 2:
2185                 break;
2186         default:
2187                 DRM_ERROR("Can't update plane %d in SAREA\n", plane);
2188                 return -EINVAL;
2189         }
2190
2191         intel_fb = to_intel_framebuffer(fb);
2192         obj = intel_fb->obj;
2193
2194         reg = DSPCNTR(plane);
2195         dspcntr = I915_READ(reg);
2196         /* Mask out pixel format bits in case we change it */
2197         dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2198         switch (fb->bits_per_pixel) {
2199         case 8:
2200                 dspcntr |= DISPPLANE_8BPP;
2201                 break;
2202         case 16:
2203                 if (fb->depth != 16)
2204                         return -EINVAL;
2205
2206                 dspcntr |= DISPPLANE_16BPP;
2207                 break;
2208         case 24:
2209         case 32:
2210                 if (fb->depth == 24)
2211                         dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
2212                 else if (fb->depth == 30)
2213                         dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
2214                 else
2215                         return -EINVAL;
2216                 break;
2217         default:
2218                 DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
2219                 return -EINVAL;
2220         }
2221
2222         if (obj->tiling_mode != I915_TILING_NONE)
2223                 dspcntr |= DISPPLANE_TILED;
2224         else
2225                 dspcntr &= ~DISPPLANE_TILED;
2226
2227         /* trickle feed must be disabled */
2228         dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2229
2230         I915_WRITE(reg, dspcntr);
2231
2232         Start = obj->gtt_offset;
2233         Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2234
2235         DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2236                       Start, Offset, x, y, fb->pitches[0]);
2237         I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2238         I915_WRITE(DSPSURF(plane), Start);
2239         I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2240         I915_WRITE(DSPADDR(plane), Offset);
2241         POSTING_READ(reg);
2242
2243         return 0;
2244 }
2245
2246 /* Assume fb object is pinned & idle & fenced and just update base pointers */
2247 static int
2248 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2249                            int x, int y, enum mode_set_atomic state)
2250 {
2251         struct drm_device *dev = crtc->dev;
2252         struct drm_i915_private *dev_priv = dev->dev_private;
2253         int ret;
2254
2255         ret = dev_priv->display.update_plane(crtc, fb, x, y);
2256         if (ret)
2257                 return ret;
2258
2259         intel_update_fbc(dev);
2260         intel_increase_pllclock(crtc);
2261
2262         return 0;
2263 }
2264
2265 static int
2266 intel_finish_fb(struct drm_framebuffer *old_fb)
2267 {
2268         struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
2269         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2270         bool was_interruptible = dev_priv->mm.interruptible;
2271         int ret;
2272
2273         wait_event(dev_priv->pending_flip_queue,
2274                    atomic_read(&dev_priv->mm.wedged) ||
2275                    atomic_read(&obj->pending_flip) == 0);
2276
2277         /* Big Hammer, we also need to ensure that any pending
2278          * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2279          * current scanout is retired before unpinning the old
2280          * framebuffer.
2281          *
2282          * This should only fail upon a hung GPU, in which case we
2283          * can safely continue.
2284          */
2285         dev_priv->mm.interruptible = false;
2286         ret = i915_gem_object_finish_gpu(obj);
2287         dev_priv->mm.interruptible = was_interruptible;
2288
2289         return ret;
2290 }
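/*
 * Hedged note: intel_finish_fb() is the "old framebuffer is truly idle"
 * barrier, pending flips have completed (or the GPU is wedged) and any GPU
 * access to the object has been retired. Both the set_base path and
 * intel_crtc_wait_for_pending_flips() below funnel through it before the old
 * buffer is unpinned.
 */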
2291
2292 static int
2293 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2294                     struct drm_framebuffer *old_fb)
2295 {
2296         struct drm_device *dev = crtc->dev;
2297         struct drm_i915_master_private *master_priv;
2298         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2299         int ret;
2300
2301         /* no fb bound */
2302         if (!crtc->fb) {
2303                 DRM_ERROR("No FB bound\n");
2304                 return 0;
2305         }
2306
2307         switch (intel_crtc->plane) {
2308         case 0:
2309         case 1:
2310                 break;
2311         case 2:
2312                 if (IS_IVYBRIDGE(dev))
2313                         break;
2314                 /* fall through otherwise */
2315         default:
2316                 DRM_ERROR("no plane for crtc\n");
2317                 return -EINVAL;
2318         }
2319
2320         mutex_lock(&dev->struct_mutex);
2321         ret = intel_pin_and_fence_fb_obj(dev,
2322                                          to_intel_framebuffer(crtc->fb)->obj,
2323                                          NULL);
2324         if (ret != 0) {
2325                 mutex_unlock(&dev->struct_mutex);
2326                 DRM_ERROR("pin & fence failed\n");
2327                 return ret;
2328         }
2329
2330         if (old_fb)
2331                 intel_finish_fb(old_fb);
2332
2333         ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
2334                                          LEAVE_ATOMIC_MODE_SET);
2335         if (ret) {
2336                 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
2337                 mutex_unlock(&dev->struct_mutex);
2338                 DRM_ERROR("failed to update base address\n");
2339                 return ret;
2340         }
2341
2342         if (old_fb) {
2343                 intel_wait_for_vblank(dev, intel_crtc->pipe);
2344                 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
2345         }
2346
2347         mutex_unlock(&dev->struct_mutex);
2348
2349         if (!dev->primary->master)
2350                 return 0;
2351
2352         master_priv = dev->primary->master->driver_priv;
2353         if (!master_priv->sarea_priv)
2354                 return 0;
2355
2356         if (intel_crtc->pipe) {
2357                 master_priv->sarea_priv->pipeB_x = x;
2358                 master_priv->sarea_priv->pipeB_y = y;
2359         } else {
2360                 master_priv->sarea_priv->pipeA_x = x;
2361                 master_priv->sarea_priv->pipeA_y = y;
2362         }
2363
2364         return 0;
2365 }
2366
2367 static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
2368 {
2369         struct drm_device *dev = crtc->dev;
2370         struct drm_i915_private *dev_priv = dev->dev_private;
2371         u32 dpa_ctl;
2372
2373         DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
2374         dpa_ctl = I915_READ(DP_A);
2375         dpa_ctl &= ~DP_PLL_FREQ_MASK;
2376
2377         if (clock < 200000) {
2378                 u32 temp;
2379                 dpa_ctl |= DP_PLL_FREQ_160MHZ;
2380                 /* workaround for 160MHz:
2381                    1) program 0x4600c bits 15:0 = 0x8124
2382                    2) program 0x46010 bit 0 = 1
2383                    3) program 0x46034 bit 24 = 1
2384                    4) program 0x64000 bit 14 = 1
2385                    */
2386                 temp = I915_READ(0x4600c);
2387                 temp &= 0xffff0000;
2388                 I915_WRITE(0x4600c, temp | 0x8124);
2389
2390                 temp = I915_READ(0x46010);
2391                 I915_WRITE(0x46010, temp | 1);
2392
2393                 temp = I915_READ(0x46034);
2394                 I915_WRITE(0x46034, temp | (1 << 24));
2395         } else {
2396                 dpa_ctl |= DP_PLL_FREQ_270MHZ;
2397         }
2398         I915_WRITE(DP_A, dpa_ctl);
2399
2400         POSTING_READ(DP_A);
2401         udelay(500);
2402 }
2403
2404 static void intel_fdi_normal_train(struct drm_crtc *crtc)
2405 {
2406         struct drm_device *dev = crtc->dev;
2407         struct drm_i915_private *dev_priv = dev->dev_private;
2408         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2409         int pipe = intel_crtc->pipe;
2410         u32 reg, temp;
2411
2412         /* enable normal train */
2413         reg = FDI_TX_CTL(pipe);
2414         temp = I915_READ(reg);
2415         if (IS_IVYBRIDGE(dev)) {
2416                 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2417                 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
2418         } else {
2419                 temp &= ~FDI_LINK_TRAIN_NONE;
2420                 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
2421         }
2422         I915_WRITE(reg, temp);
2423
2424         reg = FDI_RX_CTL(pipe);
2425         temp = I915_READ(reg);
2426         if (HAS_PCH_CPT(dev)) {
2427                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2428                 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2429         } else {
2430                 temp &= ~FDI_LINK_TRAIN_NONE;
2431                 temp |= FDI_LINK_TRAIN_NONE;
2432         }
2433         I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2434
2435         /* wait one idle pattern time */
2436         POSTING_READ(reg);
2437         udelay(1000);
2438
2439         /* IVB wants error correction enabled */
2440         if (IS_IVYBRIDGE(dev))
2441                 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2442                            FDI_FE_ERRC_ENABLE);
2443 }
2444
2445 /* The FDI link training functions for ILK/Ibexpeak. */
2446 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2447 {
2448         struct drm_device *dev = crtc->dev;
2449         struct drm_i915_private *dev_priv = dev->dev_private;
2450         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2451         int pipe = intel_crtc->pipe;
2452         int plane = intel_crtc->plane;
2453         u32 reg, temp, tries;
2454
2455         /* FDI needs bits from pipe & plane first */
2456         assert_pipe_enabled(dev_priv, pipe);
2457         assert_plane_enabled(dev_priv, plane);
2458
2459         /* Train 1: unmask FDI RX interrupt symbol_lock and bit_lock bits
2460            for the train result */
2461         reg = FDI_RX_IMR(pipe);
2462         temp = I915_READ(reg);
2463         temp &= ~FDI_RX_SYMBOL_LOCK;
2464         temp &= ~FDI_RX_BIT_LOCK;
2465         I915_WRITE(reg, temp);
2466         I915_READ(reg);
2467         udelay(150);
2468
2469         /* enable CPU FDI TX and PCH FDI RX */
2470         reg = FDI_TX_CTL(pipe);
2471         temp = I915_READ(reg);
2472         temp &= ~(7 << 19);
2473         temp |= (intel_crtc->fdi_lanes - 1) << 19;
2474         temp &= ~FDI_LINK_TRAIN_NONE;
2475         temp |= FDI_LINK_TRAIN_PATTERN_1;
2476         I915_WRITE(reg, temp | FDI_TX_ENABLE);
2477
2478         reg = FDI_RX_CTL(pipe);
2479         temp = I915_READ(reg);
2480         temp &= ~FDI_LINK_TRAIN_NONE;
2481         temp |= FDI_LINK_TRAIN_PATTERN_1;
2482         I915_WRITE(reg, temp | FDI_RX_ENABLE);
2483
2484         POSTING_READ(reg);
2485         udelay(150);
2486
2487         /* Ironlake workaround, enable clock pointer after FDI enable */
2488         if (HAS_PCH_IBX(dev)) {
2489                 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2490                 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2491                            FDI_RX_PHASE_SYNC_POINTER_EN);
2492         }
2493
2494         reg = FDI_RX_IIR(pipe);
2495         for (tries = 0; tries < 5; tries++) {
2496                 temp = I915_READ(reg);
2497                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2498
2499                 if ((temp & FDI_RX_BIT_LOCK)) {
2500                         DRM_DEBUG_KMS("FDI train 1 done.\n");
2501                         I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2502                         break;
2503                 }
2504         }
2505         if (tries == 5)
2506                 DRM_ERROR("FDI train 1 fail!\n");
2507
2508         /* Train 2 */
2509         reg = FDI_TX_CTL(pipe);
2510         temp = I915_READ(reg);
2511         temp &= ~FDI_LINK_TRAIN_NONE;
2512         temp |= FDI_LINK_TRAIN_PATTERN_2;
2513         I915_WRITE(reg, temp);
2514
2515         reg = FDI_RX_CTL(pipe);
2516         temp = I915_READ(reg);
2517         temp &= ~FDI_LINK_TRAIN_NONE;
2518         temp |= FDI_LINK_TRAIN_PATTERN_2;
2519         I915_WRITE(reg, temp);
2520
2521         POSTING_READ(reg);
2522         udelay(150);
2523
2524         reg = FDI_RX_IIR(pipe);
2525         for (tries = 0; tries < 5; tries++) {
2526                 temp = I915_READ(reg);
2527                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2528
2529                 if (temp & FDI_RX_SYMBOL_LOCK) {
2530                         I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2531                         DRM_DEBUG_KMS("FDI train 2 done.\n");
2532                         break;
2533                 }
2534         }
2535         if (tries == 5)
2536                 DRM_ERROR("FDI train 2 fail!\n");
2537
2538         DRM_DEBUG_KMS("FDI train done\n");
2539
2540 }
2541
2542 static const int snb_b_fdi_train_param[] = {
2543         FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2544         FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2545         FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2546         FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2547 };
2548
2549 /* The FDI link training functions for SNB/Cougarpoint. */
2550 static void gen6_fdi_link_train(struct drm_crtc *crtc)
2551 {
2552         struct drm_device *dev = crtc->dev;
2553         struct drm_i915_private *dev_priv = dev->dev_private;
2554         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2555         int pipe = intel_crtc->pipe;
2556         u32 reg, temp, i;
2557
2558         /* Train 1: unmask FDI RX interrupt symbol_lock and bit_lock bits
2559            for the train result */
2560         reg = FDI_RX_IMR(pipe);
2561         temp = I915_READ(reg);
2562         temp &= ~FDI_RX_SYMBOL_LOCK;
2563         temp &= ~FDI_RX_BIT_LOCK;
2564         I915_WRITE(reg, temp);
2565
2566         POSTING_READ(reg);
2567         udelay(150);
2568
2569         /* enable CPU FDI TX and PCH FDI RX */
2570         reg = FDI_TX_CTL(pipe);
2571         temp = I915_READ(reg);
2572         temp &= ~(7 << 19);
2573         temp |= (intel_crtc->fdi_lanes - 1) << 19;
2574         temp &= ~FDI_LINK_TRAIN_NONE;
2575         temp |= FDI_LINK_TRAIN_PATTERN_1;
2576         temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2577         /* SNB-B */
2578         temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2579         I915_WRITE(reg, temp | FDI_TX_ENABLE);
2580
2581         reg = FDI_RX_CTL(pipe);
2582         temp = I915_READ(reg);
2583         if (HAS_PCH_CPT(dev)) {
2584                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2585                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2586         } else {
2587                 temp &= ~FDI_LINK_TRAIN_NONE;
2588                 temp |= FDI_LINK_TRAIN_PATTERN_1;
2589         }
2590         I915_WRITE(reg, temp | FDI_RX_ENABLE);
2591
2592         POSTING_READ(reg);
2593         udelay(150);
2594
2595         for (i = 0; i < 4; i++) {
2596                 reg = FDI_TX_CTL(pipe);
2597                 temp = I915_READ(reg);
2598                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2599                 temp |= snb_b_fdi_train_param[i];
2600                 I915_WRITE(reg, temp);
2601
2602                 POSTING_READ(reg);
2603                 udelay(500);
2604
2605                 reg = FDI_RX_IIR(pipe);
2606                 temp = I915_READ(reg);
2607                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2608
2609                 if (temp & FDI_RX_BIT_LOCK) {
2610                         I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2611                         DRM_DEBUG_KMS("FDI train 1 done.\n");
2612                         break;
2613                 }
2614         }
2615         if (i == 4)
2616                 DRM_ERROR("FDI train 1 fail!\n");
2617
2618         /* Train 2 */
2619         reg = FDI_TX_CTL(pipe);
2620         temp = I915_READ(reg);
2621         temp &= ~FDI_LINK_TRAIN_NONE;
2622         temp |= FDI_LINK_TRAIN_PATTERN_2;
2623         if (IS_GEN6(dev)) {
2624                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2625                 /* SNB-B */
2626                 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2627         }
2628         I915_WRITE(reg, temp);
2629
2630         reg = FDI_RX_CTL(pipe);
2631         temp = I915_READ(reg);
2632         if (HAS_PCH_CPT(dev)) {
2633                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2634                 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2635         } else {
2636                 temp &= ~FDI_LINK_TRAIN_NONE;
2637                 temp |= FDI_LINK_TRAIN_PATTERN_2;
2638         }
2639         I915_WRITE(reg, temp);
2640
2641         POSTING_READ(reg);
2642         udelay(150);
2643
2644         for (i = 0; i < 4; i++) {
2645                 reg = FDI_TX_CTL(pipe);
2646                 temp = I915_READ(reg);
2647                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2648                 temp |= snb_b_fdi_train_param[i];
2649                 I915_WRITE(reg, temp);
2650
2651                 POSTING_READ(reg);
2652                 udelay(500);
2653
2654                 reg = FDI_RX_IIR(pipe);
2655                 temp = I915_READ(reg);
2656                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2657
2658                 if (temp & FDI_RX_SYMBOL_LOCK) {
2659                         I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2660                         DRM_DEBUG_KMS("FDI train 2 done.\n");
2661                         break;
2662                 }
2663         }
2664         if (i == 4)
2665                 DRM_ERROR("FDI train 2 fail!\n");
2666
2667         DRM_DEBUG_KMS("FDI train done.\n");
2668 }
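/*
 * Illustrative summary of the SNB training loops above (not new code): each
 * training stage sweeps the four voltage/emphasis settings in
 * snb_b_fdi_train_param[] and polls FDI_RX_IIR for the lock bit, roughly
 *
 *	for (i = 0; i < 4; i++) {
 *		<program snb_b_fdi_train_param[i] into FDI_TX_CTL>
 *		udelay(500);
 *		if (I915_READ(FDI_RX_IIR(pipe)) & FDI_RX_BIT_LOCK)
 *			break;	// stage 1; stage 2 waits for SYMBOL_LOCK
 *	}
 */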
2669
2670 /* Manual link training for Ivy Bridge A0 parts */
2671 static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2672 {
2673         struct drm_device *dev = crtc->dev;
2674         struct drm_i915_private *dev_priv = dev->dev_private;
2675         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2676         int pipe = intel_crtc->pipe;
2677         u32 reg, temp, i;
2678
2679         /* Train 1: unmask FDI RX interrupt symbol_lock and bit_lock bits
2680            for the train result */
2681         reg = FDI_RX_IMR(pipe);
2682         temp = I915_READ(reg);
2683         temp &= ~FDI_RX_SYMBOL_LOCK;
2684         temp &= ~FDI_RX_BIT_LOCK;
2685         I915_WRITE(reg, temp);
2686
2687         POSTING_READ(reg);
2688         udelay(150);
2689
2690         /* enable CPU FDI TX and PCH FDI RX */
2691         reg = FDI_TX_CTL(pipe);
2692         temp = I915_READ(reg);
2693         temp &= ~(7 << 19);
2694         temp |= (intel_crtc->fdi_lanes - 1) << 19;
2695         temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
2696         temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
2697         temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2698         temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2699         temp |= FDI_COMPOSITE_SYNC;
2700         I915_WRITE(reg, temp | FDI_TX_ENABLE);
2701
2702         reg = FDI_RX_CTL(pipe);
2703         temp = I915_READ(reg);
2704         temp &= ~FDI_LINK_TRAIN_AUTO;
2705         temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2706         temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2707         temp |= FDI_COMPOSITE_SYNC;
2708         I915_WRITE(reg, temp | FDI_RX_ENABLE);
2709
2710         POSTING_READ(reg);
2711         udelay(150);
2712
2713         for (i = 0; i < 4; i++) {
2714                 reg = FDI_TX_CTL(pipe);
2715                 temp = I915_READ(reg);
2716                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2717                 temp |= snb_b_fdi_train_param[i];
2718                 I915_WRITE(reg, temp);
2719
2720                 POSTING_READ(reg);
2721                 udelay(500);
2722
2723                 reg = FDI_RX_IIR(pipe);
2724                 temp = I915_READ(reg);
2725                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2726
2727                 if (temp & FDI_RX_BIT_LOCK ||
2728                     (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2729                         I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2730                         DRM_DEBUG_KMS("FDI train 1 done.\n");
2731                         break;
2732                 }
2733         }
2734         if (i == 4)
2735                 DRM_ERROR("FDI train 1 fail!\n");
2736
2737         /* Train 2 */
2738         reg = FDI_TX_CTL(pipe);
2739         temp = I915_READ(reg);
2740         temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2741         temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
2742         temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2743         temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2744         I915_WRITE(reg, temp);
2745
2746         reg = FDI_RX_CTL(pipe);
2747         temp = I915_READ(reg);
2748         temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2749         temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2750         I915_WRITE(reg, temp);
2751
2752         POSTING_READ(reg);
2753         udelay(150);
2754
2755         for (i = 0; i < 4; i++) {
2756                 reg = FDI_TX_CTL(pipe);
2757                 temp = I915_READ(reg);
2758                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2759                 temp |= snb_b_fdi_train_param[i];
2760                 I915_WRITE(reg, temp);
2761
2762                 POSTING_READ(reg);
2763                 udelay(500);
2764
2765                 reg = FDI_RX_IIR(pipe);
2766                 temp = I915_READ(reg);
2767                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2768
2769                 if (temp & FDI_RX_SYMBOL_LOCK) {
2770                         I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2771                         DRM_DEBUG_KMS("FDI train 2 done.\n");
2772                         break;
2773                 }
2774         }
2775         if (i == 4)
2776                 DRM_ERROR("FDI train 2 fail!\n");
2777
2778         DRM_DEBUG_KMS("FDI train done.\n");
2779 }
2780
2781 static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
2782 {
2783         struct drm_device *dev = crtc->dev;
2784         struct drm_i915_private *dev_priv = dev->dev_private;
2785         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2786         int pipe = intel_crtc->pipe;
2787         u32 reg, temp;
2788
2789         /* Write the TU size bits so error detection works */
2790         I915_WRITE(FDI_RX_TUSIZE1(pipe),
2791                    I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
2792
2793         /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
2794         reg = FDI_RX_CTL(pipe);
2795         temp = I915_READ(reg);
2796         temp &= ~((0x7 << 19) | (0x7 << 16));
2797         temp |= (intel_crtc->fdi_lanes - 1) << 19;
2798         temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2799         I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
2800
2801         POSTING_READ(reg);
2802         udelay(200);
2803
2804         /* Switch from Rawclk to PCDclk */
2805         temp = I915_READ(reg);
2806         I915_WRITE(reg, temp | FDI_PCDCLK);
2807
2808         POSTING_READ(reg);
2809         udelay(200);
2810
2811         /* Enable CPU FDI TX PLL, always on for Ironlake */
2812         reg = FDI_TX_CTL(pipe);
2813         temp = I915_READ(reg);
2814         if ((temp & FDI_TX_PLL_ENABLE) == 0) {
2815                 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
2816
2817                 POSTING_READ(reg);
2818                 udelay(100);
2819         }
2820 }
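/*
 * Hedged ordering note: FDI bring-up is expected to enable the RX/TX PLLs
 * here first (with their warm-up delays) and only then run the per-platform
 * link training hook, i.e. roughly
 *
 *	ironlake_fdi_pll_enable(crtc);
 *	dev_priv->display.fdi_link_train(crtc);	// see ironlake_pch_enable()
 *
 * intel_enable_pipe() relies on these PLLs being up when pch_port is set.
 */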
2821
2822 static void ironlake_fdi_disable(struct drm_crtc *crtc)
2823 {
2824         struct drm_device *dev = crtc->dev;
2825         struct drm_i915_private *dev_priv = dev->dev_private;
2826         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2827         int pipe = intel_crtc->pipe;
2828         u32 reg, temp;
2829
2830         /* disable CPU FDI tx and PCH FDI rx */
2831         reg = FDI_TX_CTL(pipe);
2832         temp = I915_READ(reg);
2833         I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2834         POSTING_READ(reg);
2835
2836         reg = FDI_RX_CTL(pipe);
2837         temp = I915_READ(reg);
2838         temp &= ~(0x7 << 16);
2839         temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2840         I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2841
2842         POSTING_READ(reg);
2843         udelay(100);
2844
2845         /* Ironlake workaround, disable clock pointer after downing FDI */
2846         if (HAS_PCH_IBX(dev)) {
2847                 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2848                 I915_WRITE(FDI_RX_CHICKEN(pipe),
2849                            I915_READ(FDI_RX_CHICKEN(pipe)) &
2850                            ~FDI_RX_PHASE_SYNC_POINTER_EN);
2851         }
2852
2853         /* still set train pattern 1 */
2854         reg = FDI_TX_CTL(pipe);
2855         temp = I915_READ(reg);
2856         temp &= ~FDI_LINK_TRAIN_NONE;
2857         temp |= FDI_LINK_TRAIN_PATTERN_1;
2858         I915_WRITE(reg, temp);
2859
2860         reg = FDI_RX_CTL(pipe);
2861         temp = I915_READ(reg);
2862         if (HAS_PCH_CPT(dev)) {
2863                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2864                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2865         } else {
2866                 temp &= ~FDI_LINK_TRAIN_NONE;
2867                 temp |= FDI_LINK_TRAIN_PATTERN_1;
2868         }
2869         /* BPC in FDI rx is consistent with that in PIPECONF */
2870         temp &= ~(0x07 << 16);
2871         temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2872         I915_WRITE(reg, temp);
2873
2874         POSTING_READ(reg);
2875         udelay(100);
2876 }
2877
2878 /*
2879  * When we disable a pipe, we need to clear any pending scanline wait events
2880  * to avoid hanging the ring, which we assume we are waiting on.
2881  */
2882 static void intel_clear_scanline_wait(struct drm_device *dev)
2883 {
2884         struct drm_i915_private *dev_priv = dev->dev_private;
2885         struct intel_ring_buffer *ring;
2886         u32 tmp;
2887
2888         if (IS_GEN2(dev))
2889                 /* Can't break the hang on i8xx */
2890                 return;
2891
2892         ring = LP_RING(dev_priv);
2893         tmp = I915_READ_CTL(ring);
2894         if (tmp & RING_WAIT)
2895                 I915_WRITE_CTL(ring, tmp);
2896 }
2897
2898 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2899 {
2900         struct drm_device *dev = crtc->dev;
2901         struct drm_i915_private *dev_priv = dev->dev_private;
2902         unsigned long flags;
2903         bool pending;
2904
2905         if (atomic_read(&dev_priv->mm.wedged))
2906                 return false;
2907
2908         spin_lock_irqsave(&dev->event_lock, flags);
2909         pending = to_intel_crtc(crtc)->unpin_work != NULL;
2910         spin_unlock_irqrestore(&dev->event_lock, flags);
2911
2912         return pending;
2913 }
2914
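/*
 * Wait for any page flip still queued on this CRTC to complete (or for the
 * GPU to be declared wedged), then make sure rendering to the currently
 * attached framebuffer has finished before the CRTC is modified.
 */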
2915 static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2916 {
2917         struct drm_device *dev = crtc->dev;
2918         struct drm_i915_private *dev_priv = dev->dev_private;
2919
2920         if (crtc->fb == NULL)
2921                 return;
2922
2923         wait_event(dev_priv->pending_flip_queue,
2924                    !intel_crtc_has_pending_flip(crtc));
2925
2926         mutex_lock(&dev->struct_mutex);
2927         intel_finish_fb(crtc->fb);
2928         mutex_unlock(&dev->struct_mutex);
2929 }
2930
2931 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2932 {
2933         struct drm_device *dev = crtc->dev;
2934         struct drm_mode_config *mode_config = &dev->mode_config;
2935         struct intel_encoder *encoder;
2936
2937         /*
2938          * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2939          * must be driven by its own crtc; no sharing is possible.
2940          */
2941         list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
2942                 if (encoder->base.crtc != crtc)
2943                         continue;
2944
2945                 switch (encoder->type) {
2946                 case INTEL_OUTPUT_EDP:
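                        /* CPU eDP (DP_A) does not use any PCH resources */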
2947                         if (!intel_encoder_is_pch_edp(&encoder->base))
2948                                 return false;
2949                         continue;
2950                 }
2951         }
2952
2953         return true;
2954 }
2955
2956 /*
2957  * Enable PCH resources required for PCH ports:
2958  *   - PCH PLLs
2959  *   - FDI training & RX/TX
2960  *   - update transcoder timings
2961  *   - DP transcoding bits
2962  *   - transcoder
2963  */
2964 static void ironlake_pch_enable(struct drm_crtc *crtc)
2965 {
2966         struct drm_device *dev = crtc->dev;
2967         struct drm_i915_private *dev_priv = dev->dev_private;
2968         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2969         int pipe = intel_crtc->pipe;
2970         u32 reg, temp, transc_sel;
2971
2972         /* For PCH output, training FDI link */
2973         dev_priv->display.fdi_link_train(crtc);
2974
2975         intel_enable_pch_pll(dev_priv, pipe);
2976
2977         if (HAS_PCH_CPT(dev)) {
2978                 transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL :
2979                         TRANSC_DPLLB_SEL;
2980
2981                 /* Be sure PCH DPLL SEL is set */
2982                 temp = I915_READ(PCH_DPLL_SEL);
2983                 if (pipe == 0) {
2984                         temp &= ~(TRANSA_DPLLB_SEL);
2985                         temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
2986                 } else if (pipe == 1) {
2987                         temp &= ~(TRANSB_DPLLB_SEL);
2988                         temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
2989                 } else if (pipe == 2) {
2990                         temp &= ~(TRANSC_DPLLB_SEL);
2991                         temp |= (TRANSC_DPLL_ENABLE | transc_sel);
2992                 }
2993                 I915_WRITE(PCH_DPLL_SEL, temp);
2994         }
2995
2996         /* set transcoder timing, panel must allow it */
2997         assert_panel_unlocked(dev_priv, pipe);
2998         I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
2999         I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
3000         I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));
3001
3002         I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
3003         I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
3004         I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
3005         I915_WRITE(TRANS_VSYNCSHIFT(pipe),  I915_READ(VSYNCSHIFT(pipe)));
3006
3007         intel_fdi_normal_train(crtc);
3008
3009         /* For PCH DP, enable TRANS_DP_CTL */
3010         if (HAS_PCH_CPT(dev) &&
3011             (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
3012              intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
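                /* PIPECONF carries the pipe's BPC setting in bits 7:5 */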
3013                 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
3014                 reg = TRANS_DP_CTL(pipe);
3015                 temp = I915_READ(reg);
3016                 temp &= ~(TRANS_DP_PORT_SEL_MASK |
3017                           TRANS_DP_SYNC_MASK |
3018                           TRANS_DP_BPC_MASK);
3019                 temp |= (TRANS_DP_OUTPUT_ENABLE |
3020                          TRANS_DP_ENH_FRAMING);
3021                 temp |= bpc << 9; /* same format but at 11:9 */
3022
3023                 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
3024                         temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
3025                 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
3026                         temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
3027
3028                 switch (intel_trans_dp_port_sel(crtc)) {
3029                 case PCH_DP_B:
3030                         temp |= TRANS_DP_PORT_SEL_B;
3031                         break;
3032                 case PCH_DP_C:
3033                         temp |= TRANS_DP_PORT_SEL_C;
3034                         break;
3035                 case PCH_DP_D:
3036                         temp |= TRANS_DP_PORT_SEL_D;
3037                         break;
3038                 default:
3039                         DRM_DEBUG_KMS("Unknown PCH DP port, defaulting to port B\n");
3040                         temp |= TRANS_DP_PORT_SEL_B;
3041                         break;
3042                 }
3043
3044                 I915_WRITE(reg, temp);
3045         }
3046
3047         intel_enable_transcoder(dev_priv, pipe);
3048 }
3049
3050 void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
3051 {
3052         struct drm_i915_private *dev_priv = dev->dev_private;
3053         int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
3054         u32 temp;
3055
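        /*
         * PIPEDSL reports the scanline currently being sent out; if it stops
         * advancing after the mode set, the pipe has likely stalled on FDI.
         */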
3056         temp = I915_READ(dslreg);
3057         udelay(500);
3058         if (wait_for(I915_READ(dslreg) != temp, 5)) {
3059                 /* Without this, mode sets may fail silently on FDI */
3060                 I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
3061                 udelay(250);
3062                 I915_WRITE(tc2reg, 0);
3063                 if (wait_for(I915_READ(dslreg) != temp, 5))
3064                         DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
3065         }
3066 }
3067
3068 static void ironlake_crtc_enable(struct drm_crtc *crtc)
3069 {
3070         struct drm_device *dev = crtc->dev;
3071         struct drm_i915_private *dev_priv = dev->dev_private;
3072         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3073         int pipe = intel_crtc->pipe;
3074         int plane = intel_crtc->plane;
3075         u32 temp;
3076         bool is_pch_port;
3077
3078         if (intel_crtc->active)
3079                 return;
3080
3081         intel_crtc->active = true;
3082         intel_update_watermarks(dev);
3083
3084         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
3085                 temp = I915_READ(PCH_LVDS);
3086                 if ((temp & LVDS_PORT_EN) == 0)
3087                         I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
3088         }
3089
3090         is_pch_port = intel_crtc_driving_pch(crtc);
3091
3092         if (is_pch_port)
3093                 ironlake_fdi_pll_enable(crtc);
3094         else
3095                 ironlake_fdi_disable(crtc);
3096
3097         /* Enable panel fitting for LVDS */
3098         if (dev_priv->pch_pf_size &&
3099             (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
3100                 /* Force use of hard-coded filter coefficients
3101                  * as some pre-programmed values are broken,
3102                  * e.g. x201.
3103                  */
3104                 if (IS_IVYBRIDGE(dev))
3105                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
3106                                                  PF_PIPE_SEL_IVB(pipe));
3107                 else
3108                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
3109                 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
3110                 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
3111         }
3112
3113         /*
3114          * On ILK+ LUT must be loaded before the pipe is running but with
3115          * clocks enabled
3116          */
3117         intel_crtc_load_lut(crtc);
3118
3119         intel_enable_pipe(dev_priv, pipe, is_pch_port);
3120         intel_enable_plane(dev_priv, plane, pipe);
3121
3122         if (is_pch_port)
3123                 ironlake_pch_enable(crtc);
3124
3125         mutex_lock(&dev->struct_mutex);
3126         intel_update_fbc(dev);
3127         mutex_unlock(&dev->struct_mutex);
3128
3129         intel_crtc_update_cursor(crtc, true);
3130 }
3131
3132 static void ironlake_crtc_disable(struct drm_crtc *crtc)
3133 {
3134         struct drm_device *dev = crtc->dev;
3135         struct drm_i915_private *dev_priv = dev->dev_private;
3136         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3137         int pipe = intel_crtc->pipe;
3138         int plane = intel_crtc->plane;
3139         u32 reg, temp;
3140
3141         if (!intel_crtc->active)
3142                 return;
3143
3144         intel_crtc_wait_for_pending_flips(crtc);
3145         drm_vblank_off(dev, pipe);
3146         intel_crtc_update_cursor(crtc, false);
3147
3148         intel_disable_plane(dev_priv, plane, pipe);
3149
3150         if (dev_priv->cfb_plane == plane)
3151                 intel_disable_fbc(dev);
3152
3153         intel_disable_pipe(dev_priv, pipe);
3154
3155         /* Disable PF */
3156         I915_WRITE(PF_CTL(pipe), 0);
3157         I915_WRITE(PF_WIN_SZ(pipe), 0);
3158
3159         ironlake_fdi_disable(crtc);
3160
3161         /* This is a horrible layering violation; we should be doing this in
3162          * the connector/encoder ->prepare instead, but we don't always have
3163          * enough information there about the config to know whether it will
3164          * actually be necessary or just cause undesired flicker.
3165          */
3166         intel_disable_pch_ports(dev_priv, pipe);
3167
3168         intel_disable_transcoder(dev_priv, pipe);
3169
3170         if (HAS_PCH_CPT(dev)) {
3171                 /* disable TRANS_DP_CTL */
3172                 reg = TRANS_DP_CTL(pipe);
3173                 temp = I915_READ(reg);
3174                 temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
3175                 temp |= TRANS_DP_PORT_SEL_NONE;
3176                 I915_WRITE(reg, temp);
3177
3178                 /* disable DPLL_SEL */
3179                 temp = I915_READ(PCH_DPLL_SEL);
3180                 switch (pipe) {
3181                 case 0:
3182                         temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
3183                         break;
3184                 case 1:
3185                         temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
3186                         break;
3187                 case 2:
3188                         /* C shares PLL A or B */
3189                         temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
3190                         break;
3191                 default:
3192                         BUG(); /* unreachable: only transcoders A-C exist */
3193                 }
3194                 I915_WRITE(PCH_DPLL_SEL, temp);
3195         }
3196
3197         /* disable PCH DPLL */
3198         if (!intel_crtc->no_pll)
3199                 intel_disable_pch_pll(dev_priv, pipe);
3200
3201         /* Switch from PCDclk to Rawclk */
3202         reg = FDI_RX_CTL(pipe);
3203         temp = I915_READ(reg);
3204         I915_WRITE(reg, temp & ~FDI_PCDCLK);
3205
3206         /* Disable CPU FDI TX PLL */
3207         reg = FDI_TX_CTL(pipe);
3208         temp = I915_READ(reg);
3209         I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3210
3211         POSTING_READ(reg);
3212         udelay(100);
3213
3214         reg = FDI_RX_CTL(pipe);
3215         temp = I915_READ(reg);
3216         I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3217
3218         /* Wait for the clocks to turn off. */
3219         POSTING_READ(reg);
3220         udelay(100);
3221
3222         intel_crtc->active = false;
3223         intel_update_watermarks(dev);
3224
3225         mutex_lock(&dev->struct_mutex);
3226         intel_update_fbc(dev);
3227         intel_clear_scanline_wait(dev);
3228         mutex_unlock(&dev->struct_mutex);
3229 }
3230
3231 static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
3232 {
3233         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3234         int pipe = intel_crtc->pipe;
3235         int plane = intel_crtc->plane;
3236
3237         /* XXX: When our outputs are all unaware of DPMS modes other than off
3238          * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3239          */
3240         switch (mode) {
3241         case DRM_MODE_DPMS_ON:
3242         case DRM_MODE_DPMS_STANDBY:
3243         case DRM_MODE_DPMS_SUSPEND:
3244                 DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
3245                 ironlake_crtc_enable(crtc);
3246                 break;
3247
3248         case DRM_MODE_DPMS_OFF:
3249                 DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
3250                 ironlake_crtc_disable(crtc);
3251                 break;
3252         }
3253 }
3254
3255 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3256 {
3257         if (!enable && intel_crtc->overlay) {
3258                 struct drm_device *dev = intel_crtc->base.dev;
3259                 struct drm_i915_private *dev_priv = dev->dev_private;
3260
3261                 mutex_lock(&dev->struct_mutex);
3262                 dev_priv->mm.interruptible = false;
3263                 (void) intel_overlay_switch_off(intel_crtc->overlay);
3264                 dev_priv->mm.interruptible = true;
3265                 mutex_unlock(&dev->struct_mutex);
3266         }
3267
3268         /* Let userspace switch the overlay on again. In most cases userspace
3269          * has to recompute where to put it anyway.
3270          */
3271 }
3272
3273 static void i9xx_crtc_enable(struct drm_crtc *crtc)
3274 {
3275         struct drm_device *dev = crtc->dev;
3276         struct drm_i915_private *dev_priv = dev->dev_private;
3277         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3278         int pipe = intel_crtc->pipe;
3279         int plane = intel_crtc->plane;
3280
3281         if (intel_crtc->active)
3282                 return;
3283
3284         intel_crtc->active = true;
3285         intel_update_watermarks(dev);
3286
3287         intel_enable_pll(dev_priv, pipe);
3288         intel_enable_pipe(dev_priv, pipe, false);
3289         intel_enable_plane(dev_priv, plane, pipe);
3290
3291         intel_crtc_load_lut(crtc);
3292         intel_update_fbc(dev);
3293
3294         /* Give the overlay scaler a chance to enable if it's on this pipe */
3295         intel_crtc_dpms_overlay(intel_crtc, true);
3296         intel_crtc_update_cursor(crtc, true);
3297 }
3298
3299 static void i9xx_crtc_disable(struct drm_crtc *crtc)
3300 {
3301         struct drm_device *dev = crtc->dev;
3302         struct drm_i915_private *dev_priv = dev->dev_private;
3303         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3304         int pipe = intel_crtc->pipe;
3305         int plane = intel_crtc->plane;
3306
3307         if (!intel_crtc->active)
3308                 return;
3309
3310         /* Give the overlay scaler a chance to disable if it's on this pipe */
3311         intel_crtc_wait_for_pending_flips(crtc);
3312         drm_vblank_off(dev, pipe);
3313         intel_crtc_dpms_overlay(intel_crtc, false);
3314         intel_crtc_update_cursor(crtc, false);
3315
3316         if (dev_priv->cfb_plane == plane)
3317                 intel_disable_fbc(dev);
3318
3319         intel_disable_plane(dev_priv, plane, pipe);
3320         intel_disable_pipe(dev_priv, pipe);
3321         intel_disable_pll(dev_priv, pipe);
3322
3323         intel_crtc->active = false;
3324         intel_update_fbc(dev);
3325         intel_update_watermarks(dev);
3326         intel_clear_scanline_wait(dev);
3327 }
3328
3329 static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
3330 {
3331         /* XXX: When our outputs are all unaware of DPMS modes other than off
3332          * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3333          */
3334         switch (mode) {
3335         case DRM_MODE_DPMS_ON:
3336         case DRM_MODE_DPMS_STANDBY:
3337         case DRM_MODE_DPMS_SUSPEND:
3338                 i9xx_crtc_enable(crtc);
3339                 break;
3340         case DRM_MODE_DPMS_OFF:
3341                 i9xx_crtc_disable(crtc);
3342                 break;
3343         }
3344 }
3345
3346 /**
3347  * Sets the power management mode of the pipe and plane.
3348  */
3349 static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
3350 {
3351         struct drm_device *dev = crtc->dev;
3352         struct drm_i915_private *dev_priv = dev->dev_private;
3353         struct drm_i915_master_private *master_priv;
3354         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3355         int pipe = intel_crtc->pipe;
3356         bool enabled;
3357
3358         if (intel_crtc->dpms_mode == mode)
3359                 return;
3360
3361         intel_crtc->dpms_mode = mode;
3362
3363         dev_priv->display.dpms(crtc, mode);
3364
3365         if (!dev->primary->master)
3366                 return;
3367
3368         master_priv = dev->primary->master->driver_priv;
3369         if (!master_priv->sarea_priv)
3370                 return;
3371
3372         enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
3373
3374         switch (pipe) {
3375         case 0:
3376                 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
3377                 master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
3378                 break;
3379         case 1:
3380                 master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
3381                 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
3382                 break;
3383         default:
3384                 DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
3385                 break;
3386         }
3387 }
3388
3389 static void intel_crtc_disable(struct drm_crtc *crtc)
3390 {
3391         struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
3392         struct drm_device *dev = crtc->dev;
3393
3394         crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
3395         assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
3396         assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
3397
3398         if (crtc->fb) {
3399                 mutex_lock(&dev->struct_mutex);
3400                 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
3401                 mutex_unlock(&dev->struct_mutex);
3402         }
3403 }
3404
3405 /* Prepare for a mode set.
3406  *
3407  * Note we could be a lot smarter here.  We need to figure out which outputs
3408  * will be enabled, which disabled (in short, how the config will change)
3409  * and perform the minimum necessary steps to accomplish that, e.g. updating
3410  * watermarks, FBC configuration, making sure PLLs are programmed correctly,
3411  * panel fitting is in the proper state, etc.
3412  */
3413 static void i9xx_crtc_prepare(struct drm_crtc *crtc)
3414 {
3415         i9xx_crtc_disable(crtc);
3416 }
3417
3418 static void i9xx_crtc_commit(struct drm_crtc *crtc)
3419 {
3420         i9xx_crtc_enable(crtc);
3421 }
3422
3423 static void ironlake_crtc_prepare(struct drm_crtc *crtc)
3424 {
3425         ironlake_crtc_disable(crtc);
3426 }
3427
3428 static void ironlake_crtc_commit(struct drm_crtc *crtc)
3429 {
3430         ironlake_crtc_enable(crtc);
3431 }
3432
3433 void intel_encoder_prepare(struct drm_encoder *encoder)
3434 {
3435         struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3436         /* lvds has its own version of prepare see intel_lvds_prepare */
3437         encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
3438 }
3439
3440 void intel_encoder_commit(struct drm_encoder *encoder)
3441 {
3442         struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3443         struct drm_device *dev = encoder->dev;
3444         struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3445         struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
3446
3447         /* lvds has its own version of commit see intel_lvds_commit */
3448         encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
3449
3450         if (HAS_PCH_CPT(dev))
3451                 intel_cpt_verify_modeset(dev, intel_crtc->pipe);
3452 }
3453
3454 void intel_encoder_destroy(struct drm_encoder *encoder)
3455 {
3456         struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3457
3458         drm_encoder_cleanup(encoder);
3459         kfree(intel_encoder);
3460 }
3461
3462 static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3463                                   struct drm_display_mode *mode,
3464                                   struct drm_display_mode *adjusted_mode)
3465 {
3466         struct drm_device *dev = crtc->dev;
3467
3468         if (HAS_PCH_SPLIT(dev)) {
3469                 /* FDI link clock is fixed at 2.7G */
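                /* i.e. reject dot clocks above IRONLAKE_FDI_FREQ * 4 / 3,
                 * roughly what 4 FDI lanes can carry at 3 bytes per pixel.
                 */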
3470                 if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
3471                         return false;
3472         }
3473
3474         /* All interlaced capable intel hw wants timings in frames. Note though
3475          * that intel_lvds_mode_fixup does some funny tricks with the crtc
3476          * timings, so we need to be careful not to clobber these. */
3477         if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
3478                 drm_mode_set_crtcinfo(adjusted_mode, 0);
3479
3480         return true;
3481 }
3482
3483 static int i945_get_display_clock_speed(struct drm_device *dev)
3484 {
3485         return 400000;
3486 }
3487
3488 static int i915_get_display_clock_speed(struct drm_device *dev)
3489 {
3490         return 333000;
3491 }
3492
3493 static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
3494 {
3495         return 200000;
3496 }
3497
3498 static int i915gm_get_display_clock_speed(struct drm_device *dev)
3499 {
3500         u16 gcfgc = 0;
3501
3502         pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3503
3504         if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
3505                 return 133000;
3506         else {
3507                 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
3508                 case GC_DISPLAY_CLOCK_333_MHZ:
3509                         return 333000;
3510                 default:
3511                 case GC_DISPLAY_CLOCK_190_200_MHZ:
3512                         return 190000;
3513                 }
3514         }
3515 }
3516
3517 static int i865_get_display_clock_speed(struct drm_device *dev)
3518 {
3519         return 266000;
3520 }
3521
3522 static int i855_get_display_clock_speed(struct drm_device *dev)
3523 {
3524         u16 hpllcc = 0;
3525         /* Assume that the hardware is in the high speed state.  This
3526          * should be the default.
3527          */
3528         switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
3529         case GC_CLOCK_133_200:
3530         case GC_CLOCK_100_200:
3531                 return 200000;
3532         case GC_CLOCK_166_250:
3533                 return 250000;
3534         case GC_CLOCK_100_133:
3535                 return 133000;
3536         }
3537
3538         /* Shouldn't happen */
3539         return 0;
3540 }
3541
3542 static int i830_get_display_clock_speed(struct drm_device *dev)
3543 {
3544         return 133000;
3545 }
3546
3547 struct fdi_m_n {
3548         u32        tu;
3549         u32        gmch_m;
3550         u32        gmch_n;
3551         u32        link_m;
3552         u32        link_n;
3553 };
3554
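/*
 * The FDI M/N values are programmed into 24-bit register fields, so scale
 * the ratio down until both terms fit.
 */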
3555 static void
3556 fdi_reduce_ratio(u32 *num, u32 *den)
3557 {
3558         while (*num > 0xffffff || *den > 0xffffff) {
3559                 *num >>= 1;
3560                 *den >>= 1;
3561         }
3562 }
3563
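/*
 * Example (hypothetical numbers): a 148500 kHz mode at 24 bpp over 4 lanes
 * with a 270000 kHz link clock yields gmch_m = 24 * 148500 = 3564000 and
 * gmch_n = 270000 * 4 * 8 = 8640000 (both already within 24 bits), and
 * link_m/link_n = 148500/270000.
 */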
3564 static void
3565 ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
3566                      int link_clock, struct fdi_m_n *m_n)
3567 {
3568         m_n->tu = 64; /* default size */
3569
3570         /* BUG_ON(pixel_clock > INT_MAX / 36); */
3571         m_n->gmch_m = bits_per_pixel * pixel_clock;
3572         m_n->gmch_n = link_clock * nlanes * 8;
3573         fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
3574
3575         m_n->link_m = pixel_clock;
3576         m_n->link_n = link_clock;
3577         fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
3578 }
3579
3580
3581 struct intel_watermark_params {
3582         unsigned long fifo_size;
3583         unsigned long max_wm;
3584         unsigned long default_wm;
3585         unsigned long guard_size;
3586         unsigned long cacheline_size;
3587 };
3588
3589 /* Pineview has different values for various configs */
3590 static const struct intel_watermark_params pineview_display_wm = {
3591         PINEVIEW_DISPLAY_FIFO,
3592         PINEVIEW_MAX_WM,
3593         PINEVIEW_DFT_WM,
3594         PINEVIEW_GUARD_WM,
3595         PINEVIEW_FIFO_LINE_SIZE
3596 };
3597 static const struct intel_watermark_params pineview_display_hplloff_wm = {
3598         PINEVIEW_DISPLAY_FIFO,
3599         PINEVIEW_MAX_WM,
3600         PINEVIEW_DFT_HPLLOFF_WM,
3601         PINEVIEW_GUARD_WM,
3602         PINEVIEW_FIFO_LINE_SIZE
3603 };
3604 static const struct intel_watermark_params pineview_cursor_wm = {
3605         PINEVIEW_CURSOR_FIFO,
3606         PINEVIEW_CURSOR_MAX_WM,
3607         PINEVIEW_CURSOR_DFT_WM,
3608         PINEVIEW_CURSOR_GUARD_WM,
3609         PINEVIEW_FIFO_LINE_SIZE,
3610 };
3611 static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
3612         PINEVIEW_CURSOR_FIFO,
3613         PINEVIEW_CURSOR_MAX_WM,
3614         PINEVIEW_CURSOR_DFT_WM,
3615         PINEVIEW_CURSOR_GUARD_WM,
3616         PINEVIEW_FIFO_LINE_SIZE
3617 };
3618 static const struct intel_watermark_params g4x_wm_info = {
3619         G4X_FIFO_SIZE,
3620         G4X_MAX_WM,
3621         G4X_MAX_WM,
3622         2,
3623         G4X_FIFO_LINE_SIZE,
3624 };
3625 static const struct intel_watermark_params g4x_cursor_wm_info = {
3626         I965_CURSOR_FIFO,
3627         I965_CURSOR_MAX_WM,
3628         I965_CURSOR_DFT_WM,
3629         2,
3630         G4X_FIFO_LINE_SIZE,
3631 };
3632 static const struct intel_watermark_params i965_cursor_wm_info = {
3633         I965_CURSOR_FIFO,
3634         I965_CURSOR_MAX_WM,
3635         I965_CURSOR_DFT_WM,
3636         2,
3637         I915_FIFO_LINE_SIZE,
3638 };
3639 static const struct intel_watermark_params i945_wm_info = {
3640         I945_FIFO_SIZE,
3641         I915_MAX_WM,
3642         1,
3643         2,
3644         I915_FIFO_LINE_SIZE
3645 };
3646 static const struct intel_watermark_params i915_wm_info = {
3647         I915_FIFO_SIZE,
3648         I915_MAX_WM,
3649         1,
3650         2,
3651         I915_FIFO_LINE_SIZE
3652 };
3653 static const struct intel_watermark_params i855_wm_info = {
3654         I855GM_FIFO_SIZE,
3655         I915_MAX_WM,
3656         1,
3657         2,
3658         I830_FIFO_LINE_SIZE
3659 };
3660 static const struct intel_watermark_params i830_wm_info = {
3661         I830_FIFO_SIZE,
3662         I915_MAX_WM,
3663         1,
3664         2,
3665         I830_FIFO_LINE_SIZE
3666 };
3667
3668 static const struct intel_watermark_params ironlake_display_wm_info = {
3669         ILK_DISPLAY_FIFO,
3670         ILK_DISPLAY_MAXWM,
3671         ILK_DISPLAY_DFTWM,
3672         2,
3673         ILK_FIFO_LINE_SIZE
3674 };
3675 static const struct intel_watermark_params ironlake_cursor_wm_info = {
3676         ILK_CURSOR_FIFO,
3677         ILK_CURSOR_MAXWM,
3678         ILK_CURSOR_DFTWM,
3679         2,
3680         ILK_FIFO_LINE_SIZE
3681 };
3682 static const struct intel_watermark_params ironlake_display_srwm_info = {
3683         ILK_DISPLAY_SR_FIFO,
3684         ILK_DISPLAY_MAX_SRWM,
3685         ILK_DISPLAY_DFT_SRWM,
3686         2,
3687         ILK_FIFO_LINE_SIZE
3688 };
3689 static const struct intel_watermark_params ironlake_cursor_srwm_info = {
3690         ILK_CURSOR_SR_FIFO,
3691         ILK_CURSOR_MAX_SRWM,
3692         ILK_CURSOR_DFT_SRWM,
3693         2,
3694         ILK_FIFO_LINE_SIZE
3695 };
3696
3697 static const struct intel_watermark_params sandybridge_display_wm_info = {
3698         SNB_DISPLAY_FIFO,
3699         SNB_DISPLAY_MAXWM,
3700         SNB_DISPLAY_DFTWM,
3701         2,
3702         SNB_FIFO_LINE_SIZE
3703 };
3704 static const struct intel_watermark_params sandybridge_cursor_wm_info = {
3705         SNB_CURSOR_FIFO,
3706         SNB_CURSOR_MAXWM,
3707         SNB_CURSOR_DFTWM,
3708         2,
3709         SNB_FIFO_LINE_SIZE
3710 };
3711 static const struct intel_watermark_params sandybridge_display_srwm_info = {
3712         SNB_DISPLAY_SR_FIFO,
3713         SNB_DISPLAY_MAX_SRWM,
3714         SNB_DISPLAY_DFT_SRWM,
3715         2,
3716         SNB_FIFO_LINE_SIZE
3717 };
3718 static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
3719         SNB_CURSOR_SR_FIFO,
3720         SNB_CURSOR_MAX_SRWM,
3721         SNB_CURSOR_DFT_SRWM,
3722         2,
3723         SNB_FIFO_LINE_SIZE
3724 };
3725
3726
3727 /**
3728  * intel_calculate_wm - calculate watermark level
3729  * @clock_in_khz: pixel clock
3730  * @wm: chip FIFO params
3731  * @pixel_size: display pixel size
3732  * @latency_ns: memory latency for the platform
3733  *
3734  * Calculate the watermark level (the level at which the display plane will
3735  * start fetching from memory again).  Each chip has a different display
3736  * FIFO size and allocation, so the caller needs to figure that out and pass
3737  * in the correct intel_watermark_params structure.
3738  *
3739  * As the pixel clock runs, the FIFO will be drained at a rate that depends
3740  * on the pixel size.  When it reaches the watermark level, it'll start
3741  * fetching FIFO-line-sized chunks from memory until the FIFO fills
3742  * past the watermark point.  If the FIFO drains completely, a FIFO underrun
3743  * will occur, and a display engine hang could result.
3744  */
3745 static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
3746                                         const struct intel_watermark_params *wm,
3747                                         int fifo_size,
3748                                         int pixel_size,
3749                                         unsigned long latency_ns)
3750 {
3751         long entries_required, wm_size;
3752
3753         /*
3754          * Note: we need to make sure we don't overflow for various clock &
3755          * latency values.
3756          * clocks go from a few thousand to several hundred thousand.
3757          * latency is usually a few thousand
3758          */
3759         entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
3760                 1000;
3761         entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
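        /*
         * Example (hypothetical mode): a 148500 kHz clock at 4 bytes per
         * pixel with the default 5000 ns latency drains
         * (148500 / 1000) * 4 * 5000 / 1000 = 2960 bytes while we wait,
         * i.e. DIV_ROUND_UP(2960, 64) = 47 entries for a 64-byte cacheline.
         */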
3762
3763         DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
3764
3765         wm_size = fifo_size - (entries_required + wm->guard_size);
3766
3767         DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
3768
3769         /* Don't promote wm_size to unsigned... */
3770         if (wm_size > (long)wm->max_wm)
3771                 wm_size = wm->max_wm;
3772         if (wm_size <= 0)
3773                 wm_size = wm->default_wm;
3774         return wm_size;
3775 }
3776
3777 struct cxsr_latency {
3778         int is_desktop;
3779         int is_ddr3;
3780         unsigned long fsb_freq;
3781         unsigned long mem_freq;
3782         unsigned long display_sr;
3783         unsigned long display_hpll_disable;
3784         unsigned long cursor_sr;
3785         unsigned long cursor_hpll_disable;
3786 };
3787
3788 static const struct cxsr_latency cxsr_latency_table[] = {
3789         {1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
3790         {1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
3791         {1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
3792         {1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
3793         {1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */
3794
3795         {1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
3796         {1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
3797         {1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
3798         {1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
3799         {1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */
3800
3801         {1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
3802         {1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
3803         {1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
3804         {1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
3805         {1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */
3806
3807         {0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
3808         {0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
3809         {0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
3810         {0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
3811         {0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */
3812
3813         {0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
3814         {0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
3815         {0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
3816         {0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
3817         {0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */
3818
3819         {0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
3820         {0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
3821         {0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
3822         {0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
3823         {0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
3824 };
3825
3826 static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
3827                                                          int is_ddr3,
3828                                                          int fsb,
3829                                                          int mem)
3830 {
3831         const struct cxsr_latency *latency;
3832         int i;
3833
3834         if (fsb == 0 || mem == 0)
3835                 return NULL;
3836
3837         for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
3838                 latency = &cxsr_latency_table[i];
3839                 if (is_desktop == latency->is_desktop &&
3840                     is_ddr3 == latency->is_ddr3 &&
3841                     fsb == latency->fsb_freq && mem == latency->mem_freq)
3842                         return latency;
3843         }
3844
3845         DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
3846
3847         return NULL;
3848 }
3849
3850 static void pineview_disable_cxsr(struct drm_device *dev)
3851 {
3852         struct drm_i915_private *dev_priv = dev->dev_private;
3853
3854         /* deactivate cxsr */
3855         I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
3856 }
3857
3858 /*
3859  * Latency for FIFO fetches is dependent on several factors:
3860  *   - memory configuration (speed, channels)
3861  *   - chipset
3862  *   - current MCH state
3863  * It can be fairly high in some situations, so here we assume a fairly
3864  * pessimal value.  It's a tradeoff between extra memory fetches (if we
3865  * set this value too high, the FIFO will fetch frequently to stay full)
3866  * and underruns (if we set it too low to save power, we might see FIFO
3867  * underruns and display "flicker").
3868  *
3869  * A value of 5us seems to be a good balance; safe for very low end
3870  * platforms but not overly aggressive on lower latency configs.
3871  */
3872 static const int latency_ns = 5000;
3873
3874 static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
3875 {
3876         struct drm_i915_private *dev_priv = dev->dev_private;
3877         uint32_t dsparb = I915_READ(DSPARB);
3878         int size;
3879
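        /* DSPARB describes how the display FIFO is split between the planes,
         * in cachelines.
         */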
3880         size = dsparb & 0x7f;
3881         if (plane)
3882                 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
3883
3884         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3885                       plane ? "B" : "A", size);
3886
3887         return size;
3888 }
3889
3890 static int i85x_get_fifo_size(struct drm_device *dev, int plane)
3891 {
3892         struct drm_i915_private *dev_priv = dev->dev_private;
3893         uint32_t dsparb = I915_READ(DSPARB);
3894         int size;
3895
3896         size = dsparb & 0x1ff;
3897         if (plane)
3898                 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
3899         size >>= 1; /* Convert to cachelines */
3900
3901         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3902                       plane ? "B" : "A", size);
3903
3904         return size;
3905 }
3906
3907 static int i845_get_fifo_size(struct drm_device *dev, int plane)
3908 {
3909         struct drm_i915_private *dev_priv = dev->dev_private;
3910         uint32_t dsparb = I915_READ(DSPARB);
3911         int size;
3912
3913         size = dsparb & 0x7f;
3914         size >>= 2; /* Convert to cachelines */
3915
3916         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3917                       plane ? "B" : "A",
3918                       size);
3919
3920         return size;
3921 }
3922
3923 static int i830_get_fifo_size(struct drm_device *dev, int plane)
3924 {
3925         struct drm_i915_private *dev_priv = dev->dev_private;
3926         uint32_t dsparb = I915_READ(DSPARB);
3927         int size;
3928
3929         size = dsparb & 0x7f;
3930         size >>= 1; /* Convert to cachelines */
3931
3932         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3933                       plane ? "B" : "A", size);
3934
3935         return size;
3936 }
3937
3938 static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
3939 {
3940         struct drm_crtc *crtc, *enabled = NULL;
3941
3942         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3943                 if (crtc->enabled && crtc->fb) {
3944                         if (enabled)
3945                                 return NULL;
3946                         enabled = crtc;
3947                 }
3948         }
3949
3950         return enabled;
3951 }
3952
3953 static void pineview_update_wm(struct drm_device *dev)
3954 {
3955         struct drm_i915_private *dev_priv = dev->dev_private;
3956         struct drm_crtc *crtc;
3957         const struct cxsr_latency *latency;
3958         u32 reg;
3959         unsigned long wm;
3960
3961         latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
3962                                          dev_priv->fsb_freq, dev_priv->mem_freq);
3963         if (!latency) {
3964                 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
3965                 pineview_disable_cxsr(dev);
3966                 return;
3967         }
3968
3969         crtc = single_enabled_crtc(dev);
3970         if (crtc) {
3971                 int clock = crtc->mode.clock;
3972                 int pixel_size = crtc->fb->bits_per_pixel / 8;
3973
3974                 /* Display SR */
3975                 wm = intel_calculate_wm(clock, &pineview_display_wm,
3976                                         pineview_display_wm.fifo_size,
3977                                         pixel_size, latency->display_sr);
3978                 reg = I915_READ(DSPFW1);
3979                 reg &= ~DSPFW_SR_MASK;
3980                 reg |= wm << DSPFW_SR_SHIFT;
3981                 I915_WRITE(DSPFW1, reg);
3982                 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
3983
3984                 /* cursor SR */
3985                 wm = intel_calculate_wm(clock, &pineview_cursor_wm,
3986                                         pineview_display_wm.fifo_size,
3987                                         pixel_size, latency->cursor_sr);
3988                 reg = I915_READ(DSPFW3);
3989                 reg &= ~DSPFW_CURSOR_SR_MASK;
3990                 reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
3991                 I915_WRITE(DSPFW3, reg);
3992
3993                 /* Display HPLL off SR */
3994                 wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
3995                                         pineview_display_hplloff_wm.fifo_size,
3996                                         pixel_size, latency->display_hpll_disable);
3997                 reg = I915_READ(DSPFW3);
3998                 reg &= ~DSPFW_HPLL_SR_MASK;
3999                 reg |= wm & DSPFW_HPLL_SR_MASK;
4000                 I915_WRITE(DSPFW3, reg);
4001
4002                 /* cursor HPLL off SR */
4003                 wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
4004                                         pineview_display_hplloff_wm.fifo_size,
4005                                         pixel_size, latency->cursor_hpll_disable);
4006                 reg = I915_READ(DSPFW3);
4007                 reg &= ~DSPFW_HPLL_CURSOR_MASK;
4008                 reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
4009                 I915_WRITE(DSPFW3, reg);
4010                 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
4011
4012                 /* activate cxsr */
4013                 I915_WRITE(DSPFW3,
4014                            I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
4015                 DRM_DEBUG_KMS("Self-refresh is enabled\n");
4016         } else {
4017                 pineview_disable_cxsr(dev);
4018                 DRM_DEBUG_KMS("Self-refresh is disabled\n");
4019         }
4020 }
4021
4022 static bool g4x_compute_wm0(struct drm_device *dev,
4023                             int plane,
4024                             const struct intel_watermark_params *display,
4025                             int display_latency_ns,
4026                             const struct intel_watermark_params *cursor,
4027                             int cursor_latency_ns,
4028                             int *plane_wm,
4029                             int *cursor_wm)
4030 {
4031         struct drm_crtc *crtc;
4032         int htotal, hdisplay, clock, pixel_size;
4033         int line_time_us, line_count;
4034         int entries, tlb_miss;
4035
4036         crtc = intel_get_crtc_for_plane(dev, plane);
4037         if (crtc->fb == NULL || !crtc->enabled) {
4038                 *cursor_wm = cursor->guard_size;
4039                 *plane_wm = display->guard_size;
4040                 return false;
4041         }
4042
4043         htotal = crtc->mode.htotal;
4044         hdisplay = crtc->mode.hdisplay;
4045         clock = crtc->mode.clock;
4046         pixel_size = crtc->fb->bits_per_pixel / 8;
4047
4048         /* Use the small buffer method to calculate plane watermark */
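        /* bytes drained from the FIFO while we wait out the latency, plus a
         * TLB miss allowance
         */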
4049         entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
4050         tlb_miss = display->fifo_size * display->cacheline_size - hdisplay * 8;
4051         if (tlb_miss > 0)
4052                 entries += tlb_miss;
4053         entries = DIV_ROUND_UP(entries, display->cacheline_size);
4054         *plane_wm = entries + display->guard_size;
4055         if (*plane_wm > (int)display->max_wm)
4056                 *plane_wm = display->max_wm;
4057
4058         /* Use the large buffer method to calculate cursor watermark */
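        /* latency rounded up to whole scanlines, times the bytes a 64 pixel
         * wide cursor fetches per line
         */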
4059         line_time_us = ((htotal * 1000) / clock);
4060         line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
4061         entries = line_count * 64 * pixel_size;
4062         tlb_miss = cursor->fifo_size * cursor->cacheline_size - hdisplay * 8;
4063         if (tlb_miss > 0)
4064                 entries += tlb_miss;
4065         entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4066         *cursor_wm = entries + cursor->guard_size;
4067         if (*cursor_wm > (int)cursor->max_wm)
4068                 *cursor_wm = (int)cursor->max_wm;
4069
4070         return true;
4071 }
4072
4073 /*
4074  * Check the wm result.
4075  *
4076  * If any calculated watermark value is larger than the maximum value that
4077  * can be programmed into the associated watermark register, that watermark
4078  * must be disabled.
4079  */
4080 static bool g4x_check_srwm(struct drm_device *dev,
4081                            int display_wm, int cursor_wm,
4082                            const struct intel_watermark_params *display,
4083                            const struct intel_watermark_params *cursor)
4084 {
4085         DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
4086                       display_wm, cursor_wm);
4087
4088         if (display_wm > display->max_wm) {
4089                 DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
4090                               display_wm, display->max_wm);
4091                 return false;
4092         }
4093
4094         if (cursor_wm > cursor->max_wm) {
4095                 DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
4096                               cursor_wm, cursor->max_wm);
4097                 return false;
4098         }
4099
4100         if (!(display_wm || cursor_wm)) {
4101                 DRM_DEBUG_KMS("SR latency is 0, disabling\n");
4102                 return false;
4103         }
4104
4105         return true;
4106 }
4107
4108 static bool g4x_compute_srwm(struct drm_device *dev,
4109                              int plane,
4110                              int latency_ns,
4111                              const struct intel_watermark_params *display,
4112                              const struct intel_watermark_params *cursor,
4113                              int *display_wm, int *cursor_wm)
4114 {
4115         struct drm_crtc *crtc;
4116         int hdisplay, htotal, pixel_size, clock;
4117         unsigned long line_time_us;
4118         int line_count, line_size;
4119         int small, large;
4120         int entries;
4121
4122         if (!latency_ns) {
4123                 *display_wm = *cursor_wm = 0;
4124                 return false;
4125         }
4126
4127         crtc = intel_get_crtc_for_plane(dev, plane);
4128         hdisplay = crtc->mode.hdisplay;
4129         htotal = crtc->mode.htotal;
4130         clock = crtc->mode.clock;
4131         pixel_size = crtc->fb->bits_per_pixel / 8;
4132
4133         line_time_us = (htotal * 1000) / clock;
4134         line_count = (latency_ns / line_time_us + 1000) / 1000;
4135         line_size = hdisplay * pixel_size;
4136
4137         /* Use the minimum of the small and large buffer method for primary */
4138         small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4139         large = line_count * line_size;
4140
4141         entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
4142         *display_wm = entries + display->guard_size;
4143
4144         /* calculate the self-refresh watermark for display cursor */
4145         entries = line_count * pixel_size * 64;
4146         entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4147         *cursor_wm = entries + cursor->guard_size;
4148
4149         return g4x_check_srwm(dev,
4150                               *display_wm, *cursor_wm,
4151                               display, cursor);
4152 }
4153
4154 #define single_plane_enabled(mask) is_power_of_2(mask)
4155
4156 static void g4x_update_wm(struct drm_device *dev)
4157 {
4158         static const int sr_latency_ns = 12000;
4159         struct drm_i915_private *dev_priv = dev->dev_private;
4160         int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
4161         int plane_sr, cursor_sr;
4162         unsigned int enabled = 0;
4163
4164         if (g4x_compute_wm0(dev, 0,
4165                             &g4x_wm_info, latency_ns,
4166                             &g4x_cursor_wm_info, latency_ns,
4167                             &planea_wm, &cursora_wm))
4168                 enabled |= 1;
4169
4170         if (g4x_compute_wm0(dev, 1,
4171                             &g4x_wm_info, latency_ns,
4172                             &g4x_cursor_wm_info, latency_ns,
4173                             &planeb_wm, &cursorb_wm))
4174                 enabled |= 2;
4175
4176         plane_sr = cursor_sr = 0;
4177         if (single_plane_enabled(enabled) &&
4178             g4x_compute_srwm(dev, ffs(enabled) - 1,
4179                              sr_latency_ns,
4180                              &g4x_wm_info,
4181                              &g4x_cursor_wm_info,
4182                              &plane_sr, &cursor_sr))
4183                 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
4184         else
4185                 I915_WRITE(FW_BLC_SELF,
4186                            I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
4187
4188         DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
4189                       planea_wm, cursora_wm,
4190                       planeb_wm, cursorb_wm,
4191                       plane_sr, cursor_sr);
4192
4193         I915_WRITE(DSPFW1,
4194                    (plane_sr << DSPFW_SR_SHIFT) |
4195                    (cursorb_wm << DSPFW_CURSORB_SHIFT) |
4196                    (planeb_wm << DSPFW_PLANEB_SHIFT) |
4197                    planea_wm);
4198         I915_WRITE(DSPFW2,
4199                    (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
4200                    (cursora_wm << DSPFW_CURSORA_SHIFT));
4201         /* HPLL off in SR has some issues on G4x... disable it */
4202         I915_WRITE(DSPFW3,
4203                    (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
4204                    (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
4205 }
4206
4207 static void i965_update_wm(struct drm_device *dev)
4208 {
4209         struct drm_i915_private *dev_priv = dev->dev_private;
4210         struct drm_crtc *crtc;
4211         int srwm = 1;
4212         int cursor_sr = 16;
4213
4214         /* Calc sr entries for one plane configs */
4215         crtc = single_enabled_crtc(dev);
4216         if (crtc) {
4217                 /* self-refresh has much higher latency */
4218                 static const int sr_latency_ns = 12000;
4219                 int clock = crtc->mode.clock;
4220                 int htotal = crtc->mode.htotal;
4221                 int hdisplay = crtc->mode.hdisplay;
4222                 int pixel_size = crtc->fb->bits_per_pixel / 8;
4223                 unsigned long line_time_us;
4224                 int entries;
4225
4226                 line_time_us = ((htotal * 1000) / clock);
4227
4228                 /* Use ns/us then divide to preserve precision */
4229                 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
4230                         pixel_size * hdisplay;
4231                 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
4232                 srwm = I965_FIFO_SIZE - entries;
4233                 if (srwm < 0)
4234                         srwm = 1;
4235                 srwm &= 0x1ff;
4236                 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
4237                               entries, srwm);
4238
4239                 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
4240                         pixel_size * 64;
4241                 entries = DIV_ROUND_UP(entries,
4242                                           i965_cursor_wm_info.cacheline_size);
4243                 cursor_sr = i965_cursor_wm_info.fifo_size -
4244                         (entries + i965_cursor_wm_info.guard_size);
4245
4246                 if (cursor_sr > i965_cursor_wm_info.max_wm)
4247                         cursor_sr = i965_cursor_wm_info.max_wm;
4248
4249                 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
4250                               "cursor %d\n", srwm, cursor_sr);
4251
4252                 if (IS_CRESTLINE(dev))
4253                         I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
4254         } else {
4255                 /* Turn off self refresh if both pipes are enabled */
4256                 if (IS_CRESTLINE(dev))
4257                         I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
4258                                    & ~FW_BLC_SELF_EN);
4259         }
4260
4261         DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
4262                       srwm);
4263
4264         /* 965 has limitations... */
4265         I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
4266                    (8 << 16) | (8 << 8) | (8 << 0));
4267         I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
4268         /* update cursor SR watermark */
4269         I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
4270 }
4271
4272 static void i9xx_update_wm(struct drm_device *dev)
4273 {
4274         struct drm_i915_private *dev_priv = dev->dev_private;
4275         const struct intel_watermark_params *wm_info;
4276         uint32_t fwater_lo;
4277         uint32_t fwater_hi;
4278         int cwm, srwm = 1;
4279         int fifo_size;
4280         int planea_wm, planeb_wm;
4281         struct drm_crtc *crtc, *enabled = NULL;
4282
4283         if (IS_I945GM(dev))
4284                 wm_info = &i945_wm_info;
4285         else if (!IS_GEN2(dev))
4286                 wm_info = &i915_wm_info;
4287         else
4288                 wm_info = &i855_wm_info;
4289
4290         fifo_size = dev_priv->display.get_fifo_size(dev, 0);
4291         crtc = intel_get_crtc_for_plane(dev, 0);
4292         if (crtc->enabled && crtc->fb) {
4293                 planea_wm = intel_calculate_wm(crtc->mode.clock,
4294                                                wm_info, fifo_size,
4295                                                crtc->fb->bits_per_pixel / 8,
4296                                                latency_ns);
4297                 enabled = crtc;
4298         } else
4299                 planea_wm = fifo_size - wm_info->guard_size;
4300
4301         fifo_size = dev_priv->display.get_fifo_size(dev, 1);
4302         crtc = intel_get_crtc_for_plane(dev, 1);
4303         if (crtc->enabled && crtc->fb) {
4304                 planeb_wm = intel_calculate_wm(crtc->mode.clock,
4305                                                wm_info, fifo_size,
4306                                                crtc->fb->bits_per_pixel / 8,
4307                                                latency_ns);
4308                 if (enabled == NULL)
4309                         enabled = crtc;
4310                 else
4311                         enabled = NULL;
4312         } else
4313                 planeb_wm = fifo_size - wm_info->guard_size;
4314
4315         DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
4316
4317         /*
4318          * Overlay gets an aggressive default since video jitter is bad.
4319          */
4320         cwm = 2;
4321
4322         /* Play safe and disable self-refresh before adjusting watermarks. */
4323         if (IS_I945G(dev) || IS_I945GM(dev))
4324                 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
4325         else if (IS_I915GM(dev))
4326                 I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
4327
4328         /* Calc sr entries for one plane configs */
4329         if (HAS_FW_BLC(dev) && enabled) {
4330                 /* self-refresh has much higher latency */
4331                 static const int sr_latency_ns = 6000;
4332                 int clock = enabled->mode.clock;
4333                 int htotal = enabled->mode.htotal;
4334                 int hdisplay = enabled->mode.hdisplay;
4335                 int pixel_size = enabled->fb->bits_per_pixel / 8;
4336                 unsigned long line_time_us;
4337                 int entries;
4338
4339                 line_time_us = (htotal * 1000) / clock;
4340
4341                 /* Use ns/us then divide to preserve precision */
4342                 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
4343                         pixel_size * hdisplay;
4344                 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
4345                 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
4346                 srwm = wm_info->fifo_size - entries;
4347                 if (srwm < 0)
4348                         srwm = 1;
4349
4350                 if (IS_I945G(dev) || IS_I945GM(dev))
4351                         I915_WRITE(FW_BLC_SELF,
4352                                    FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
4353                 else if (IS_I915GM(dev))
4354                         I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
4355         }
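        /*
         * For illustration of the self-refresh math above, with an assumed
         * 1920x1080@60 mode (clock 148500 kHz, htotal 2200), 32bpp and an
         * i945-style FIFO of 127 entries of 64 bytes (all values chosen only
         * to show the arithmetic, not taken from a real configuration):
         *
         *   line_time_us = (2200 * 1000) / 148500                 = 14
         *   entries      = ((6000 / 14 + 1000) / 1000) * 4 * 1920 = 7680 bytes
         *   entries      = DIV_ROUND_UP(7680, 64)                 = 120 cachelines
         *   srwm         = 127 - 120                              = 7
         */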
4356
4357         DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
4358                       planea_wm, planeb_wm, cwm, srwm);
4359
4360         fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
4361         fwater_hi = (cwm & 0x1f);
4362
4363         /* Set request length to 8 cachelines per fetch */
4364         fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
4365         fwater_hi = fwater_hi | (1 << 8);
4366
4367         I915_WRITE(FW_BLC, fwater_lo);
4368         I915_WRITE(FW_BLC2, fwater_hi);
4369
4370         if (HAS_FW_BLC(dev)) {
4371                 if (enabled) {
4372                         if (IS_I945G(dev) || IS_I945GM(dev))
4373                                 I915_WRITE(FW_BLC_SELF,
4374                                            FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
4375                         else if (IS_I915GM(dev))
4376                                 I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
4377                         DRM_DEBUG_KMS("memory self refresh enabled\n");
4378                 } else
4379                         DRM_DEBUG_KMS("memory self refresh disabled\n");
4380         }
4381 }
4382
4383 static void i830_update_wm(struct drm_device *dev)
4384 {
4385         struct drm_i915_private *dev_priv = dev->dev_private;
4386         struct drm_crtc *crtc;
4387         uint32_t fwater_lo;
4388         int planea_wm;
4389
4390         crtc = single_enabled_crtc(dev);
4391         if (crtc == NULL)
4392                 return;
4393
4394         planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
4395                                        dev_priv->display.get_fifo_size(dev, 0),
4396                                        crtc->fb->bits_per_pixel / 8,
4397                                        latency_ns);
4398         fwater_lo = I915_READ(FW_BLC) & ~0xfff;
4399         fwater_lo |= (3<<8) | planea_wm;
4400
4401         DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
4402
4403         I915_WRITE(FW_BLC, fwater_lo);
4404 }
4405
4406 #define ILK_LP0_PLANE_LATENCY           700
4407 #define ILK_LP0_CURSOR_LATENCY          1300
4408
4409 /*
4410  * Check the wm result.
4411  *
4412  * If any calculated watermark value is larger than the maximum value that
4413  * can be programmed into the associated watermark register, that watermark
4414  * must be disabled.
4415  */
4416 static bool ironlake_check_srwm(struct drm_device *dev, int level,
4417                                 int fbc_wm, int display_wm, int cursor_wm,
4418                                 const struct intel_watermark_params *display,
4419                                 const struct intel_watermark_params *cursor)
4420 {
4421         struct drm_i915_private *dev_priv = dev->dev_private;
4422
4423         DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
4424                       " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
4425
4426         if (fbc_wm > SNB_FBC_MAX_SRWM) {
4427                 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
4428                               fbc_wm, SNB_FBC_MAX_SRWM, level);
4429
4430                 /* fbc has its own way to disable the FBC WM */
4431                 I915_WRITE(DISP_ARB_CTL,
4432                            I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
4433                 return false;
4434         }
4435
4436         if (display_wm > display->max_wm) {
4437                 DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
4438                               display_wm, SNB_DISPLAY_MAX_SRWM, level);
4439                 return false;
4440         }
4441
4442         if (cursor_wm > cursor->max_wm) {
4443                 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
4444                               cursor_wm, SNB_CURSOR_MAX_SRWM, level);
4445                 return false;
4446         }
4447
4448         if (!(fbc_wm || display_wm || cursor_wm)) {
4449                 DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
4450                 return false;
4451         }
4452
4453         return true;
4454 }
4455
4456 /*
4457  * Compute the watermark values for WM[1-3].
4458  */
4459 static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
4460                                   int latency_ns,
4461                                   const struct intel_watermark_params *display,
4462                                   const struct intel_watermark_params *cursor,
4463                                   int *fbc_wm, int *display_wm, int *cursor_wm)
4464 {
4465         struct drm_crtc *crtc;
4466         unsigned long line_time_us;
4467         int hdisplay, htotal, pixel_size, clock;
4468         int line_count, line_size;
4469         int small, large;
4470         int entries;
4471
4472         if (!latency_ns) {
4473                 *fbc_wm = *display_wm = *cursor_wm = 0;
4474                 return false;
4475         }
4476
4477         crtc = intel_get_crtc_for_plane(dev, plane);
4478         hdisplay = crtc->mode.hdisplay;
4479         htotal = crtc->mode.htotal;
4480         clock = crtc->mode.clock;
4481         pixel_size = crtc->fb->bits_per_pixel / 8;
4482
4483         line_time_us = (htotal * 1000) / clock;
4484         line_count = (latency_ns / line_time_us + 1000) / 1000;
4485         line_size = hdisplay * pixel_size;
4486
4487         /* Use the minimum of the small and large buffer methods for the primary plane */
4488         small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4489         large = line_count * line_size;
4490
4491         entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
4492         *display_wm = entries + display->guard_size;
4493
4494         /*
4495          * Spec says:
4496          * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
4497          */
4498         *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
4499
4500         /* calculate the self-refresh watermark for display cursor */
4501         entries = line_count * pixel_size * 64;
4502         entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4503         *cursor_wm = entries + cursor->guard_size;
4504
4505         return ironlake_check_srwm(dev, level,
4506                                    *fbc_wm, *display_wm, *cursor_wm,
4507                                    display, cursor);
4508 }
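/*
 * A sketch of the calculation above with assumed numbers (not taken from any
 * platform table): a 1920x1080@60 mode (clock 148500 kHz, htotal 2200) at
 * 32bpp, latency_ns = 12000, 64-byte cachelines and a guard size of 2:
 *
 *   line_time_us = (2200 * 1000) / 148500               = 14
 *   line_count   = (12000 / 14 + 1000) / 1000           = 1
 *   line_size    = 1920 * 4                             = 7680
 *   small        = ((148500 * 4 / 1000) * 12000) / 1000 = 7128
 *   large        = 1 * 7680                             = 7680
 *   entries      = DIV_ROUND_UP(min(7128, 7680), 64)    = 112
 *   display_wm   = 112 + 2                              = 114
 *   fbc_wm       = DIV_ROUND_UP(114 * 64, 7680) + 2     = 3
 *   cursor_wm    = DIV_ROUND_UP(1 * 4 * 64, 64) + 2     = 6
 */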
4509
4510 static void ironlake_update_wm(struct drm_device *dev)
4511 {
4512         struct drm_i915_private *dev_priv = dev->dev_private;
4513         int fbc_wm, plane_wm, cursor_wm;
4514         unsigned int enabled;
4515
4516         enabled = 0;
4517         if (g4x_compute_wm0(dev, 0,
4518                             &ironlake_display_wm_info,
4519                             ILK_LP0_PLANE_LATENCY,
4520                             &ironlake_cursor_wm_info,
4521                             ILK_LP0_CURSOR_LATENCY,
4522                             &plane_wm, &cursor_wm)) {
4523                 I915_WRITE(WM0_PIPEA_ILK,
4524                            (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4525                 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
4526                               " plane %d, " "cursor: %d\n",
4527                               plane_wm, cursor_wm);
4528                 enabled |= 1;
4529         }
4530
4531         if (g4x_compute_wm0(dev, 1,
4532                             &ironlake_display_wm_info,
4533                             ILK_LP0_PLANE_LATENCY,
4534                             &ironlake_cursor_wm_info,
4535                             ILK_LP0_CURSOR_LATENCY,
4536                             &plane_wm, &cursor_wm)) {
4537                 I915_WRITE(WM0_PIPEB_ILK,
4538                            (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4539                 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
4540                               " plane %d, cursor: %d\n",
4541                               plane_wm, cursor_wm);
4542                 enabled |= 2;
4543         }
4544
4545         /*
4546          * Calculate and update the self-refresh watermark only when one
4547          * display plane is used.
4548          */
4549         I915_WRITE(WM3_LP_ILK, 0);
4550         I915_WRITE(WM2_LP_ILK, 0);
4551         I915_WRITE(WM1_LP_ILK, 0);
4552
4553         if (!single_plane_enabled(enabled))
4554                 return;
4555         enabled = ffs(enabled) - 1;
4556
4557         /* WM1 */
4558         if (!ironlake_compute_srwm(dev, 1, enabled,
4559                                    ILK_READ_WM1_LATENCY() * 500,
4560                                    &ironlake_display_srwm_info,
4561                                    &ironlake_cursor_srwm_info,
4562                                    &fbc_wm, &plane_wm, &cursor_wm))
4563                 return;
4564
4565         I915_WRITE(WM1_LP_ILK,
4566                    WM1_LP_SR_EN |
4567                    (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4568                    (fbc_wm << WM1_LP_FBC_SHIFT) |
4569                    (plane_wm << WM1_LP_SR_SHIFT) |
4570                    cursor_wm);
4571
4572         /* WM2 */
4573         if (!ironlake_compute_srwm(dev, 2, enabled,
4574                                    ILK_READ_WM2_LATENCY() * 500,
4575                                    &ironlake_display_srwm_info,
4576                                    &ironlake_cursor_srwm_info,
4577                                    &fbc_wm, &plane_wm, &cursor_wm))
4578                 return;
4579
4580         I915_WRITE(WM2_LP_ILK,
4581                    WM2_LP_EN |
4582                    (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4583                    (fbc_wm << WM1_LP_FBC_SHIFT) |
4584                    (plane_wm << WM1_LP_SR_SHIFT) |
4585                    cursor_wm);
4586
4587         /*
4588          * WM3 is unsupported on ILK, probably because we don't have latency
4589          * data for that power state
4590          */
4591 }
4592
4593 void sandybridge_update_wm(struct drm_device *dev)
4594 {
4595         struct drm_i915_private *dev_priv = dev->dev_private;
4596         int latency = SNB_READ_WM0_LATENCY() * 100;     /* 0.1us units, converted to ns */
4597         u32 val;
4598         int fbc_wm, plane_wm, cursor_wm;
4599         unsigned int enabled;
4600
4601         enabled = 0;
4602         if (g4x_compute_wm0(dev, 0,
4603                             &sandybridge_display_wm_info, latency,
4604                             &sandybridge_cursor_wm_info, latency,
4605                             &plane_wm, &cursor_wm)) {
4606                 val = I915_READ(WM0_PIPEA_ILK);
4607                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4608                 I915_WRITE(WM0_PIPEA_ILK, val |
4609                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4610                 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
4611                               " plane %d, " "cursor: %d\n",
4612                               plane_wm, cursor_wm);
4613                 enabled |= 1;
4614         }
4615
4616         if (g4x_compute_wm0(dev, 1,
4617                             &sandybridge_display_wm_info, latency,
4618                             &sandybridge_cursor_wm_info, latency,
4619                             &plane_wm, &cursor_wm)) {
4620                 val = I915_READ(WM0_PIPEB_ILK);
4621                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4622                 I915_WRITE(WM0_PIPEB_ILK, val |
4623                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4624                 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
4625                               " plane %d, cursor: %d\n",
4626                               plane_wm, cursor_wm);
4627                 enabled |= 2;
4628         }
4629
4630         /* IVB has 3 pipes */
4631         if (IS_IVYBRIDGE(dev) &&
4632             g4x_compute_wm0(dev, 2,
4633                             &sandybridge_display_wm_info, latency,
4634                             &sandybridge_cursor_wm_info, latency,
4635                             &plane_wm, &cursor_wm)) {
4636                 val = I915_READ(WM0_PIPEC_IVB);
4637                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4638                 I915_WRITE(WM0_PIPEC_IVB, val |
4639                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4640                 DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
4641                               " plane %d, cursor: %d\n",
4642                               plane_wm, cursor_wm);
4643                 enabled |= 1 << 2;
4644         }
4645
4646         /*
4647          * Calculate and update the self-refresh watermark only when one
4648          * display plane is used.
4649          *
4650          * SNB supports 3 levels of watermarks.
4651          *
4652          * WM1/WM2/WM3 watermarks have to be enabled in ascending order
4653          * and disabled in descending order.
4654          *
4655          */
4656         I915_WRITE(WM3_LP_ILK, 0);
4657         I915_WRITE(WM2_LP_ILK, 0);
4658         I915_WRITE(WM1_LP_ILK, 0);
4659
4660         if (!single_plane_enabled(enabled) ||
4661             dev_priv->sprite_scaling_enabled)
4662                 return;
4663         enabled = ffs(enabled) - 1;
4664
4665         /* WM1 */
4666         if (!ironlake_compute_srwm(dev, 1, enabled,
4667                                    SNB_READ_WM1_LATENCY() * 500,
4668                                    &sandybridge_display_srwm_info,
4669                                    &sandybridge_cursor_srwm_info,
4670                                    &fbc_wm, &plane_wm, &cursor_wm))
4671                 return;
4672
4673         I915_WRITE(WM1_LP_ILK,
4674                    WM1_LP_SR_EN |
4675                    (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4676                    (fbc_wm << WM1_LP_FBC_SHIFT) |
4677                    (plane_wm << WM1_LP_SR_SHIFT) |
4678                    cursor_wm);
4679
4680         /* WM2 */
4681         if (!ironlake_compute_srwm(dev, 2, enabled,
4682                                    SNB_READ_WM2_LATENCY() * 500,
4683                                    &sandybridge_display_srwm_info,
4684                                    &sandybridge_cursor_srwm_info,
4685                                    &fbc_wm, &plane_wm, &cursor_wm))
4686                 return;
4687
4688         I915_WRITE(WM2_LP_ILK,
4689                    WM2_LP_EN |
4690                    (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4691                    (fbc_wm << WM1_LP_FBC_SHIFT) |
4692                    (plane_wm << WM1_LP_SR_SHIFT) |
4693                    cursor_wm);
4694
4695         /* WM3 */
4696         if (!ironlake_compute_srwm(dev, 3, enabled,
4697                                    SNB_READ_WM3_LATENCY() * 500,
4698                                    &sandybridge_display_srwm_info,
4699                                    &sandybridge_cursor_srwm_info,
4700                                    &fbc_wm, &plane_wm, &cursor_wm))
4701                 return;
4702
4703         I915_WRITE(WM3_LP_ILK,
4704                    WM3_LP_EN |
4705                    (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4706                    (fbc_wm << WM1_LP_FBC_SHIFT) |
4707                    (plane_wm << WM1_LP_SR_SHIFT) |
4708                    cursor_wm);
4709 }
4710
4711 static bool
4712 sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
4713                               uint32_t sprite_width, int pixel_size,
4714                               const struct intel_watermark_params *display,
4715                               int display_latency_ns, int *sprite_wm)
4716 {
4717         struct drm_crtc *crtc;
4718         int clock;
4719         int entries, tlb_miss;
4720
4721         crtc = intel_get_crtc_for_plane(dev, plane);
4722         if (crtc->fb == NULL || !crtc->enabled) {
4723                 *sprite_wm = display->guard_size;
4724                 return false;
4725         }
4726
4727         clock = crtc->mode.clock;
4728
4729         /* Use the small buffer method to calculate the sprite watermark */
4730         entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
4731         tlb_miss = display->fifo_size*display->cacheline_size -
4732                 sprite_width * 8;
4733         if (tlb_miss > 0)
4734                 entries += tlb_miss;
4735         entries = DIV_ROUND_UP(entries, display->cacheline_size);
4736         *sprite_wm = entries + display->guard_size;
4737         if (*sprite_wm > (int)display->max_wm)
4738                 *sprite_wm = display->max_wm;
4739
4740         return true;
4741 }
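/*
 * To make the small-buffer method above concrete (all numbers are assumed for
 * illustration): a 148500 kHz clock, 32bpp, a 700ns WM0 latency, a 1920-wide
 * sprite and a 128-entry, 64-byte-cacheline FIFO with a guard size of 2 give
 *
 *   entries   = ((148500 * 4 / 1000) * 700) / 1000 = 415
 *   tlb_miss  = 128 * 64 - 1920 * 8                = -7168  (negative, ignored)
 *   entries   = DIV_ROUND_UP(415, 64)              = 7
 *   sprite_wm = 7 + 2                              = 9
 */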
4742
4743 static bool
4744 sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
4745                                 uint32_t sprite_width, int pixel_size,
4746                                 const struct intel_watermark_params *display,
4747                                 int latency_ns, int *sprite_wm)
4748 {
4749         struct drm_crtc *crtc;
4750         unsigned long line_time_us;
4751         int clock;
4752         int line_count, line_size;
4753         int small, large;
4754         int entries;
4755
4756         if (!latency_ns) {
4757                 *sprite_wm = 0;
4758                 return false;
4759         }
4760
4761         crtc = intel_get_crtc_for_plane(dev, plane);
4762         clock = crtc->mode.clock;
4763         if (!clock) {
4764                 *sprite_wm = 0;
4765                 return false;
4766         }
4767
4768         line_time_us = (sprite_width * 1000) / clock;
4769         if (!line_time_us) {
4770                 *sprite_wm = 0;
4771                 return false;
4772         }
4773
4774         line_count = (latency_ns / line_time_us + 1000) / 1000;
4775         line_size = sprite_width * pixel_size;
4776
4777         /* Use the minimum of the small and large buffer methods */
4778         small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4779         large = line_count * line_size;
4780
4781         entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
4782         *sprite_wm = entries + display->guard_size;
4783
4784         return *sprite_wm <= 0x3ff;
4785 }
4786
4787 static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
4788                                          uint32_t sprite_width, int pixel_size)
4789 {
4790         struct drm_i915_private *dev_priv = dev->dev_private;
4791         int latency = SNB_READ_WM0_LATENCY() * 100;     /* 0.1us units, converted to ns */
4792         u32 val;
4793         int sprite_wm, reg;
4794         int ret;
4795
4796         switch (pipe) {
4797         case 0:
4798                 reg = WM0_PIPEA_ILK;
4799                 break;
4800         case 1:
4801                 reg = WM0_PIPEB_ILK;
4802                 break;
4803         case 2:
4804                 reg = WM0_PIPEC_IVB;
4805                 break;
4806         default:
4807                 return; /* bad pipe */
4808         }
4809
4810         ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
4811                                             &sandybridge_display_wm_info,
4812                                             latency, &sprite_wm);
4813         if (!ret) {
4814                 DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
4815                               pipe);
4816                 return;
4817         }
4818
4819         val = I915_READ(reg);
4820         val &= ~WM0_PIPE_SPRITE_MASK;
4821         I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
4822         DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
4823
4824
4825         ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
4826                                               pixel_size,
4827                                               &sandybridge_display_srwm_info,
4828                                               SNB_READ_WM1_LATENCY() * 500,
4829                                               &sprite_wm);
4830         if (!ret) {
4831                 DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
4832                               pipe);
4833                 return;
4834         }
4835         I915_WRITE(WM1S_LP_ILK, sprite_wm);
4836
4837         /* Only IVB has two more LP watermarks for sprite */
4838         if (!IS_IVYBRIDGE(dev))
4839                 return;
4840
4841         ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
4842                                               pixel_size,
4843                                               &sandybridge_display_srwm_info,
4844                                               SNB_READ_WM2_LATENCY() * 500,
4845                                               &sprite_wm);
4846         if (!ret) {
4847                 DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
4848                               pipe);
4849                 return;
4850         }
4851         I915_WRITE(WM2S_LP_IVB, sprite_wm);
4852
4853         ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
4854                                               pixel_size,
4855                                               &sandybridge_display_srwm_info,
4856                                               SNB_READ_WM3_LATENCY() * 500,
4857                                               &sprite_wm);
4858         if (!ret) {
4859                 DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
4860                               pipe);
4861                 return;
4862         }
4863         I915_WRITE(WM3S_LP_IVB, sprite_wm);
4864 }
4865
4866 /**
4867  * intel_update_watermarks - update FIFO watermark values based on current modes
4868  *
4869  * Calculate watermark values for the various WM regs based on current mode
4870  * and plane configuration.
4871  *
4872  * There are several cases to deal with here:
4873  *   - normal (i.e. non-self-refresh)
4874  *   - self-refresh (SR) mode
4875  *   - lines are large relative to FIFO size (buffer can hold up to 2)
4876  *   - lines are small relative to FIFO size (buffer can hold more than 2
4877  *     lines), so need to account for TLB latency
4878  *
4879  *   The normal calculation is:
4880  *     watermark = dotclock * bytes per pixel * latency
4881  *   where latency is platform & configuration dependent (we assume pessimal
4882  *   values here).
4883  *
4884  *   The SR calculation is:
4885  *     watermark = (trunc(latency/line time)+1) * surface width *
4886  *       bytes per pixel
4887  *   where
4888  *     line time = htotal / dotclock
4889  *     surface width = hdisplay for normal plane and 64 for cursor
4890  *   and latency is assumed to be high, as above.
4891  *
4892  * The final value programmed to the register should always be rounded up,
4893  * and include an extra 2 entries to account for clock crossings.
4894  *
4895  * We don't use the sprite, so we can ignore that.  And on Crestline we have
4896  * to set the non-SR watermarks to 8.
4897  */
4898 static void intel_update_watermarks(struct drm_device *dev)
4899 {
4900         struct drm_i915_private *dev_priv = dev->dev_private;
4901
4902         if (dev_priv->display.update_wm)
4903                 dev_priv->display.update_wm(dev);
4904 }
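/*
 * A rough example of the "normal" formula documented above (numbers are
 * illustrative only): a 148.5 MHz dotclock at 4 bytes per pixel with a 2us
 * latency consumes about 148500 kHz * 4 B * 2 us = ~1188 bytes, i.e. roughly
 * 19 64-byte FIFO entries, during the latency window; the per-platform
 * update_wm functions above round this up and add their guard entries.
 */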
4905
4906 void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
4907                                     uint32_t sprite_width, int pixel_size)
4908 {
4909         struct drm_i915_private *dev_priv = dev->dev_private;
4910
4911         if (dev_priv->display.update_sprite_wm)
4912                 dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
4913                                                    pixel_size);
4914 }
4915
4916 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4917 {
4918         if (i915_panel_use_ssc >= 0)
4919                 return i915_panel_use_ssc != 0;
4920         return dev_priv->lvds_use_ssc
4921                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4922 }
4923
4924 /**
4925  * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
4926  * @crtc: CRTC structure
4927  * @mode: requested mode
4928  *
4929  * A pipe may be connected to one or more outputs.  Based on the depth of the
4930  * attached framebuffer, choose a good color depth to use on the pipe.
4931  *
4932  * If possible, match the pipe depth to the fb depth.  In some cases, this
4933  * isn't ideal, because the connected output supports a lesser or restricted
4934  * set of depths.  Resolve that here:
4935  *    LVDS typically supports only 6bpc, so clamp down in that case
4936  *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
4937  *    Displays may support a restricted set as well, check EDID and clamp as
4938  *      appropriate.
4939  *    DP may want to dither down to 6bpc to fit larger modes
4940  *
4941  * RETURNS:
4942  * Dithering requirement (i.e. false if display bpc and pipe bpc match,
4943  * true if they don't match).
4944  */
4945 static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4946                                          unsigned int *pipe_bpp,
4947                                          struct drm_display_mode *mode)
4948 {
4949         struct drm_device *dev = crtc->dev;
4950         struct drm_i915_private *dev_priv = dev->dev_private;
4951         struct drm_encoder *encoder;
4952         struct drm_connector *connector;
4953         unsigned int display_bpc = UINT_MAX, bpc;
4954
4955         /* Walk the encoders & connectors on this crtc, get min bpc */
4956         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
4957                 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
4958
4959                 if (encoder->crtc != crtc)
4960                         continue;
4961
4962                 if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
4963                         unsigned int lvds_bpc;
4964
4965                         if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
4966                             LVDS_A3_POWER_UP)
4967                                 lvds_bpc = 8;
4968                         else
4969                                 lvds_bpc = 6;
4970
4971                         if (lvds_bpc < display_bpc) {
4972                                 DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
4973                                 display_bpc = lvds_bpc;
4974                         }
4975                         continue;
4976                 }
4977
4978                 /* Not one of the known troublemakers, check the EDID */
4979                 list_for_each_entry(connector, &dev->mode_config.connector_list,
4980                                     head) {
4981                         if (connector->encoder != encoder)
4982                                 continue;
4983
4984                         /* Don't use an invalid EDID bpc value */
4985                         if (connector->display_info.bpc &&
4986                             connector->display_info.bpc < display_bpc) {
4987                                 DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
4988                                 display_bpc = connector->display_info.bpc;
4989                         }
4990                 }
4991
4992                 if (intel_encoder->type == INTEL_OUTPUT_EDP) {
4993                         /* Use VBT settings if we have an eDP panel */
4994                         unsigned int edp_bpc = dev_priv->edp.bpp / 3;
4995
4996                         if (edp_bpc && edp_bpc < display_bpc) {
4997                                 DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
4998                                 display_bpc = edp_bpc;
4999                         }
5000                         continue;
5001                 }
5002
5003                 /*
5004                  * HDMI is either 12 or 8, so a display reporting 9-11bpc is bumped
5005                  * up to 12; anything else is forced to 8.
5006                  */
5007                 if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
5008                         if (display_bpc > 8 && display_bpc < 12) {
5009                                 DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
5010                                 display_bpc = 12;
5011                         } else {
5012                                 DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
5013                                 display_bpc = 8;
5014                         }
5015                 }
5016         }
5017
5018         if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
5019                 DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
5020                 display_bpc = 6;
5021         }
5022
5023         /*
5024          * We could just drive the pipe at the highest bpc all the time and
5025          * enable dithering as needed, but that costs bandwidth.  So choose
5026          * the minimum value that expresses the full color range of the fb but
5027          * also stays within the max display bpc discovered above.
5028          */
5029
5030         switch (crtc->fb->depth) {
5031         case 8:
5032                 bpc = 8; /* since we go through a colormap */
5033                 break;
5034         case 15:
5035         case 16:
5036                 bpc = 6; /* min is 18bpp */
5037                 break;
5038         case 24:
5039                 bpc = 8;
5040                 break;
5041         case 30:
5042                 bpc = 10;
5043                 break;
5044         case 48:
5045                 bpc = 12;
5046                 break;
5047         default:
5048                 DRM_DEBUG("unsupported depth, assuming 24 bits\n");
5049                 bpc = min((unsigned int)8, display_bpc);
5050                 break;
5051         }
5052
5053         display_bpc = min(display_bpc, bpc);
5054
5055         DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
5056                       bpc, display_bpc);
5057
5058         *pipe_bpp = display_bpc * 3;
5059
5060         return display_bpc != bpc;
5061 }
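/*
 * Example of the selection above (hypothetical configuration): a 24-bit
 * framebuffer driving an LVDS panel whose A3 power bit is down yields bpc = 8
 * from the fb depth but display_bpc = 6 from the panel, so the pipe is
 * programmed for 18bpp (*pipe_bpp = 18) and the function returns true to
 * request dithering.
 */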
5062
5063 static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
5064 {
5065         struct drm_device *dev = crtc->dev;
5066         struct drm_i915_private *dev_priv = dev->dev_private;
5067         int refclk;
5068
5069         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
5070             intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5071                 refclk = dev_priv->lvds_ssc_freq * 1000;
5072                 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
5073                               refclk / 1000);
5074         } else if (!IS_GEN2(dev)) {
5075                 refclk = 96000;
5076         } else {
5077                 refclk = 48000;
5078         }
5079
5080         return refclk;
5081 }
5082
5083 static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
5084                                       intel_clock_t *clock)
5085 {
5086         /* SDVO TV has fixed PLL values that depend on its clock range;
5087            this mirrors the VBIOS setting. */
5088         if (adjusted_mode->clock >= 100000
5089             && adjusted_mode->clock < 140500) {
5090                 clock->p1 = 2;
5091                 clock->p2 = 10;
5092                 clock->n = 3;
5093                 clock->m1 = 16;
5094                 clock->m2 = 8;
5095         } else if (adjusted_mode->clock >= 140500
5096                    && adjusted_mode->clock <= 200000) {
5097                 clock->p1 = 1;
5098                 clock->p2 = 10;
5099                 clock->n = 6;
5100                 clock->m1 = 12;
5101                 clock->m2 = 8;
5102         }
5103 }
5104
5105 static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
5106                                      intel_clock_t *clock,
5107                                      intel_clock_t *reduced_clock)
5108 {
5109         struct drm_device *dev = crtc->dev;
5110         struct drm_i915_private *dev_priv = dev->dev_private;
5111         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5112         int pipe = intel_crtc->pipe;
5113         u32 fp, fp2 = 0;
5114
5115         if (IS_PINEVIEW(dev)) {
5116                 fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
5117                 if (reduced_clock)
5118                         fp2 = (1 << reduced_clock->n) << 16 |
5119                                 reduced_clock->m1 << 8 | reduced_clock->m2;
5120         } else {
5121                 fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
5122                 if (reduced_clock)
5123                         fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
5124                                 reduced_clock->m2;
5125         }
5126
5127         I915_WRITE(FP0(pipe), fp);
5128
5129         intel_crtc->lowfreq_avail = false;
5130         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
5131             reduced_clock && i915_powersave) {
5132                 I915_WRITE(FP1(pipe), fp2);
5133                 intel_crtc->lowfreq_avail = true;
5134         } else {
5135                 I915_WRITE(FP1(pipe), fp);
5136         }
5137 }
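/*
 * As an illustration of the FP register packing above (divisor values are
 * made up): on a non-Pineview part, n = 6, m1 = 12, m2 = 8 is written as
 *
 *   fp = 6 << 16 | 12 << 8 | 8 = 0x00060c08
 *
 * whereas Pineview stores n one-hot, so the same n would be programmed as
 * (1 << 6) << 16 in the upper field instead.
 */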
5138
5139 static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5140                               struct drm_display_mode *mode,
5141                               struct drm_display_mode *adjusted_mode,
5142                               int x, int y,
5143                               struct drm_framebuffer *old_fb)
5144 {
5145         struct drm_device *dev = crtc->dev;
5146         struct drm_i915_private *dev_priv = dev->dev_private;
5147         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5148         int pipe = intel_crtc->pipe;
5149         int plane = intel_crtc->plane;
5150         int refclk, num_connectors = 0;
5151         intel_clock_t clock, reduced_clock;
5152         u32 dpll, dspcntr, pipeconf, vsyncshift;
5153         bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
5154         bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
5155         struct drm_mode_config *mode_config = &dev->mode_config;
5156         struct intel_encoder *encoder;
5157         const intel_limit_t *limit;
5158         int ret;
5159         u32 temp;
5160         u32 lvds_sync = 0;
5161
5162         list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5163                 if (encoder->base.crtc != crtc)
5164                         continue;
5165
5166                 switch (encoder->type) {
5167                 case INTEL_OUTPUT_LVDS:
5168                         is_lvds = true;
5169                         break;
5170                 case INTEL_OUTPUT_SDVO:
5171                 case INTEL_OUTPUT_HDMI:
5172                         is_sdvo = true;
5173                         if (encoder->needs_tv_clock)
5174                                 is_tv = true;
5175                         break;
5176                 case INTEL_OUTPUT_DVO:
5177                         is_dvo = true;
5178                         break;
5179                 case INTEL_OUTPUT_TVOUT:
5180                         is_tv = true;
5181                         break;
5182                 case INTEL_OUTPUT_ANALOG:
5183                         is_crt = true;
5184                         break;
5185                 case INTEL_OUTPUT_DISPLAYPORT:
5186                         is_dp = true;
5187                         break;
5188                 }
5189
5190                 num_connectors++;
5191         }
5192
5193         refclk = i9xx_get_refclk(crtc, num_connectors);
5194
5195         /*
5196          * Returns a set of divisors for the desired target clock with the given
5197          * refclk, or FALSE.  The returned values represent the clock equation:
5198          * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
5199          */
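        /*
         * For example (made-up divisors, ignoring the VCO and limit checks):
         * with a 96000 kHz refclk, m1 = 10, m2 = 8, n = 4, p1 = 2, p2 = 5:
         *
         *   dot = 96000 * (5 * (10 + 2) + (8 + 2)) / (4 + 2) / 2 / 5
         *       = 96000 * 70 / 6 / 10 = 112000 kHz
         */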
5200         limit = intel_limit(crtc, refclk);
5201         ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
5202                              &clock);
5203         if (!ok) {
5204                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
5205                 return -EINVAL;
5206         }
5207
5208         /* Ensure that the cursor is valid for the new mode before changing... */
5209         intel_crtc_update_cursor(crtc, true);
5210
5211         if (is_lvds && dev_priv->lvds_downclock_avail) {
5212                 /*
5213                  * Ensure we match the reduced clock's P to the target clock.
5214                  * If the clocks don't match, we can't switch the display clock
5215                  * by using the FP0/FP1. In that case we disable the LVDS
5216                  * downclock feature.
5217                  */
5218                 has_reduced_clock = limit->find_pll(limit, crtc,
5219                                                     dev_priv->lvds_downclock,
5220                                                     refclk,
5221                                                     &clock,
5222                                                     &reduced_clock);
5223         }
5224
5225         if (is_sdvo && is_tv)
5226                 i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
5227
5228         i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
5229                                  &reduced_clock : NULL);
5230
5231         dpll = DPLL_VGA_MODE_DIS;
5232
5233         if (!IS_GEN2(dev)) {
5234                 if (is_lvds)
5235                         dpll |= DPLLB_MODE_LVDS;
5236                 else
5237                         dpll |= DPLLB_MODE_DAC_SERIAL;
5238                 if (is_sdvo) {
5239                         int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5240                         if (pixel_multiplier > 1) {
5241                                 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
5242                                         dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
5243                         }
5244                         dpll |= DPLL_DVO_HIGH_SPEED;
5245                 }
5246                 if (is_dp)
5247                         dpll |= DPLL_DVO_HIGH_SPEED;
5248
5249                 /* compute bitmask from p1 value */
5250                 if (IS_PINEVIEW(dev))
5251                         dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
5252                 else {
5253                         dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5254                         if (IS_G4X(dev) && has_reduced_clock)
5255                                 dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5256                 }
5257                 switch (clock.p2) {
5258                 case 5:
5259                         dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5260                         break;
5261                 case 7:
5262                         dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5263                         break;
5264                 case 10:
5265                         dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5266                         break;
5267                 case 14:
5268                         dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5269                         break;
5270                 }
5271                 if (INTEL_INFO(dev)->gen >= 4)
5272                         dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
5273         } else {
5274                 if (is_lvds) {
5275                         dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5276                 } else {
5277                         if (clock.p1 == 2)
5278                                 dpll |= PLL_P1_DIVIDE_BY_TWO;
5279                         else
5280                                 dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5281                         if (clock.p2 == 4)
5282                                 dpll |= PLL_P2_DIVIDE_BY_4;
5283                 }
5284         }
5285
5286         if (is_sdvo && is_tv)
5287                 dpll |= PLL_REF_INPUT_TVCLKINBC;
5288         else if (is_tv)
5289                 /* XXX: just matching BIOS for now */
5290                 /*      dpll |= PLL_REF_INPUT_TVCLKINBC; */
5291                 dpll |= 3;
5292         else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5293                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5294         else
5295                 dpll |= PLL_REF_INPUT_DREFCLK;
5296
5297         /* setup pipeconf */
5298         pipeconf = I915_READ(PIPECONF(pipe));
5299
5300         /* Set up the display plane register */
5301         dspcntr = DISPPLANE_GAMMA_ENABLE;
5302
5303         if (pipe == 0)
5304                 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
5305         else
5306                 dspcntr |= DISPPLANE_SEL_PIPE_B;
5307
5308         if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
5309                 /* Enable pixel doubling when the dot clock is > 90% of the (display)
5310                  * core speed.
5311                  *
5312                  * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
5313                  * pipe == 0 check?
5314                  */
5315                 if (mode->clock >
5316                     dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
5317                         pipeconf |= PIPECONF_DOUBLE_WIDE;
5318                 else
5319                         pipeconf &= ~PIPECONF_DOUBLE_WIDE;
5320         }
5321
5322         /* default to 8bpc */
5323         pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
5324         if (is_dp) {
5325                 if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
5326                         pipeconf |= PIPECONF_BPP_6 |
5327                                     PIPECONF_DITHER_EN |
5328                                     PIPECONF_DITHER_TYPE_SP;
5329                 }
5330         }
5331
5332         dpll |= DPLL_VCO_ENABLE;
5333
5334         DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
5335         drm_mode_debug_printmodeline(mode);
5336
5337         I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
5338
5339         POSTING_READ(DPLL(pipe));
5340         udelay(150);
5341
5342         /* The LVDS pin pair needs to be on before the DPLLs are enabled.
5343          * This is an exception to the general rule that mode_set doesn't turn
5344          * things on.
5345          */
5346         if (is_lvds) {
5347                 temp = I915_READ(LVDS);
5348                 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
5349                 if (pipe == 1) {
5350                         temp |= LVDS_PIPEB_SELECT;
5351                 } else {
5352                         temp &= ~LVDS_PIPEB_SELECT;
5353                 }
5354                 /* set the corresponding LVDS_BORDER bit */
5355                 temp |= dev_priv->lvds_border_bits;
5356                 /* Set the B0-B3 data pairs corresponding to whether we're going to
5357                  * set the DPLLs for dual-channel mode or not.
5358                  */
5359                 if (clock.p2 == 7)
5360                         temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
5361                 else
5362                         temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
5363
5364                 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
5365                  * appropriately here, but we need to look more thoroughly into how
5366                  * panels behave in the two modes.
5367                  */
5368                 /* set the dithering flag on LVDS as needed */
5369                 if (INTEL_INFO(dev)->gen >= 4) {
5370                         if (dev_priv->lvds_dither)
5371                                 temp |= LVDS_ENABLE_DITHER;
5372                         else
5373                                 temp &= ~LVDS_ENABLE_DITHER;
5374                 }
5375                 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5376                         lvds_sync |= LVDS_HSYNC_POLARITY;
5377                 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
5378                         lvds_sync |= LVDS_VSYNC_POLARITY;
5379                 if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
5380                     != lvds_sync) {
5381                         char flags[2] = "-+";
5382                         DRM_INFO("Changing LVDS panel from "
5383                                  "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
5384                                  flags[!(temp & LVDS_HSYNC_POLARITY)],
5385                                  flags[!(temp & LVDS_VSYNC_POLARITY)],
5386                                  flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
5387                                  flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
5388                         temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5389                         temp |= lvds_sync;
5390                 }
5391                 I915_WRITE(LVDS, temp);
5392         }
5393
5394         if (is_dp) {
5395                 intel_dp_set_m_n(crtc, mode, adjusted_mode);
5396         }
5397
5398         I915_WRITE(DPLL(pipe), dpll);
5399
5400         /* Wait for the clocks to stabilize. */
5401         POSTING_READ(DPLL(pipe));
5402         udelay(150);
5403
5404         if (INTEL_INFO(dev)->gen >= 4) {
5405                 temp = 0;
5406                 if (is_sdvo) {
5407                         temp = intel_mode_get_pixel_multiplier(adjusted_mode);
5408                         if (temp > 1)
5409                                 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5410                         else
5411                                 temp = 0;
5412                 }
5413                 I915_WRITE(DPLL_MD(pipe), temp);
5414         } else {
5415                 /* The pixel multiplier can only be updated once the
5416                  * DPLL is enabled and the clocks are stable.
5417                  *
5418                  * So write it again.
5419                  */
5420                 I915_WRITE(DPLL(pipe), dpll);
5421         }
5422
5423         if (HAS_PIPE_CXSR(dev)) {
5424                 if (intel_crtc->lowfreq_avail) {
5425                         DRM_DEBUG_KMS("enabling CxSR downclocking\n");
5426                         pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
5427                 } else {
5428                         DRM_DEBUG_KMS("disabling CxSR downclocking\n");
5429                         pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
5430                 }
5431         }
5432
5433         pipeconf &= ~PIPECONF_INTERLACE_MASK;
5434         if (!IS_GEN2(dev) &&
5435             adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5436                 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
5437                 /* the chip adds 2 halflines automatically */
5438                 adjusted_mode->crtc_vtotal -= 1;
5439                 adjusted_mode->crtc_vblank_end -= 1;
5440                 vsyncshift = adjusted_mode->crtc_hsync_start
5441                              - adjusted_mode->crtc_htotal/2;
5442         } else {
5443                 pipeconf |= PIPECONF_PROGRESSIVE;
5444                 vsyncshift = 0;
5445         }
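        /*
         * For instance, a hypothetical 1920x1080i timing with htotal 2200 and
         * hsync_start 2008 takes the interlaced branch above and ends up with
         * vsyncshift = 2008 - 2200/2 = 908, while progressive modes simply use
         * vsyncshift = 0.
         */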
5446
5447         if (!IS_GEN3(dev))
5448                 I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
5449
5450         I915_WRITE(HTOTAL(pipe),
5451                    (adjusted_mode->crtc_hdisplay - 1) |
5452                    ((adjusted_mode->crtc_htotal - 1) << 16));
5453         I915_WRITE(HBLANK(pipe),
5454                    (adjusted_mode->crtc_hblank_start - 1) |
5455                    ((adjusted_mode->crtc_hblank_end - 1) << 16));
5456         I915_WRITE(HSYNC(pipe),
5457                    (adjusted_mode->crtc_hsync_start - 1) |
5458                    ((adjusted_mode->crtc_hsync_end - 1) << 16));
5459
5460         I915_WRITE(VTOTAL(pipe),
5461                    (adjusted_mode->crtc_vdisplay - 1) |
5462                    ((adjusted_mode->crtc_vtotal - 1) << 16));
5463         I915_WRITE(VBLANK(pipe),
5464                    (adjusted_mode->crtc_vblank_start - 1) |
5465                    ((adjusted_mode->crtc_vblank_end - 1) << 16));
5466         I915_WRITE(VSYNC(pipe),
5467                    (adjusted_mode->crtc_vsync_start - 1) |
5468                    ((adjusted_mode->crtc_vsync_end - 1) << 16));
5469
5470         /* pipesrc and dspsize control the size that is scaled from,
5471          * which should always be the user's requested size.
5472          */
5473         I915_WRITE(DSPSIZE(plane),
5474                    ((mode->vdisplay - 1) << 16) |
5475                    (mode->hdisplay - 1));
5476         I915_WRITE(DSPPOS(plane), 0);
5477         I915_WRITE(PIPESRC(pipe),
5478                    ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
5479
5480         I915_WRITE(PIPECONF(pipe), pipeconf);
5481         POSTING_READ(PIPECONF(pipe));
5482         intel_enable_pipe(dev_priv, pipe, false);
5483
5484         intel_wait_for_vblank(dev, pipe);
5485
5486         I915_WRITE(DSPCNTR(plane), dspcntr);
5487         POSTING_READ(DSPCNTR(plane));
5488         intel_enable_plane(dev_priv, plane, pipe);
5489
5490         ret = intel_pipe_set_base(crtc, x, y, old_fb);
5491
5492         intel_update_watermarks(dev);
5493
5494         return ret;
5495 }
5496
5497 /*
5498  * Initialize reference clocks when the driver loads
5499  */
5500 void ironlake_init_pch_refclk(struct drm_device *dev)
5501 {
5502         struct drm_i915_private *dev_priv = dev->dev_private;
5503         struct drm_mode_config *mode_config = &dev->mode_config;
5504         struct intel_encoder *encoder;
5505         u32 temp;
5506         bool has_lvds = false;
5507         bool has_cpu_edp = false;
5508         bool has_pch_edp = false;
5509         bool has_panel = false;
5510         bool has_ck505 = false;
5511         bool can_ssc = false;
5512
5513         /* We need to take the global config into account */
5514         list_for_each_entry(encoder, &mode_config->encoder_list,
5515                             base.head) {
5516                 switch (encoder->type) {
5517                 case INTEL_OUTPUT_LVDS:
5518                         has_panel = true;
5519                         has_lvds = true;
5520                         break;
5521                 case INTEL_OUTPUT_EDP:
5522                         has_panel = true;
5523                         if (intel_encoder_is_pch_edp(&encoder->base))
5524                                 has_pch_edp = true;
5525                         else
5526                                 has_cpu_edp = true;
5527                         break;
5528                 }
5529         }
5530
5531         if (HAS_PCH_IBX(dev)) {
5532                 has_ck505 = dev_priv->display_clock_mode;
5533                 can_ssc = has_ck505;
5534         } else {
5535                 has_ck505 = false;
5536                 can_ssc = true;
5537         }
5538
5539         DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
5540                       has_panel, has_lvds, has_pch_edp, has_cpu_edp,
5541                       has_ck505);
5542
5543         /* Ironlake: try to set up the display reference clock before
5544          * enabling the DPLL. This is only under the driver's control
5545          * after the PCH B stepping; earlier chipset steppings ignore
5546          * this setting.
5547          */
5548         temp = I915_READ(PCH_DREF_CONTROL);
5549         /* Always enable nonspread source */
5550         temp &= ~DREF_NONSPREAD_SOURCE_MASK;
5551
5552         if (has_ck505)
5553                 temp |= DREF_NONSPREAD_CK505_ENABLE;
5554         else
5555                 temp |= DREF_NONSPREAD_SOURCE_ENABLE;
5556
5557         if (has_panel) {
5558                 temp &= ~DREF_SSC_SOURCE_MASK;
5559                 temp |= DREF_SSC_SOURCE_ENABLE;
5560
5561                 /* SSC must be turned on before enabling the CPU output  */
5562                 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5563                         DRM_DEBUG_KMS("Using SSC on panel\n");
5564                         temp |= DREF_SSC1_ENABLE;
5565                 } else
5566                         temp &= ~DREF_SSC1_ENABLE;
5567
5568                 /* Get SSC going before enabling the outputs */
5569                 I915_WRITE(PCH_DREF_CONTROL, temp);
5570                 POSTING_READ(PCH_DREF_CONTROL);
5571                 udelay(200);
5572
5573                 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5574
5575                 /* Enable CPU source on CPU attached eDP */
5576                 if (has_cpu_edp) {
5577                         if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5578                                 DRM_DEBUG_KMS("Using SSC on eDP\n");
5579                                 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5580                         }
5581                         else
5582                                 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5583                 } else
5584                         temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5585
5586                 I915_WRITE(PCH_DREF_CONTROL, temp);
5587                 POSTING_READ(PCH_DREF_CONTROL);
5588                 udelay(200);
5589         } else {
5590                 DRM_DEBUG_KMS("Disabling SSC entirely\n");
5591
5592                 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5593
5594                 /* Turn off CPU output */
5595                 temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5596
5597                 I915_WRITE(PCH_DREF_CONTROL, temp);
5598                 POSTING_READ(PCH_DREF_CONTROL);
5599                 udelay(200);
5600
5601                 /* Turn off the SSC source */
5602                 temp &= ~DREF_SSC_SOURCE_MASK;
5603                 temp |= DREF_SSC_SOURCE_DISABLE;
5604
5605                 /* Turn off SSC1 */
5606                 temp &= ~DREF_SSC1_ENABLE;
5607
5608                 I915_WRITE(PCH_DREF_CONTROL, temp);
5609                 POSTING_READ(PCH_DREF_CONTROL);
5610                 udelay(200);
5611         }
5612 }
5613
5614 static int ironlake_get_refclk(struct drm_crtc *crtc)
5615 {
5616         struct drm_device *dev = crtc->dev;
5617         struct drm_i915_private *dev_priv = dev->dev_private;
5618         struct intel_encoder *encoder;
5619         struct drm_mode_config *mode_config = &dev->mode_config;
5620         struct intel_encoder *edp_encoder = NULL;
5621         int num_connectors = 0;
5622         bool is_lvds = false;
5623
5624         list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5625                 if (encoder->base.crtc != crtc)
5626                         continue;
5627
5628                 switch (encoder->type) {
5629                 case INTEL_OUTPUT_LVDS:
5630                         is_lvds = true;
5631                         break;
5632                 case INTEL_OUTPUT_EDP:
5633                         edp_encoder = encoder;
5634                         break;
5635                 }
5636                 num_connectors++;
5637         }
5638
5639         if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5640                 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
5641                               dev_priv->lvds_ssc_freq);
5642                 return dev_priv->lvds_ssc_freq * 1000;
5643         }
5644
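        /* Otherwise use the non-SSC 120 MHz PCH reference clock (in kHz). */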
5645         return 120000;
5646 }
5647
5648 static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5649                                   struct drm_display_mode *mode,
5650                                   struct drm_display_mode *adjusted_mode,
5651                                   int x, int y,
5652                                   struct drm_framebuffer *old_fb)
5653 {
5654         struct drm_device *dev = crtc->dev;
5655         struct drm_i915_private *dev_priv = dev->dev_private;
5656         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5657         int pipe = intel_crtc->pipe;
5658         int plane = intel_crtc->plane;
5659         int refclk, num_connectors = 0;
5660         intel_clock_t clock, reduced_clock;
5661         u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
5662         bool ok, has_reduced_clock = false, is_sdvo = false;
5663         bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
5664         struct intel_encoder *has_edp_encoder = NULL;
5665         struct drm_mode_config *mode_config = &dev->mode_config;
5666         struct intel_encoder *encoder;
5667         const intel_limit_t *limit;
5668         int ret;
5669         struct fdi_m_n m_n = {0};
5670         u32 temp;
5671         u32 lvds_sync = 0;
5672         int target_clock, pixel_multiplier, lane, link_bw, factor;
5673         unsigned int pipe_bpp;
5674         bool dither;
5675
5676         list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5677                 if (encoder->base.crtc != crtc)
5678                         continue;
5679
5680                 switch (encoder->type) {
5681                 case INTEL_OUTPUT_LVDS:
5682                         is_lvds = true;
5683                         break;
5684                 case INTEL_OUTPUT_SDVO:
5685                 case INTEL_OUTPUT_HDMI:
5686                         is_sdvo = true;
5687                         if (encoder->needs_tv_clock)
5688                                 is_tv = true;
5689                         break;
5690                 case INTEL_OUTPUT_TVOUT:
5691                         is_tv = true;
5692                         break;
5693                 case INTEL_OUTPUT_ANALOG:
5694                         is_crt = true;
5695                         break;
5696                 case INTEL_OUTPUT_DISPLAYPORT:
5697                         is_dp = true;
5698                         break;
5699                 case INTEL_OUTPUT_EDP:
5700                         has_edp_encoder = encoder;
5701                         break;
5702                 }
5703
5704                 num_connectors++;
5705         }
5706
5707         refclk = ironlake_get_refclk(crtc);
5708
5709         /*
5710          * Returns a set of divisors for the desired target clock with the given
5711          * refclk, or false.  The returned values represent the clock equation:
5712          * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
5713          */
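        /*
         * Purely as an arithmetic illustration (not values taken from the
         * limit tables): refclk = 120,000 kHz with n = 1, m1 = 12, m2 = 8,
         * p1 = 2, p2 = 5 gives 120000 * (5 * 14 + 10) / 3 / 2 / 5
         * = 320,000 kHz.
         */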
5714         limit = intel_limit(crtc, refclk);
5715         ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
5716                              &clock);
5717         if (!ok) {
5718                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
5719                 return -EINVAL;
5720         }
5721
5722         /* Ensure that the cursor is valid for the new mode before changing... */
5723         intel_crtc_update_cursor(crtc, true);
5724
5725         if (is_lvds && dev_priv->lvds_downclock_avail) {
5726                 /*
5727                  * Ensure we match the reduced clock's P to the target clock.
5728                  * If the clocks don't match, we can't switch the display clock
5729                  * by using FP0/FP1. In that case the LVDS downclock feature
5730                  * is disabled.
5731                  */
5732                 has_reduced_clock = limit->find_pll(limit, crtc,
5733                                                     dev_priv->lvds_downclock,
5734                                                     refclk,
5735                                                     &clock,
5736                                                     &reduced_clock);
5737         }
5738         /* SDVO TV has fixed PLL values that depend on its clock range;
5739            this mirrors the VBIOS setting. */
5740         if (is_sdvo && is_tv) {
5741                 if (adjusted_mode->clock >= 100000
5742                     && adjusted_mode->clock < 140500) {
5743                         clock.p1 = 2;
5744                         clock.p2 = 10;
5745                         clock.n = 3;
5746                         clock.m1 = 16;
5747                         clock.m2 = 8;
5748                 } else if (adjusted_mode->clock >= 140500
5749                            && adjusted_mode->clock <= 200000) {
5750                         clock.p1 = 1;
5751                         clock.p2 = 10;
5752                         clock.n = 6;
5753                         clock.m1 = 12;
5754                         clock.m2 = 8;
5755                 }
5756         }
5757
5758         /* FDI link */
5759         pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5760         lane = 0;
5761         /* CPU eDP doesn't require FDI link, so just set DP M/N
5762            according to current link config */
5763         if (has_edp_encoder &&
5764             !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5765                 target_clock = mode->clock;
5766                 intel_edp_link_config(has_edp_encoder,
5767                                       &lane, &link_bw);
5768         } else {
5769                 /* [e]DP over FDI requires target mode clock
5770                    instead of link clock */
5771                 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
5772                         target_clock = mode->clock;
5773                 else
5774                         target_clock = adjusted_mode->clock;
5775
5776                 /* FDI is a binary signal running at ~2.7 GHz, encoding
5777                  * each output octet as 10 bits. The actual frequency
5778                  * is stored as a divider into a 100 MHz clock, and the
5779                  * mode pixel clock is stored in units of 1 kHz.
5780                  * Hence the bandwidth of each lane in terms of the mode
5781                  * signal is:
5782                  */
5783                 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
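                /* e.g. a divider value of 27 (2.7 GHz / 100 MHz) yields
                 * 27 * 100,000,000 / 1,000 / 10 = 270,000, i.e. 270 MB/s of
                 * payload bandwidth per lane in the same kHz-based units as
                 * mode->clock.
                 */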
5784         }
5785
5786         /* determine panel color depth */
5787         temp = I915_READ(PIPECONF(pipe));
5788         temp &= ~PIPE_BPC_MASK;
5789         dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, adjusted_mode);
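        /* pipe_bpp is the total pipe depth across all three channels,
         * i.e. 18/24/30/36 bits for 6/8/10/12 bpc.
         */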
5790         switch (pipe_bpp) {
5791         case 18:
5792                 temp |= PIPE_6BPC;
5793                 break;
5794         case 24:
5795                 temp |= PIPE_8BPC;
5796                 break;
5797         case 30:
5798                 temp |= PIPE_10BPC;
5799                 break;
5800         case 36:
5801                 temp |= PIPE_12BPC;
5802                 break;
5803         default:
5804                 WARN(1, "intel_choose_pipe_bpp_dither returned invalid value %d\n",
5805                         pipe_bpp);
5806                 temp |= PIPE_8BPC;
5807                 pipe_bpp = 24;
5808                 break;
5809         }
5810
5811         intel_crtc->bpp = pipe_bpp;
5812         I915_WRITE(PIPECONF(pipe), temp);
5813
5814         if (!lane) {
5815                 /*
5816                  * Account for spread spectrum to avoid
5817                  * oversubscribing the link. Max center spread
5818                  * is 2.5%; use 5% for safety's sake.
5819                  */
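                /* Illustrative example: a 148,500 kHz mode (1920x1080@60) at
                 * 24 bpp needs 148500 * 24 * 21 / 20 = 3,742,200 kbit/s; with
                 * link_bw = 270,000 that is 3742200 / (270000 * 8) + 1
                 * = 2 FDI lanes.
                 */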
5820                 u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
5821                 lane = bps / (link_bw * 8) + 1;
5822         }
5823
5824         intel_crtc->fdi_lanes = lane;
5825
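        /* The SDVO/HDMI pixel multiplier scales the per-lane bandwidth used
         * for the FDI M/N calculation below.
         */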
5826         if (pixel_multiplier > 1)
5827                 link_bw *= pixel_multiplier;
5828         ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
5829                              &m_n);
5830
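        /* Pack the PLL feedback divisors (N, M1, M2) into the FP register
         * values programmed below.
         */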
5831         fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
5832         if (has_reduced_clock)
5833                 fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
5834                         reduced_clock.m2;
5835
5836         /* Enable autotuning of the PLL clock (if permissible) */
5837         factor = 21;
5838         if (is_lvds) {
5839                 if ((intel_panel_use_ssc(dev_priv) &&
5840                      dev_priv->lvds_ssc_freq == 100) ||
5841                     (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
5842                         factor = 25;
5843         } else if (is_sdvo && is_tv)
5844                 factor = 20;
5845
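        /* Set the tune bit when the feedback ratio M/N falls below the
         * factor chosen above for this output type.
         */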
5846         if (clock.m < factor * clock.n)
5847                 fp |= FP_CB_TUNE;
5848
5849         dpll = 0;
5850
5851         if (is_lvds)
5852                 dpll |= DPLLB_MODE_LVDS;
5853         else
5854                 dpll |= DPLLB_MODE_DAC_SERIAL;
5855         if (is_sdvo) {
5856                 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5857                 if (pixel_multiplier > 1) {
5858                         dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
5859                 }
5860                 dpll |= DPLL_DVO_HIGH_SPEED;
5861         }
5862         if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
5863                 dpll |= DPLL_DVO_HIGH_SPEED;
5864
5865         /* compute the one-hot P1 bitmask: divider p1 sets bit (p1 - 1) */
5866         dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5867         /* also FPA1 */
5868         dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5869
5870         switch (clock.p2) {
5871         case 5:
5872                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5873                 break;
5874         case 7:
5875                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5876                 break;
5877         case 10:
5878                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5879                 break;
5880         case 14:
5881                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5882                 break;
5883         }
5884
5885         if (is_sdvo && is_tv)
5886                 dpll |= PLL_REF_INPUT_TVCLKINBC;
5887         else if (is_tv)
5888                 /* XXX: just matching BIOS for now */
5889                 /*      dpll |= PLL_REF_INPUT_TVCLKINBC; */
5890                 dpll |= 3;
5891         else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5892                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5893         else
5894                 dpll |= PLL_REF_INPUT_DREFCLK;
5895
5896         /* setup pipeconf */
5897         pipeconf = I915_READ(PIPECONF(pipe));
5898
5899         /* Set up the display plane register */
5900         dspcntr = DISPPLANE_GAMMA_ENABLE;
5901
5902         DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
5903         drm_mode_debug_printmodeline(mode);
5904
5905         /* PCH eDP needs FDI, but CPU eDP does not */
5906         if (!intel_crtc->no_pll) {
5907                 if (!has_edp_encoder ||
5908                     intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5909                         I915_WRITE(PCH_FP0(pipe), fp);
5910                         I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
5911
5912                         POSTING_READ(PCH_DPLL(pipe));
5913                         udelay(150);
5914                 }
5915         } else {
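                /* This CRTC has no PLL of its own (intel_crtc->no_pll); reuse
                 * pipe A's or pipe B's PCH DPLL only if its current DPLL/FP0
                 * programming already matches what we computed.
                 */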
5916                 if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) &&
5917                     fp == I915_READ(PCH_FP0(0))) {
5918                         intel_crtc->use_pll_a = true;
5919                         DRM_DEBUG_KMS("using pipe a dpll\n");
5920                 } else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) &&
5921                            fp == I915_READ(PCH_FP0(1))) {
5922                         intel_crtc->use_pll_a = false;
5923                         DRM_DEBUG_KMS("using pipe b dpll\n");
5924                 } else {
5925                         DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
5926                         return -EINVAL;
5927                 }
5928         }
5929
5930         /* The LVDS pin pair needs to be on before the DPLLs are enabled.
5931          * This is an exception to the general rule that mode_set doesn't turn
5932          * things on.
5933          */
5934         if (is_lvds) {
5935                 temp = I915_READ(PCH_LVDS);
5936                 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
5937                 if (HAS_PCH_CPT(dev)) {
5938                         temp &= ~PORT_TRANS_SEL_MASK;
5939                         temp |= PORT_TRANS_SEL_CPT(pipe);
5940                 } else {
5941                         if (pipe == 1)
5942                                 temp |= LVDS_PIPEB_SELECT;
5943                         else
5944                                 temp &= ~LVDS_PIPEB_SELECT;
5945                 }
5946
5947                 /* set the corresponding LVDS_BORDER bit */
5948                 temp |= dev_priv->lvds_border_bits;
5949                 /* Set the B0-B3 data pairs corresponding to whether we're going to
5950                  * set the DPLLs for dual-channel mode or not.
5951                  */
5952                 if (clock.p2 == 7)
5953                         temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
5954                 else
5955                         temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
5956
5957                 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
5958                  * appropriately here, but we need to look more thoroughly into how
5959                  * panels behave in the two modes.
5960                  */
5961                 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5962                         lvds_sync |= LVDS_HSYNC_POLARITY;
5963                 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
5964                         lvds_sync |= LVDS_VSYNC_POLARITY;
5965                 if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
5966                     != lvds_sync) {
5967                         char flags[2] = "-+";
5968                         DRM_INFO("Changing LVDS panel from "
5969                                  "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
5970                                  flags[!(temp & LVDS_HSYNC_POLARITY)],
5971                                  flags[!(temp & LVDS_VSYNC_POLARITY)],
5972                                  flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
5973                                  flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
5974                         temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5975                         temp |= lvds_sync;
5976                 }
5977                 I915_WRITE(PCH_LVDS, temp);
5978         }
5979
5980         pipeconf &= ~PIPECONF_DITHER_EN;
5981         pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
5982         if ((is_lvds && dev_priv->lvds_dither) || dither) {
5983                 pipeconf |= PIPECONF_DITHER_EN;
5984                 pipeconf |= PIPECONF_DITHER_TYPE_SP;
5985         }
5986         if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {