d2733e526a68bc75b7a1e71266b47115ed4d5530
[linux-2.6.git] / drivers / char / agp / intel-gtt.c
1 /*
2  * Intel GTT (Graphics Translation Table) routines
3  *
4  * Caveat: This driver implements the linux agp interface, but this is far from
5  * a agp driver! GTT support ended up here for purely historical reasons: The
6  * old userspace intel graphics drivers needed an interface to map memory into
7  * the GTT. And the drm provides a default interface for graphic devices sitting
8  * on an agp port. So it made sense to fake the GTT support as an agp port to
9  * avoid having to create a new api.
10  *
11  * With gem this does not make much sense anymore, just needlessly complicates
12  * the code. But as long as the old graphics stack is still supported, it's stuck
13  * here.
14  *
15  * /fairy-tale-mode off
16  */
17
18 #include <linux/module.h>
19 #include <linux/pci.h>
20 #include <linux/init.h>
21 #include <linux/kernel.h>
22 #include <linux/pagemap.h>
23 #include <linux/agp_backend.h>
24 #include <asm/smp.h>
25 #include "agp.h"
26 #include "intel-agp.h"
27 #include <linux/intel-gtt.h>
28 #include <drm/intel-gtt.h>
29
30 /*
31  * If we have Intel graphics, we're not going to have anything other than
32  * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
33  * on the Intel IOMMU support (CONFIG_DMAR).
34  * Only newer chipsets need to bother with this, of course.
35  */
36 #ifdef CONFIG_DMAR
37 #define USE_PCI_DMA_API 1
38 #else
39 #define USE_PCI_DMA_API 0
40 #endif
41
/* i810 aperture modes: {size in MB, number of GTT entries, page order} */
static const struct aper_size_info_fixed intel_i810_sizes[] =
{
        {64, 16384, 4},
        /* The 32M mode still requires a 64k gatt */
        {32, 8192, 4}
};
48
49 #define AGP_DCACHE_MEMORY       1
50 #define AGP_PHYS_MEMORY         2
51 #define INTEL_AGP_CACHED_MEMORY 3
52
/* PTE flag bits per agp memory type (indexed by mask type). */
static struct gatt_mask intel_i810_masks[] =
{
        {.mask = I810_PTE_VALID, .type = 0},
        /* dcache entries point at the on-chip dedicated video ram */
        {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
        {.mask = I810_PTE_VALID, .type = 0},
        {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
         .type = INTEL_AGP_CACHED_MEMORY}
};
61
62 #define INTEL_AGP_UNCACHED_MEMORY              0
63 #define INTEL_AGP_CACHED_MEMORY_LLC            1
64 #define INTEL_AGP_CACHED_MEMORY_LLC_GFDT       2
65 #define INTEL_AGP_CACHED_MEMORY_LLC_MLC        3
66 #define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT   4
67
/*
 * Per-chipset-family vtable: everything that differs between GTT
 * generations is funneled through these hooks and feature bits.
 */
struct intel_gtt_driver {
        unsigned int gen : 8;           /* chipset generation number */
        unsigned int is_g33 : 1;
        unsigned int is_pineview : 1;
        unsigned int is_ironlake : 1;
        unsigned int has_pgtbl_enable : 1;
        unsigned int dma_mask_size : 8; /* presumably the DMA mask width in bits — not used in this chunk, verify */
        /* Chipset specific GTT setup */
        int (*setup)(void);
        /* This should undo anything done in ->setup() save the unmapping
         * of the mmio register file, that's done in the generic code. */
        void (*cleanup)(void);
        void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
        /* Flags is a more or less chipset specific opaque value.
         * For chipsets that need to support old ums (non-gem) code, this
         * needs to be identical to the various supported agp memory types! */
        bool (*check_flags)(unsigned int flags);
        void (*chipset_flush)(void);
};
87
/* All driver state lives in this single file-scope singleton. */
static struct _intel_private {
        struct intel_gtt base;
        const struct intel_gtt_driver *driver;
        struct pci_dev *pcidev; /* device one */
        struct pci_dev *bridge_dev;
        u8 __iomem *registers;          /* mapped MMIO register window */
        phys_addr_t gtt_bus_addr;
        phys_addr_t gma_bus_addr;
        u32 PGETBL_save;                /* PGETBL_CTL saved for resume */
        u32 __iomem *gtt;               /* I915G */
        int num_dcache_entries;
        union {
                void __iomem *i9xx_flush_page;
                void *i8xx_flush_page;
        };
        struct page *i8xx_page;         /* backing page for i8xx_flush_page */
        struct resource ifp_resource;
        int resource_valid;
        struct page *scratch_page;      /* page unbound GTT entries point at */
        dma_addr_t scratch_page_dma;
} intel_private;
109
110 #define INTEL_GTT_GEN   intel_private.driver->gen
111 #define IS_G33          intel_private.driver->is_g33
112 #define IS_PINEVIEW     intel_private.driver->is_pineview
113 #define IS_IRONLAKE     intel_private.driver->is_ironlake
114 #define HAS_PGTBL_EN    intel_private.driver->has_pgtbl_enable
115
116 static void intel_agp_free_sglist(struct agp_memory *mem)
117 {
118         struct sg_table st;
119
120         st.sgl = mem->sg_list;
121         st.orig_nents = st.nents = mem->page_count;
122
123         sg_free_table(&st);
124
125         mem->sg_list = NULL;
126         mem->num_sg = 0;
127 }
128
129 static int intel_agp_map_memory(struct agp_memory *mem)
130 {
131         struct sg_table st;
132         struct scatterlist *sg;
133         int i;
134
135         if (mem->sg_list)
136                 return 0; /* already mapped (for e.g. resume */
137
138         DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
139
140         if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
141                 goto err;
142
143         mem->sg_list = sg = st.sgl;
144
145         for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
146                 sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
147
148         mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
149                                  mem->page_count, PCI_DMA_BIDIRECTIONAL);
150         if (unlikely(!mem->num_sg))
151                 goto err;
152
153         return 0;
154
155 err:
156         sg_free_table(&st);
157         return -ENOMEM;
158 }
159
160 static void intel_agp_unmap_memory(struct agp_memory *mem)
161 {
162         DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
163
164         pci_unmap_sg(intel_private.pcidev, mem->sg_list,
165                      mem->page_count, PCI_DMA_BIDIRECTIONAL);
166         intel_agp_free_sglist(mem);
167 }
168
169 static int intel_i810_fetch_size(void)
170 {
171         u32 smram_miscc;
172         struct aper_size_info_fixed *values;
173
174         pci_read_config_dword(intel_private.bridge_dev,
175                               I810_SMRAM_MISCC, &smram_miscc);
176         values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
177
178         if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
179                 dev_warn(&intel_private.bridge_dev->dev, "i810 is disabled\n");
180                 return 0;
181         }
182         if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
183                 agp_bridge->current_size = (void *) (values + 1);
184                 agp_bridge->aperture_size_idx = 1;
185                 return values[1].size;
186         } else {
187                 agp_bridge->current_size = (void *) (values);
188                 agp_bridge->aperture_size_idx = 0;
189                 return values[0].size;
190         }
191
192         return 0;
193 }
194
/*
 * Program the i810: map the MMIO register window (if not already mapped),
 * detect dedicated video ram, record the aperture bus address, enable the
 * GTT and optionally point every entry at the scratch page.
 * Returns 0 on success, -ENOMEM if the registers cannot be remapped.
 */
static int intel_i810_configure(void)
{
        struct aper_size_info_fixed *current_size;
        u32 temp;
        int i;

        current_size = A_SIZE_FIX(agp_bridge->current_size);

        if (!intel_private.registers) {
                pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
                temp &= 0xfff80000;     /* MMIO base is 512k aligned */

                intel_private.registers = ioremap(temp, 128 * 4096);
                if (!intel_private.registers) {
                        dev_err(&intel_private.pcidev->dev,
                                "can't remap memory\n");
                        return -ENOMEM;
                }
        }

        if ((readl(intel_private.registers+I810_DRAM_CTL)
                & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
                /* This will need to be dynamically assigned */
                dev_info(&intel_private.pcidev->dev,
                         "detected 4MB dedicated video ram\n");
                intel_private.num_dcache_entries = 1024;
        }
        pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
        agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
        /* point the chip at the gatt and turn the pagetable on */
        writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
        readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */

        if (agp_bridge->driver->needs_scratch_page) {
                /* fill the whole table so no entry is left dangling */
                for (i = 0; i < current_size->num_entries; i++) {
                        writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
                }
                readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */
        }
        global_cache_flush();
        return 0;
}
236
/* Disable the pagetable and unmap the MMIO register window. */
static void intel_i810_cleanup(void)
{
        writel(0, intel_private.registers+I810_PGETBL_CTL);
        readl(intel_private.registers); /* PCI Posting. */
        iounmap(intel_private.registers);
}
243
244 static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
245 {
246         return;
247 }
248
249 /* Exists to support ARGB cursors */
250 static struct page *i8xx_alloc_pages(void)
251 {
252         struct page *page;
253
254         page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
255         if (page == NULL)
256                 return NULL;
257
258         if (set_pages_uc(page, 4) < 0) {
259                 set_pages_wb(page, 4);
260                 __free_pages(page, 2);
261                 return NULL;
262         }
263         get_page(page);
264         atomic_inc(&agp_bridge->current_memory_agp);
265         return page;
266 }
267
268 static void i8xx_destroy_pages(struct page *page)
269 {
270         if (page == NULL)
271                 return;
272
273         set_pages_wb(page, 4);
274         put_page(page);
275         __free_pages(page, 2);
276         atomic_dec(&agp_bridge->current_memory_agp);
277 }
278
/*
 * Bind @mem into the i810 GTT starting at entry @pg_start.
 *
 * Handles dcache memory (PTEs point at the dedicated video ram) and
 * phys/normal memory. Returns 0 on success, -EBUSY if a target entry is
 * already populated, -EINVAL for a bad range, type mismatch or unknown
 * mask type.
 *
 * Exit protocol: the success path jumps to "out" which sets ret = 0 and
 * deliberately falls through into "out_err"; both paths mark the memory
 * as flushed.
 */
static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
                                int type)
{
        int i, j, num_entries;
        void *temp;
        int ret = -EINVAL;
        int mask_type;

        /* binding nothing trivially succeeds */
        if (mem->page_count == 0)
                goto out;

        temp = agp_bridge->current_size;
        num_entries = A_SIZE_FIX(temp)->num_entries;

        if ((pg_start + mem->page_count) > num_entries)
                goto out_err;


        /* refuse to overwrite entries that are already bound */
        for (j = pg_start; j < (pg_start + mem->page_count); j++) {
                if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
                        ret = -EBUSY;
                        goto out_err;
                }
        }

        if (type != mem->type)
                goto out_err;

        mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

        switch (mask_type) {
        case AGP_DCACHE_MEMORY:
                if (!mem->is_flushed)
                        global_cache_flush();
                for (i = pg_start; i < (pg_start + mem->page_count); i++) {
                        writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
                               intel_private.registers+I810_PTE_BASE+(i*4));
                }
                readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting */
                break;
        case AGP_PHYS_MEMORY:
        case AGP_NORMAL_MEMORY:
                if (!mem->is_flushed)
                        global_cache_flush();
                for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
                        writel(agp_bridge->driver->mask_memory(agp_bridge,
                                        page_to_phys(mem->pages[i]), mask_type),
                               intel_private.registers+I810_PTE_BASE+(j*4));
                }
                readl(intel_private.registers+I810_PTE_BASE+((j-1)*4)); /* PCI posting */
                break;
        default:
                goto out_err;
        }

out:
        ret = 0;
out_err:
        mem->is_flushed = true;
        return ret;
}
340
341 static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
342                                 int type)
343 {
344         int i;
345
346         if (mem->page_count == 0)
347                 return 0;
348
349         for (i = pg_start; i < (mem->page_count + pg_start); i++) {
350                 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
351         }
352         readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
353
354         return 0;
355 }
356
357 /*
358  * The i810/i830 requires a physical address to program its mouse
359  * pointer into hardware.
360  * However the Xserver still writes to it through the agp aperture.
361  */
362 static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
363 {
364         struct agp_memory *new;
365         struct page *page;
366
367         switch (pg_count) {
368         case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
369                 break;
370         case 4:
371                 /* kludge to get 4 physical pages for ARGB cursor */
372                 page = i8xx_alloc_pages();
373                 break;
374         default:
375                 return NULL;
376         }
377
378         if (page == NULL)
379                 return NULL;
380
381         new = agp_create_memory(pg_count);
382         if (new == NULL)
383                 return NULL;
384
385         new->pages[0] = page;
386         if (pg_count == 4) {
387                 /* kludge to get 4 physical pages for ARGB cursor */
388                 new->pages[1] = new->pages[0] + 1;
389                 new->pages[2] = new->pages[1] + 1;
390                 new->pages[3] = new->pages[2] + 1;
391         }
392         new->page_count = pg_count;
393         new->num_scratch_pages = pg_count;
394         new->type = AGP_PHYS_MEMORY;
395         new->physical = page_to_phys(new->pages[0]);
396         return new;
397 }
398
399 static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
400 {
401         struct agp_memory *new;
402
403         if (type == AGP_DCACHE_MEMORY) {
404                 if (pg_count != intel_private.num_dcache_entries)
405                         return NULL;
406
407                 new = agp_create_memory(1);
408                 if (new == NULL)
409                         return NULL;
410
411                 new->type = AGP_DCACHE_MEMORY;
412                 new->page_count = pg_count;
413                 new->num_scratch_pages = 0;
414                 agp_free_page_array(new);
415                 return new;
416         }
417         if (type == AGP_PHYS_MEMORY)
418                 return alloc_agpphysmem_i8xx(pg_count, type);
419         return NULL;
420 }
421
422 static void intel_i810_free_by_type(struct agp_memory *curr)
423 {
424         agp_free_key(curr->key);
425         if (curr->type == AGP_PHYS_MEMORY) {
426                 if (curr->page_count == 4)
427                         i8xx_destroy_pages(curr->pages[0]);
428                 else {
429                         agp_bridge->driver->agp_destroy_page(curr->pages[0],
430                                                              AGP_PAGE_DESTROY_UNMAP);
431                         agp_bridge->driver->agp_destroy_page(curr->pages[0],
432                                                              AGP_PAGE_DESTROY_FREE);
433                 }
434                 agp_free_page_array(curr);
435         }
436         kfree(curr);
437 }
438
439 static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
440                                             dma_addr_t addr, int type)
441 {
442         /* Type checking must be done elsewhere */
443         return addr | bridge->driver->masks[type].mask;
444 }
445
446 static int intel_gtt_setup_scratch_page(void)
447 {
448         struct page *page;
449         dma_addr_t dma_addr;
450
451         page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
452         if (page == NULL)
453                 return -ENOMEM;
454         get_page(page);
455         set_pages_uc(page, 1);
456
457         if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
458                 dma_addr = pci_map_page(intel_private.pcidev, page, 0,
459                                     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
460                 if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
461                         return -EINVAL;
462
463                 intel_private.scratch_page_dma = dma_addr;
464         } else
465                 intel_private.scratch_page_dma = page_to_phys(page);
466
467         intel_private.scratch_page = page;
468
469         return 0;
470 }
471
472 static const struct aper_size_info_fixed const intel_fake_agp_sizes[] = {
473         {128, 32768, 5},
474         /* The 64M mode still requires a 128k gatt */
475         {64, 16384, 5},
476         {256, 65536, 6},
477         {512, 131072, 7},
478 };
479
/*
 * Determine how much memory the BIOS carved out for graphics ("stolen"
 * memory, or "local" memory on i830 with RDRAM). The encoding of the
 * GMCH control register differs per chipset family, hence the three
 * switch ladders. Returns the size in bytes, 0 if nothing is stolen.
 */
static unsigned int intel_gtt_stolen_size(void)
{
        u16 gmch_ctrl;
        u8 rdct;
        int local = 0;
        static const int ddt[4] = { 0, 16, 32, 64 };
        unsigned int stolen_size = 0;

        pci_read_config_word(intel_private.bridge_dev,
                             I830_GMCH_CTRL, &gmch_ctrl);

        /* i830/i845: old GMS encoding, possibly local RDRAM */
        if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
            intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
                switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
                case I830_GMCH_GMS_STOLEN_512:
                        stolen_size = KB(512);
                        break;
                case I830_GMCH_GMS_STOLEN_1024:
                        stolen_size = MB(1);
                        break;
                case I830_GMCH_GMS_STOLEN_8192:
                        stolen_size = MB(8);
                        break;
                case I830_GMCH_GMS_LOCAL:
                        /* dedicated RDRAM: size = (rows) * (device density) */
                        rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
                        stolen_size = (I830_RDRAM_ND(rdct) + 1) *
                                        MB(ddt[I830_RDRAM_DDT(rdct)]);
                        local = 1;
                        break;
                default:
                        stolen_size = 0;
                        break;
                }
        } else if (INTEL_GTT_GEN == 6) {
                /*
                 * SandyBridge has new memory control reg at 0x50.w
                 */
                u16 snb_gmch_ctl;
                pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
                switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
                case SNB_GMCH_GMS_STOLEN_32M:
                        stolen_size = MB(32);
                        break;
                case SNB_GMCH_GMS_STOLEN_64M:
                        stolen_size = MB(64);
                        break;
                case SNB_GMCH_GMS_STOLEN_96M:
                        stolen_size = MB(96);
                        break;
                case SNB_GMCH_GMS_STOLEN_128M:
                        stolen_size = MB(128);
                        break;
                case SNB_GMCH_GMS_STOLEN_160M:
                        stolen_size = MB(160);
                        break;
                case SNB_GMCH_GMS_STOLEN_192M:
                        stolen_size = MB(192);
                        break;
                case SNB_GMCH_GMS_STOLEN_224M:
                        stolen_size = MB(224);
                        break;
                case SNB_GMCH_GMS_STOLEN_256M:
                        stolen_size = MB(256);
                        break;
                case SNB_GMCH_GMS_STOLEN_288M:
                        stolen_size = MB(288);
                        break;
                case SNB_GMCH_GMS_STOLEN_320M:
                        stolen_size = MB(320);
                        break;
                case SNB_GMCH_GMS_STOLEN_352M:
                        stolen_size = MB(352);
                        break;
                case SNB_GMCH_GMS_STOLEN_384M:
                        stolen_size = MB(384);
                        break;
                case SNB_GMCH_GMS_STOLEN_416M:
                        stolen_size = MB(416);
                        break;
                case SNB_GMCH_GMS_STOLEN_448M:
                        stolen_size = MB(448);
                        break;
                case SNB_GMCH_GMS_STOLEN_480M:
                        stolen_size = MB(480);
                        break;
                case SNB_GMCH_GMS_STOLEN_512M:
                        stolen_size = MB(512);
                        break;
                }
        } else {
                /* everything else: i855+ GMS encoding */
                switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
                case I855_GMCH_GMS_STOLEN_1M:
                        stolen_size = MB(1);
                        break;
                case I855_GMCH_GMS_STOLEN_4M:
                        stolen_size = MB(4);
                        break;
                case I855_GMCH_GMS_STOLEN_8M:
                        stolen_size = MB(8);
                        break;
                case I855_GMCH_GMS_STOLEN_16M:
                        stolen_size = MB(16);
                        break;
                case I855_GMCH_GMS_STOLEN_32M:
                        stolen_size = MB(32);
                        break;
                case I915_GMCH_GMS_STOLEN_48M:
                        stolen_size = MB(48);
                        break;
                case I915_GMCH_GMS_STOLEN_64M:
                        stolen_size = MB(64);
                        break;
                case G33_GMCH_GMS_STOLEN_128M:
                        stolen_size = MB(128);
                        break;
                case G33_GMCH_GMS_STOLEN_256M:
                        stolen_size = MB(256);
                        break;
                case INTEL_GMCH_GMS_STOLEN_96M:
                        stolen_size = MB(96);
                        break;
                case INTEL_GMCH_GMS_STOLEN_160M:
                        stolen_size = MB(160);
                        break;
                case INTEL_GMCH_GMS_STOLEN_224M:
                        stolen_size = MB(224);
                        break;
                case INTEL_GMCH_GMS_STOLEN_352M:
                        stolen_size = MB(352);
                        break;
                default:
                        stolen_size = 0;
                        break;
                }
        }

        if (stolen_size > 0) {
                dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
                       stolen_size / KB(1), local ? "local" : "stolen");
        } else {
                dev_info(&intel_private.bridge_dev->dev,
                       "no pre-allocated video memory detected\n");
                stolen_size = 0;
        }

        return stolen_size;
}
627
/*
 * Reprogram the global gtt size on gen5 (the BIOS may have split the
 * pagetable between ggtt and ppgtt); ppgtt must be off while doing so.
 */
static void i965_adjust_pgetbl_size(unsigned int size_flag)
{
        u32 pgetbl_ctl, pgetbl_ctl2;

        /* ensure that ppgtt is disabled */
        pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
        pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
        writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);

        /* write the new ggtt size */
        pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
        pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
        pgetbl_ctl |= size_flag;
        writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
}
643
/*
 * Read the gtt size out of PGETBL_CTL (adjusting it first on gen5 from
 * the GMCH config) and convert it to a number of 4-byte entries.
 */
static unsigned int i965_gtt_total_entries(void)
{
        int size;
        u32 pgetbl_ctl;
        u16 gmch_ctl;

        pci_read_config_word(intel_private.bridge_dev,
                             I830_GMCH_CTRL, &gmch_ctl);

        if (INTEL_GTT_GEN == 5) {
                switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
                case G4x_GMCH_SIZE_1M:
                case G4x_GMCH_SIZE_VT_1M:
                        i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
                        break;
                case G4x_GMCH_SIZE_VT_1_5M:
                        i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
                        break;
                case G4x_GMCH_SIZE_2M:
                case G4x_GMCH_SIZE_VT_2M:
                        i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
                        break;
                }
        }

        pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

        switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
        case I965_PGETBL_SIZE_128KB:
                size = KB(128);
                break;
        case I965_PGETBL_SIZE_256KB:
                size = KB(256);
                break;
        case I965_PGETBL_SIZE_512KB:
                size = KB(512);
                break;
        /* GTT pagetable sizes bigger than 512KB are not possible on G33! */
        case I965_PGETBL_SIZE_1MB:
                size = KB(1024);
                break;
        case I965_PGETBL_SIZE_2MB:
                size = KB(2048);
                break;
        case I965_PGETBL_SIZE_1_5MB:
                size = KB(1024 + 512);
                break;
        default:
                dev_info(&intel_private.pcidev->dev,
                         "unknown page table size, assuming 512KB\n");
                size = KB(512);
        }

        /* each gtt entry is 4 bytes */
        return size/4;
}
699
700 static unsigned int intel_gtt_total_entries(void)
701 {
702         int size;
703
704         if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
705                 return i965_gtt_total_entries();
706         else if (INTEL_GTT_GEN == 6) {
707                 u16 snb_gmch_ctl;
708
709                 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
710                 switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
711                 default:
712                 case SNB_GTT_SIZE_0M:
713                         printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
714                         size = MB(0);
715                         break;
716                 case SNB_GTT_SIZE_1M:
717                         size = MB(1);
718                         break;
719                 case SNB_GTT_SIZE_2M:
720                         size = MB(2);
721                         break;
722                 }
723                 return size/4;
724         } else {
725                 /* On previous hardware, the GTT size was just what was
726                  * required to map the aperture.
727                  */
728                 return intel_private.base.gtt_mappable_entries;
729         }
730 }
731
732 static unsigned int intel_gtt_mappable_entries(void)
733 {
734         unsigned int aperture_size;
735
736         if (INTEL_GTT_GEN == 2) {
737                 u16 gmch_ctrl;
738
739                 pci_read_config_word(intel_private.bridge_dev,
740                                      I830_GMCH_CTRL, &gmch_ctrl);
741
742                 if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
743                         aperture_size = MB(64);
744                 else
745                         aperture_size = MB(128);
746         } else {
747                 /* 9xx supports large sizes, just look at the length */
748                 aperture_size = pci_resource_len(intel_private.pcidev, 2);
749         }
750
751         return aperture_size >> PAGE_SHIFT;
752 }
753
/* Undo intel_gtt_setup_scratch_page(). */
static void intel_gtt_teardown_scratch_page(void)
{
        set_pages_wb(intel_private.scratch_page, 1);
        /* NOTE(review): this unmap runs unconditionally, but setup only
         * maps via the PCI DMA API when USE_PCI_DMA_API && gen > 2; on the
         * other path scratch_page_dma is just the physical address —
         * presumably a no-op for the dma ops in use there, verify. */
        pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        put_page(intel_private.scratch_page);
        __free_page(intel_private.scratch_page);
}
762
763 static void intel_gtt_cleanup(void)
764 {
765         intel_private.driver->cleanup();
766
767         iounmap(intel_private.gtt);
768         iounmap(intel_private.registers);
769         
770         intel_gtt_teardown_scratch_page();
771 }
772
773 static int intel_gtt_init(void)
774 {
775         u32 gtt_map_size;
776         int ret;
777
778         ret = intel_private.driver->setup();
779         if (ret != 0)
780                 return ret;
781
782         intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
783         intel_private.base.gtt_total_entries = intel_gtt_total_entries();
784
785         /* save the PGETBL reg for resume */
786         intel_private.PGETBL_save =
787                 readl(intel_private.registers+I810_PGETBL_CTL)
788                         & ~I810_PGETBL_ENABLED;
789         /* we only ever restore the register when enabling the PGTBL... */
790         if (HAS_PGTBL_EN)
791                 intel_private.PGETBL_save |= I810_PGETBL_ENABLED;
792
793         dev_info(&intel_private.bridge_dev->dev,
794                         "detected gtt size: %dK total, %dK mappable\n",
795                         intel_private.base.gtt_total_entries * 4,
796                         intel_private.base.gtt_mappable_entries * 4);
797
798         gtt_map_size = intel_private.base.gtt_total_entries * 4;
799
800         intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
801                                     gtt_map_size);
802         if (!intel_private.gtt) {
803                 intel_private.driver->cleanup();
804                 iounmap(intel_private.registers);
805                 return -ENOMEM;
806         }
807
808         global_cache_flush();   /* FIXME: ? */
809
810         /* we have to call this as early as possible after the MMIO base address is known */
811         intel_private.base.stolen_size = intel_gtt_stolen_size();
812         if (intel_private.base.stolen_size == 0) {
813                 intel_private.driver->cleanup();
814                 iounmap(intel_private.registers);
815                 iounmap(intel_private.gtt);
816                 return -ENOMEM;
817         }
818
819         ret = intel_gtt_setup_scratch_page();
820         if (ret != 0) {
821                 intel_gtt_cleanup();
822                 return ret;
823         }
824
825         return 0;
826 }
827
828 static int intel_fake_agp_fetch_size(void)
829 {
830         int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
831         unsigned int aper_size;
832         int i;
833
834         aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
835                     / MB(1);
836
837         for (i = 0; i < num_sizes; i++) {
838                 if (aper_size == intel_fake_agp_sizes[i].size) {
839                         agp_bridge->current_size =
840                                 (void *) (intel_fake_agp_sizes + i);
841                         return aper_size;
842                 }
843         }
844
845         return 0;
846 }
847
848 static void i830_cleanup(void)
849 {
850         kunmap(intel_private.i8xx_page);
851         intel_private.i8xx_flush_page = NULL;
852
853         __free_page(intel_private.i8xx_page);
854         intel_private.i8xx_page = NULL;
855 }
856
857 static void intel_i830_setup_flush(void)
858 {
859         /* return if we've already set the flush mechanism up */
860         if (intel_private.i8xx_page)
861                 return;
862
863         intel_private.i8xx_page = alloc_page(GFP_KERNEL);
864         if (!intel_private.i8xx_page)
865                 return;
866
867         intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
868         if (!intel_private.i8xx_flush_page)
869                 i830_cleanup();
870 }
871
872 /* The chipset_flush interface needs to get data that has already been
873  * flushed out of the CPU all the way out to main memory, because the GPU
874  * doesn't snoop those buffers.
875  *
876  * The 8xx series doesn't have the same lovely interface for flushing the
877  * chipset write buffers that the later chips do. According to the 865
878  * specs, it's 64 octwords, or 1KB.  So, to get those previous things in
879  * that buffer out, we just fill 1KB and clflush it out, on the assumption
880  * that it'll push whatever was in there out.  It appears to work.
881  */
882 static void i830_chipset_flush(void)
883 {
884         unsigned int *pg = intel_private.i8xx_flush_page;
885
886         memset(pg, 0, 1024);
887
888         if (cpu_has_clflush)
889                 clflush_cache_range(pg, 1024);
890         else if (wbinvd_on_all_cpus() != 0)
891                 printk(KERN_ERR "Timed out waiting for cache flush.\n");
892 }
893
894 static void i830_write_entry(dma_addr_t addr, unsigned int entry,
895                              unsigned int flags)
896 {
897         u32 pte_flags = I810_PTE_VALID;
898         
899         switch (flags) {
900         case AGP_DCACHE_MEMORY:
901                 pte_flags |= I810_PTE_LOCAL;
902                 break;
903         case AGP_USER_CACHED_MEMORY:
904                 pte_flags |= I830_PTE_SYSTEM_CACHED;
905                 break;
906         }
907
908         writel(addr | pte_flags, intel_private.gtt + entry);
909 }
910
/*
 * Record the graphics aperture bus address and make sure the GTT is
 * enabled in hardware.  Returns false if the chipset refuses to latch
 * the enable bits (verified by reading them back).
 */
static bool intel_enable_gtt(void)
{
	u32 gma_addr;
	u8 __iomem *reg;

	/* The aperture BAR lives at a different config offset on gen2. */
	if (INTEL_GTT_GEN == 2)
		pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
				      &gma_addr);
	else
		pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
				      &gma_addr);

	intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);

	/* gen6+ needs no explicit enable beyond noting the aperture. */
	if (INTEL_GTT_GEN >= 6)
		return true;

	if (INTEL_GTT_GEN == 2) {
		u16 gmch_ctrl;

		/* Read-modify-write the GMCH control word to set the
		 * enable bit... */
		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);
		gmch_ctrl |= I830_GMCH_ENABLED;
		pci_write_config_word(intel_private.bridge_dev,
				      I830_GMCH_CTRL, gmch_ctrl);

		/* ...then read back to verify the bit actually stuck. */
		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);
		if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
			dev_err(&intel_private.pcidev->dev,
				"failed to enable the GTT: GMCH_CTRL=%x\n",
				gmch_ctrl);
			return false;
		}
	}

	/* Restore the saved page-table control word and, where the
	 * chipset has a working enable bit, verify it took effect. */
	reg = intel_private.registers+I810_PGETBL_CTL;
	writel(intel_private.PGETBL_save, reg);
	if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
		dev_err(&intel_private.pcidev->dev,
			"failed to enable the GTT: PGETBL=%x [expected %x]\n",
			readl(reg), intel_private.PGETBL_save);
		return false;
	}

	return true;
}
958
959 static int i830_setup(void)
960 {
961         u32 reg_addr;
962
963         pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
964         reg_addr &= 0xfff80000;
965
966         intel_private.registers = ioremap(reg_addr, KB(64));
967         if (!intel_private.registers)
968                 return -ENOMEM;
969
970         intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
971
972         intel_i830_setup_flush();
973
974         return 0;
975 }
976
977 static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
978 {
979         agp_bridge->gatt_table_real = NULL;
980         agp_bridge->gatt_table = NULL;
981         agp_bridge->gatt_bus_addr = 0;
982
983         return 0;
984 }
985
/* Nothing was allocated by create_gatt_table, so nothing to free. */
static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
{
	return 0;
}
990
/*
 * agp_bridge_driver ->configure hook: enable the GTT, publish the
 * aperture address and point every GTT entry at the scratch page so
 * stray GPU accesses hit known-safe memory.
 */
static int intel_fake_agp_configure(void)
{
	int i;

	if (!intel_enable_gtt())
	    return -EIO;

	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;

	/* Initialize all entries to the scratch page. */
	for (i = 0; i < intel_private.base.gtt_total_entries; i++) {
		intel_private.driver->write_entry(intel_private.scratch_page_dma,
						  i, 0);
	}
	readl(intel_private.gtt+i-1);	/* PCI Posting. */

	global_cache_flush();

	return 0;
}
1010
1011 static bool i830_check_flags(unsigned int flags)
1012 {
1013         switch (flags) {
1014         case 0:
1015         case AGP_PHYS_MEMORY:
1016         case AGP_USER_CACHED_MEMORY:
1017         case AGP_USER_MEMORY:
1018                 return true;
1019         }
1020
1021         return false;
1022 }
1023
/*
 * Write GTT entries for a DMA-mapped scatterlist.
 * @sg_list:  the mapped scatterlist
 * @sg_len:   number of sg entries to walk
 * @pg_start: first GTT entry index to fill
 * @flags:    memory type, passed through to the chipset write_entry hook
 */
static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
					unsigned int sg_len,
					unsigned int pg_start,
					unsigned int flags)
{
	struct scatterlist *sg;
	unsigned int len, m;
	int i, j;

	j = pg_start;

	/* sg may merge pages, but we have to separate
	 * per-page addr for GTT */
	for_each_sg(sg_list, sg, sg_len, i) {
		len = sg_dma_len(sg) >> PAGE_SHIFT;
		for (m = 0; m < len; m++) {
			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
			intel_private.driver->write_entry(addr,
							  j, flags);
			j++;
		}
	}
	/* Posting read so the PTE writes reach the hardware. */
	readl(intel_private.gtt+j-1);
}
1048
/*
 * agp_bridge_driver ->insert_memory hook: bind @mem's pages into the
 * GTT starting at @pg_start.  Returns 0 on success, -EINVAL on a
 * range/type mismatch, or the DMA-mapping error code.
 */
static int intel_fake_agp_insert_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
{
	int i, j;
	int ret = -EINVAL;

	/* Empty bind is a successful no-op (falls through to "out"). */
	if (mem->page_count == 0)
		goto out;

	if (pg_start + mem->page_count > intel_private.base.gtt_total_entries)
		goto out_err;

	if (type != mem->type)
		goto out_err;

	if (!intel_private.driver->check_flags(type))
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
		ret = intel_agp_map_memory(mem);
		/* NOTE(review): this early return skips the is_flushed
		 * update below, unlike the out_err path — confirm that
		 * is intentional. */
		if (ret != 0)
			return ret;

		intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
					    pg_start, type);
	} else {
		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
			dma_addr_t addr = page_to_phys(mem->pages[i]);
			intel_private.driver->write_entry(addr,
							  j, type);
		}
		/* Posting read to flush the PTE writes over PCI. */
		readl(intel_private.gtt+j-1);
	}

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}
1092
1093 static int intel_fake_agp_remove_entries(struct agp_memory *mem,
1094                                          off_t pg_start, int type)
1095 {
1096         int i;
1097
1098         if (mem->page_count == 0)
1099                 return 0;
1100
1101         if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2)
1102                 intel_agp_unmap_memory(mem);
1103
1104         for (i = pg_start; i < (mem->page_count + pg_start); i++) {
1105                 intel_private.driver->write_entry(intel_private.scratch_page_dma,
1106                                                   i, 0);
1107         }
1108         readl(intel_private.gtt+i-1);
1109
1110         return 0;
1111 }
1112
1113 static void intel_fake_agp_chipset_flush(struct agp_bridge_data *bridge)
1114 {
1115         intel_private.driver->chipset_flush();
1116 }
1117
1118 static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
1119                                                        int type)
1120 {
1121         if (type == AGP_PHYS_MEMORY)
1122                 return alloc_agpphysmem_i8xx(pg_count, type);
1123         /* always return NULL for other allocation types for now */
1124         return NULL;
1125 }
1126
1127 static int intel_alloc_chipset_flush_resource(void)
1128 {
1129         int ret;
1130         ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
1131                                      PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
1132                                      pcibios_align_resource, intel_private.bridge_dev);
1133
1134         return ret;
1135 }
1136
/*
 * Locate (or allocate) the i915 isochronous flush page and record it in
 * intel_private.ifp_resource.  resource_valid tracks whether we own the
 * resource and must release it in i9xx_cleanup().
 */
static void intel_i915_setup_chipset_flush(void)
{
	int ret;
	u32 temp;

	pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
	if (!(temp & 0x1)) {
		/* BIOS did not program a flush page (bit 0 clear):
		 * allocate one ourselves and enable it.
		 * NOTE(review): the allocation result is not checked —
		 * on failure ifp_resource.start is written to the
		 * register anyway; confirm this is acceptable. */
		intel_alloc_chipset_flush_resource();
		intel_private.resource_valid = 1;
		pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		/* BIOS already programmed an address: strip the enable
		 * bit and reuse it. */
		temp &= ~1;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = temp;
		intel_private.ifp_resource.end = temp + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}
1159
/*
 * Same as intel_i915_setup_chipset_flush(), but for chipsets whose
 * flush-page address register is 64 bits wide (i965/G33 and later).
 */
static void intel_i965_g33_setup_chipset_flush(void)
{
	u32 temp_hi, temp_lo;
	int ret;

	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);

	if (!(temp_lo & 0x1)) {

		/* No BIOS-provided flush page (enable bit clear):
		 * allocate one and program both address halves; the
		 * enable bit lives in bit 0 of the low dword.
		 * NOTE(review): the allocation result is not checked. */
		intel_alloc_chipset_flush_resource();

		intel_private.resource_valid = 1;
		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
			upper_32_bits(intel_private.ifp_resource.start));
		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		u64 l64;

		/* Reassemble and reuse the BIOS-programmed 64-bit
		 * address, minus the enable bit. */
		temp_lo &= ~0x1;
		l64 = ((u64)temp_hi << 32) | temp_lo;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = l64;
		intel_private.ifp_resource.end = l64 + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}
1191
/*
 * Set up the chipset flush page for 9xx-class hardware.  Idempotent;
 * gen6 doesn't use this mechanism.  On failure the driver runs without
 * chipset flushing (i9xx_chipset_flush() tolerates a NULL mapping).
 */
static void intel_i9xx_setup_flush(void)
{
	/* return if already configured */
	if (intel_private.ifp_resource.start)
		return;

	if (INTEL_GTT_GEN == 6)
		return;

	/* setup a resource for this object */
	intel_private.ifp_resource.name = "Intel Flush Page";
	intel_private.ifp_resource.flags = IORESOURCE_MEM;

	/* Setup chipset flush for 915 */
	if (IS_G33 || INTEL_GTT_GEN >= 4) {
		intel_i965_g33_setup_chipset_flush();
	} else {
		intel_i915_setup_chipset_flush();
	}

	/* Map the flush page uncached; without it we only warn and
	 * carry on without chipset flushing. */
	if (intel_private.ifp_resource.start)
		intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
	if (!intel_private.i9xx_flush_page)
		dev_err(&intel_private.pcidev->dev,
			"can't ioremap flush page - no chipset flushing\n");
}
1218
1219 static void i9xx_cleanup(void)
1220 {
1221         if (intel_private.i9xx_flush_page)
1222                 iounmap(intel_private.i9xx_flush_page);
1223         if (intel_private.resource_valid)
1224                 release_resource(&intel_private.ifp_resource);
1225         intel_private.ifp_resource.start = 0;
1226         intel_private.resource_valid = 0;
1227 }
1228
1229 static void i9xx_chipset_flush(void)
1230 {
1231         if (intel_private.i9xx_flush_page)
1232                 writel(1, intel_private.i9xx_flush_page);
1233 }
1234
1235 static void i965_write_entry(dma_addr_t addr, unsigned int entry,
1236                              unsigned int flags)
1237 {
1238         /* Shift high bits down */
1239         addr |= (addr >> 28) & 0xf0;
1240         writel(addr | I810_PTE_VALID, intel_private.gtt + entry);
1241 }
1242
/* gen6 PTEs can encode every AGP memory type, so accept all flags. */
static bool gen6_check_flags(unsigned int flags)
{
	return true;
}
1247
/*
 * Write a gen6 (Sandybridge) GTT PTE.  @flags combines the AGP memory
 * type with the optional GFDT (graphics data type) attribute bit.
 */
static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	/* Separate the GFDT attribute from the caching type. */
	unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
	unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
	u32 pte_flags;

	if (type_mask == AGP_USER_MEMORY)
		pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
		pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
		if (gfdt)
			pte_flags |= GEN6_PTE_GFDT;
	} else { /* set 'normal'/'cached' to LLC by default */
		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
		if (gfdt)
			pte_flags |= GEN6_PTE_GFDT;
	}

	/* gen6 has bit11-4 for physical addr bit39-32 */
	addr |= (addr >> 28) & 0xff0;
	writel(addr | pte_flags, intel_private.gtt + entry);
}
1271
/* gen6 has no per-chipset flush state, so nothing to tear down. */
static void gen6_cleanup(void)
{
}
1275
/*
 * Map the MMIO BAR and work out where the GTT lives for 9xx-class
 * chipsets: gen3 exposes a separate PTE base-address register, later
 * generations place the GTT at a fixed offset from the MMIO base.
 */
static int i9xx_setup(void)
{
	u32 reg_addr;

	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);

	reg_addr &= 0xfff80000;

	intel_private.registers = ioremap(reg_addr, 128 * 4096);
	if (!intel_private.registers)
		return -ENOMEM;

	if (INTEL_GTT_GEN == 3) {
		u32 gtt_addr;

		/* gen3: the GTT base is its own config register. */
		pci_read_config_dword(intel_private.pcidev,
				      I915_PTEADDR, &gtt_addr);
		intel_private.gtt_bus_addr = gtt_addr;
	} else {
		u32 gtt_offset;

		/* gen4+: fixed offset from the MMIO base. */
		switch (INTEL_GTT_GEN) {
		case 5:
		case 6:
			gtt_offset = MB(2);
			break;
		case 4:
		default:
			gtt_offset =  KB(512);
			break;
		}
		intel_private.gtt_bus_addr = reg_addr + gtt_offset;
	}

	intel_i9xx_setup_flush();

	return 0;
}
1314
/* agp_bridge_driver for the true-AGP i810/i815 parts, which still go
 * through the generic GATT paths plus i810-specific entry handling. */
static const struct agp_bridge_driver intel_810_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i810_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 2,
	.needs_scratch_page	= true,
	.configure		= intel_i810_configure,
	.fetch_size		= intel_i810_fetch_size,
	.cleanup		= intel_i810_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= intel_i810_insert_entries,
	.remove_memory		= intel_i810_remove_entries,
	.alloc_by_type		= intel_i810_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
};
1340
/* agp_bridge_driver for everything i830 and newer: the GTT is managed
 * directly (see the file-top comment), so the generic GATT machinery is
 * stubbed out and per-chipset work goes through intel_private.driver. */
static const struct agp_bridge_driver intel_fake_agp_driver = {
	.owner			= THIS_MODULE,
	.size_type		= FIXED_APER_SIZE,
	.aperture_sizes		= intel_fake_agp_sizes,
	.num_aperture_sizes	= ARRAY_SIZE(intel_fake_agp_sizes),
	.configure		= intel_fake_agp_configure,
	.fetch_size		= intel_fake_agp_fetch_size,
	.cleanup		= intel_gtt_cleanup,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_fake_agp_create_gatt_table,
	.free_gatt_table	= intel_fake_agp_free_gatt_table,
	.insert_memory		= intel_fake_agp_insert_entries,
	.remove_memory		= intel_fake_agp_remove_entries,
	.alloc_by_type		= intel_fake_agp_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.chipset_flush		= intel_fake_agp_chipset_flush,
};
1363
/* Per-generation GTT driver descriptors: setup/cleanup, PTE encoding,
 * DMA mask width and chipset-flush method differ between generations. */
static const struct intel_gtt_driver i81x_gtt_driver = {
	.gen = 1,
	.dma_mask_size = 32,
};
static const struct intel_gtt_driver i8xx_gtt_driver = {
	.gen = 2,
	.has_pgtbl_enable = 1,
	.setup = i830_setup,
	.cleanup = i830_cleanup,
	.write_entry = i830_write_entry,
	.dma_mask_size = 32,
	.check_flags = i830_check_flags,
	.chipset_flush = i830_chipset_flush,
};
static const struct intel_gtt_driver i915_gtt_driver = {
	.gen = 3,
	.has_pgtbl_enable = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	/* i945 is the last gpu to need phys mem (for overlay and cursors). */
	.write_entry = i830_write_entry,
	.dma_mask_size = 32,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g33_gtt_driver = {
	.gen = 3,
	.is_g33 = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver pineview_gtt_driver = {
	.gen = 3,
	.is_pineview = 1, .is_g33 = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver i965_gtt_driver = {
	.gen = 4,
	.has_pgtbl_enable = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g4x_gtt_driver = {
	.gen = 5,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver ironlake_gtt_driver = {
	.gen = 5,
	.is_ironlake = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver sandybridge_gtt_driver = {
	.gen = 6,
	.setup = i9xx_setup,
	.cleanup = gen6_cleanup,
	.write_entry = gen6_write_entry,
	.dma_mask_size = 40,
	.check_flags = gen6_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
1447
/* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
 * driver and gmch_driver must be non-null, and find_gmch will determine
 * which one should be used if a gmch_chip_id is present.
 * The table is terminated by an all-NULL sentinel entry.
 */
static const struct intel_gtt_driver_description {
	unsigned int gmch_chip_id;	/* PCI device id of the IGD function */
	char *name;			/* human-readable chipset name */
	const struct agp_bridge_driver *gmch_driver;
	const struct intel_gtt_driver *gtt_driver;
} intel_gtt_chipsets[] = {
	{ PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver,
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver,
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver,
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver,
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
		&intel_fake_agp_driver, &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
		&intel_fake_agp_driver, &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82854_IG, "854",
		&intel_fake_agp_driver, &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
		&intel_fake_agp_driver, &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82865_IG, "865",
		&intel_fake_agp_driver, &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
		&intel_fake_agp_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
		&intel_fake_agp_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
		&intel_fake_agp_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
		&intel_fake_agp_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
		&intel_fake_agp_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
		&intel_fake_agp_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
		&intel_fake_agp_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
		&intel_fake_agp_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
		&intel_fake_agp_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
		&intel_fake_agp_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
		&intel_fake_agp_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
		&intel_fake_agp_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G33_IG, "G33",
		&intel_fake_agp_driver, &g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
		&intel_fake_agp_driver, &g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
		&intel_fake_agp_driver, &g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
		&intel_fake_agp_driver, &pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
		&intel_fake_agp_driver, &pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_IG, "B43",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G41_IG, "G41",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
	    "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
	    "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ 0, NULL, NULL }
};
1544
1545 static int find_gmch(u16 device)
1546 {
1547         struct pci_dev *gmch_device;
1548
1549         gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
1550         if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
1551                 gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
1552                                              device, gmch_device);
1553         }
1554
1555         if (!gmch_device)
1556                 return 0;
1557
1558         intel_private.pcidev = gmch_device;
1559         return 1;
1560 }
1561
/*
 * Probe @pdev against the chipset table; on a match, take a reference
 * on the bridge device, set up the graphics device's DMA masks and
 * initialize the GTT.  Returns 1 on success, 0 if no supported GMCH
 * was found or GTT initialization failed.
 */
int intel_gmch_probe(struct pci_dev *pdev,
				      struct agp_bridge_data *bridge)
{
	int i, mask;
	bridge->driver = NULL;

	for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
		if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
			bridge->driver =
				intel_gtt_chipsets[i].gmch_driver;
			intel_private.driver =
				intel_gtt_chipsets[i].gtt_driver;
			break;
		}
	}

	if (!bridge->driver)
		return 0;

	bridge->dev_private_data = &intel_private;
	bridge->dev = pdev;

	intel_private.bridge_dev = pci_dev_get(pdev);

	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);

	/* DMA mask failure is only a warning; fall back to the default. */
	mask = intel_private.driver->dma_mask_size;
	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
		dev_err(&intel_private.pcidev->dev,
			"set gfx device dma mask %d-bit failed!\n", mask);
	else
		pci_set_consistent_dma_mask(intel_private.pcidev,
					    DMA_BIT_MASK(mask));

	/* The i810/i815 parts use the real AGP paths; no GTT init. */
	if (bridge->driver == &intel_810_driver)
		return 1;

	if (intel_gtt_init() != 0)
		return 0;

	return 1;
}
EXPORT_SYMBOL(intel_gmch_probe);
1605
1606 const struct intel_gtt *intel_gtt_get(void)
1607 {
1608         return &intel_private.base;
1609 }
1610 EXPORT_SYMBOL(intel_gtt_get);
1611
1612 void intel_gmch_remove(struct pci_dev *pdev)
1613 {
1614         if (intel_private.pcidev)
1615                 pci_dev_put(intel_private.pcidev);
1616         if (intel_private.bridge_dev)
1617                 pci_dev_put(intel_private.bridge_dev);
1618 }
1619 EXPORT_SYMBOL(intel_gmch_remove);
1620
1621 MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
1622 MODULE_LICENSE("GPL and additional rights");