/*
 * Intel GTT (Graphics Translation Table) routines
 *
 * Caveat: This driver implements the linux agp interface, but this is far from
 * an agp driver! GTT support ended up here for purely historical reasons: The
 * old userspace intel graphics drivers needed an interface to map memory into
 * the GTT. And the drm provides a default interface for graphic devices sitting
 * on an agp port. So it made sense to fake the GTT support as an agp port to
 * avoid having to create a new api.
 *
 * With gem this does not make much sense anymore, just needlessly complicates
 * the code. But as long as the old graphics stack is still supported, it's
 * stuck here.
 *
 * /fairy-tale-mode off
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
#include <linux/intel-gtt.h>
#include <drm/intel-gtt.h>

/*
 * If we have Intel graphics, we're not going to have anything other than
 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
 * on the Intel IOMMU support (CONFIG_DMAR).
 * Only newer chipsets need to bother with this, of course.
 */
#ifdef CONFIG_DMAR
#define USE_PCI_DMA_API 1
#else
#define USE_PCI_DMA_API 0
#endif

/* Max amount of stolen space, anything above will be returned to Linux */
int intel_max_stolen = 32 * 1024 * 1024;
EXPORT_SYMBOL(intel_max_stolen);

static const struct aper_size_info_fixed intel_i810_sizes[] =
{
	{64, 16384, 4},
	/* The 32M mode still requires a 64k gatt */
	{32, 8192, 4}
};

#define AGP_DCACHE_MEMORY	1
#define AGP_PHYS_MEMORY		2
#define INTEL_AGP_CACHED_MEMORY 3

static struct gatt_mask intel_i810_masks[] =
{
	{.mask = I810_PTE_VALID, .type = 0},
	{.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
	{.mask = I810_PTE_VALID, .type = 0},
	{.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
	 .type = INTEL_AGP_CACHED_MEMORY}
};

#define INTEL_AGP_UNCACHED_MEMORY		0
#define INTEL_AGP_CACHED_MEMORY_LLC		1
#define INTEL_AGP_CACHED_MEMORY_LLC_GFDT	2
#define INTEL_AGP_CACHED_MEMORY_LLC_MLC		3
#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT	4

static struct gatt_mask intel_gen6_masks[] =
{
	{.mask = I810_PTE_VALID | GEN6_PTE_UNCACHED,
	 .type = INTEL_AGP_UNCACHED_MEMORY },
	{.mask = I810_PTE_VALID | GEN6_PTE_LLC,
	 .type = INTEL_AGP_CACHED_MEMORY_LLC },
	{.mask = I810_PTE_VALID | GEN6_PTE_LLC | GEN6_PTE_GFDT,
	 .type = INTEL_AGP_CACHED_MEMORY_LLC_GFDT },
	{.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC,
	 .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC },
	{.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC | GEN6_PTE_GFDT,
	 .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT },
};

struct intel_gtt_driver {
	unsigned int gen : 8;
	unsigned int is_g33 : 1;
	unsigned int is_pineview : 1;
	unsigned int is_ironlake : 1;
	/* Chipset specific GTT setup */
	int (*setup)(void);
	void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
	/* Flags is a more or less chipset specific opaque value.
	 * For chipsets that need to support old ums (non-gem) code, this
	 * needs to be identical to the various supported agp memory types! */
	bool (*check_flags)(unsigned int flags);
};

static struct _intel_private {
	struct intel_gtt base;
	const struct intel_gtt_driver *driver;
	struct pci_dev *pcidev;	/* device one */
	struct pci_dev *bridge_dev;
	u8 __iomem *registers;
	phys_addr_t gtt_bus_addr;
	phys_addr_t gma_bus_addr;
	phys_addr_t pte_bus_addr;
	u32 __iomem *gtt;	/* I915G */
	int num_dcache_entries;
	union {
		void __iomem *i9xx_flush_page;
		void *i8xx_flush_page;
	};
	struct page *i8xx_page;
	struct resource ifp_resource;
	int resource_valid;
	struct page *scratch_page;
	dma_addr_t scratch_page_dma;
} intel_private;

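/* Shorthands for the identification bits of the chipset driver that got
 * bound to intel_private at probe time. */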
#define INTEL_GTT_GEN	intel_private.driver->gen
#define IS_G33		intel_private.driver->is_g33
#define IS_PINEVIEW	intel_private.driver->is_pineview
#define IS_IRONLAKE	intel_private.driver->is_ironlake

static void intel_agp_free_sglist(struct agp_memory *mem)
{
	struct sg_table st;

	st.sgl = mem->sg_list;
	st.orig_nents = st.nents = mem->page_count;

	sg_free_table(&st);

	mem->sg_list = NULL;
	mem->num_sg = 0;
}

static int intel_agp_map_memory(struct agp_memory *mem)
{
	struct sg_table st;
	struct scatterlist *sg;
	int i;

	if (mem->sg_list)
		return 0; /* already mapped (for e.g. resume) */

	DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);

	if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
		goto err;

	mem->sg_list = sg = st.sgl;

	for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
		sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);

	mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
				 mem->page_count, PCI_DMA_BIDIRECTIONAL);
	if (unlikely(!mem->num_sg))
		goto err;

	return 0;

err:
	sg_free_table(&st);
	return -ENOMEM;
}

static void intel_agp_unmap_memory(struct agp_memory *mem)
{
	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);

	pci_unmap_sg(intel_private.pcidev, mem->sg_list,
		     mem->page_count, PCI_DMA_BIDIRECTIONAL);
	intel_agp_free_sglist(mem);
}

#if USE_PCI_DMA_API
static void intel_agp_insert_sg_entries(struct agp_memory *mem,
					off_t pg_start, int mask_type)
{
	struct scatterlist *sg;
	int i, j;

	j = pg_start;

	WARN_ON(!mem->num_sg);

	if (mem->num_sg == mem->page_count) {
		for_each_sg(mem->sg_list, sg, mem->page_count, i) {
			writel(agp_bridge->driver->mask_memory(agp_bridge,
					sg_dma_address(sg), mask_type),
			       intel_private.gtt+j);
			j++;
		}
	} else {
		/* sg may merge pages, but we have to separate
		 * per-page addr for GTT */
		unsigned int len, m;

		for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
			len = sg_dma_len(sg) / PAGE_SIZE;
			for (m = 0; m < len; m++) {
				writel(agp_bridge->driver->mask_memory(agp_bridge,
						sg_dma_address(sg) + m * PAGE_SIZE,
						mask_type),
				       intel_private.gtt+j);
				j++;
			}
		}
	}
	readl(intel_private.gtt+j-1);
}
#else
static void intel_agp_insert_sg_entries(struct agp_memory *mem,
					off_t pg_start, int mask_type)
{
	int i, j;

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(agp_bridge->driver->mask_memory(agp_bridge,
				page_to_phys(mem->pages[i]), mask_type),
		       intel_private.gtt+j);
	}

	readl(intel_private.gtt+j-1);
}
#endif

static int intel_i810_fetch_size(void)
{
	u32 smram_miscc;
	struct aper_size_info_fixed *values;

	pci_read_config_dword(intel_private.bridge_dev,
			      I810_SMRAM_MISCC, &smram_miscc);
	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);

	if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
		dev_warn(&intel_private.bridge_dev->dev, "i810 is disabled\n");
		return 0;
	}
	if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
		agp_bridge->current_size = (void *) (values + 1);
		agp_bridge->aperture_size_idx = 1;
		return values[1].size;
	} else {
		agp_bridge->current_size = (void *) (values);
		agp_bridge->aperture_size_idx = 0;
		return values[0].size;
	}

	return 0;
}

static int intel_i810_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	if (!intel_private.registers) {
		pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
		temp &= 0xfff80000;

		intel_private.registers = ioremap(temp, 128 * 4096);
		if (!intel_private.registers) {
			dev_err(&intel_private.pcidev->dev,
				"can't remap memory\n");
			return -ENOMEM;
		}
	}

	if ((readl(intel_private.registers+I810_DRAM_CTL)
	    & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
		/* This will need to be dynamically assigned */
		dev_info(&intel_private.pcidev->dev,
			 "detected 4MB dedicated video ram\n");
		intel_private.num_dcache_entries = 1024;
	}
	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		for (i = 0; i < current_size->num_entries; i++) {
			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI posting. */
	}
	global_cache_flush();
	return 0;
}

static void intel_i810_cleanup(void)
{
	writel(0, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers);	/* PCI Posting. */
	iounmap(intel_private.registers);
}

static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	return;
}

/* Exists to support ARGB cursors */
static struct page *i8xx_alloc_pages(void)
{
	struct page *page;

	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
	if (page == NULL)
		return NULL;

	if (set_pages_uc(page, 4) < 0) {
		set_pages_wb(page, 4);
		__free_pages(page, 2);
		return NULL;
	}
	get_page(page);
	atomic_inc(&agp_bridge->current_memory_agp);
	return page;
}

static void i8xx_destroy_pages(struct page *page)
{
	if (page == NULL)
		return;

	set_pages_wb(page, 4);
	put_page(page);
	__free_pages(page, 2);
	atomic_dec(&agp_bridge->current_memory_agp);
}

static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
					int type)
{
	if (type < AGP_USER_TYPES)
		return type;
	else if (type == AGP_USER_CACHED_MEMORY)
		return INTEL_AGP_CACHED_MEMORY;
	else
		return 0;
}

static int intel_gen6_type_to_mask_type(struct agp_bridge_data *bridge,
					int type)
{
	unsigned int type_mask = type & ~AGP_USER_CACHED_MEMORY_GFDT;
	unsigned int gfdt = type & AGP_USER_CACHED_MEMORY_GFDT;

	if (type_mask == AGP_USER_UNCACHED_MEMORY)
		return INTEL_AGP_UNCACHED_MEMORY;
	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC)
		return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT :
			      INTEL_AGP_CACHED_MEMORY_LLC_MLC;
	else /* set 'normal'/'cached' to LLC by default */
		return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_GFDT :
			      INTEL_AGP_CACHED_MEMORY_LLC;
}

static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i, j, num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;

	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
			ret = -EBUSY;
			goto out_err;
		}
	}

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	switch (mask_type) {
	case AGP_DCACHE_MEMORY:
		if (!mem->is_flushed)
			global_cache_flush();
		for (i = pg_start; i < (pg_start + mem->page_count); i++) {
			writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
			       intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
		break;
	case AGP_PHYS_MEMORY:
	case AGP_NORMAL_MEMORY:
		if (!mem->is_flushed)
			global_cache_flush();
		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
			writel(agp_bridge->driver->mask_memory(agp_bridge,
					page_to_phys(mem->pages[i]), mask_type),
			       intel_private.registers+I810_PTE_BASE+(j*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
		break;
	default:
		goto out_err;
	}

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}

static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i;

	if (mem->page_count == 0)
		return 0;

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
	}
	readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));

	return 0;
}

/*
 * The i810/i830 requires a physical address to program its mouse
 * pointer into hardware.
 * However the Xserver still writes to it through the agp aperture.
 */
static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
{
	struct agp_memory *new;
	struct page *page;

	switch (pg_count) {
	case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
		break;
	case 4:
		/* kludge to get 4 physical pages for ARGB cursor */
		page = i8xx_alloc_pages();
		break;
	default:
		return NULL;
	}

	if (page == NULL)
		return NULL;

	new = agp_create_memory(pg_count);
	if (new == NULL)
		return NULL;

	new->pages[0] = page;
	if (pg_count == 4) {
		/* kludge to get 4 physical pages for ARGB cursor */
		new->pages[1] = new->pages[0] + 1;
		new->pages[2] = new->pages[1] + 1;
		new->pages[3] = new->pages[2] + 1;
	}
	new->page_count = pg_count;
	new->num_scratch_pages = pg_count;
	new->type = AGP_PHYS_MEMORY;
	new->physical = page_to_phys(new->pages[0]);
	return new;
}

static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
{
	struct agp_memory *new;

	if (type == AGP_DCACHE_MEMORY) {
		if (pg_count != intel_private.num_dcache_entries)
			return NULL;

		new = agp_create_memory(1);
		if (new == NULL)
			return NULL;

		new->type = AGP_DCACHE_MEMORY;
		new->page_count = pg_count;
		new->num_scratch_pages = 0;
		agp_free_page_array(new);
		return new;
	}
	if (type == AGP_PHYS_MEMORY)
		return alloc_agpphysmem_i8xx(pg_count, type);
	return NULL;
}

static void intel_i810_free_by_type(struct agp_memory *curr)
{
	agp_free_key(curr->key);
	if (curr->type == AGP_PHYS_MEMORY) {
		if (curr->page_count == 4)
			i8xx_destroy_pages(curr->pages[0]);
		else {
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_UNMAP);
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_FREE);
		}
		agp_free_page_array(curr);
	}
	kfree(curr);
}

static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
					    dma_addr_t addr, int type)
{
	/* Type checking must be done elsewhere */
	return addr | bridge->driver->masks[type].mask;
}

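/* The scratch page backs every GTT entry that has no real memory behind
 * it, so stray GPU accesses hit a harmless, known page instead of
 * whatever happens to sit at that bus address. */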
static int intel_gtt_setup_scratch_page(void)
{
	struct page *page;
	dma_addr_t dma_addr;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return -ENOMEM;
	get_page(page);
	set_pages_uc(page, 1);

	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
		dma_addr = pci_map_page(intel_private.pcidev, page, 0,
					PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
			return -EINVAL;

		intel_private.scratch_page_dma = dma_addr;
	} else
		intel_private.scratch_page_dma = page_to_phys(page);

	intel_private.scratch_page = page;

	return 0;
}

static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
	{128, 32768, 5},
	/* The 64M mode still requires a 128k gatt */
	{64, 16384, 5},
	{256, 65536, 6},
	{512, 131072, 7},
};

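/* Stolen memory is main memory the BIOS carved out for the GPU at boot.
 * Decode the GMCH control word to find out how much was taken, then
 * subtract the overhead entries: the pages occupied by the GTT itself
 * (one entry maps a page, one PTE is 4 bytes) plus the BIOS popup page. */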
static unsigned int intel_gtt_stolen_entries(void)
{
	u16 gmch_ctrl;
	u8 rdct;
	int local = 0;
	static const int ddt[4] = { 0, 16, 32, 64 };
	unsigned int overhead_entries, stolen_entries;
	unsigned int stolen_size = 0;

	pci_read_config_word(intel_private.bridge_dev,
			     I830_GMCH_CTRL, &gmch_ctrl);

	if (INTEL_GTT_GEN > 4 || IS_PINEVIEW)
		overhead_entries = 0;
	else
		overhead_entries = intel_private.base.gtt_mappable_entries
			/ 1024;

	overhead_entries += 1; /* BIOS popup */

	if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
	    intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
		case I830_GMCH_GMS_STOLEN_512:
			stolen_size = KB(512);
			break;
		case I830_GMCH_GMS_STOLEN_1024:
			stolen_size = MB(1);
			break;
		case I830_GMCH_GMS_STOLEN_8192:
			stolen_size = MB(8);
			break;
		case I830_GMCH_GMS_LOCAL:
			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
			stolen_size = (I830_RDRAM_ND(rdct) + 1) *
					MB(ddt[I830_RDRAM_DDT(rdct)]);
			local = 1;
			break;
		default:
			stolen_size = 0;
			break;
		}
	} else if (INTEL_GTT_GEN == 6) {
		/*
		 * SandyBridge has new memory control reg at 0x50.w
		 */
		u16 snb_gmch_ctl;
		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
		case SNB_GMCH_GMS_STOLEN_32M:
			stolen_size = MB(32);
			break;
		case SNB_GMCH_GMS_STOLEN_64M:
			stolen_size = MB(64);
			break;
		case SNB_GMCH_GMS_STOLEN_96M:
			stolen_size = MB(96);
			break;
		case SNB_GMCH_GMS_STOLEN_128M:
			stolen_size = MB(128);
			break;
		case SNB_GMCH_GMS_STOLEN_160M:
			stolen_size = MB(160);
			break;
		case SNB_GMCH_GMS_STOLEN_192M:
			stolen_size = MB(192);
			break;
		case SNB_GMCH_GMS_STOLEN_224M:
			stolen_size = MB(224);
			break;
		case SNB_GMCH_GMS_STOLEN_256M:
			stolen_size = MB(256);
			break;
		case SNB_GMCH_GMS_STOLEN_288M:
			stolen_size = MB(288);
			break;
		case SNB_GMCH_GMS_STOLEN_320M:
			stolen_size = MB(320);
			break;
		case SNB_GMCH_GMS_STOLEN_352M:
			stolen_size = MB(352);
			break;
		case SNB_GMCH_GMS_STOLEN_384M:
			stolen_size = MB(384);
			break;
		case SNB_GMCH_GMS_STOLEN_416M:
			stolen_size = MB(416);
			break;
		case SNB_GMCH_GMS_STOLEN_448M:
			stolen_size = MB(448);
			break;
		case SNB_GMCH_GMS_STOLEN_480M:
			stolen_size = MB(480);
			break;
		case SNB_GMCH_GMS_STOLEN_512M:
			stolen_size = MB(512);
			break;
		}
	} else {
		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
		case I855_GMCH_GMS_STOLEN_1M:
			stolen_size = MB(1);
			break;
		case I855_GMCH_GMS_STOLEN_4M:
			stolen_size = MB(4);
			break;
		case I855_GMCH_GMS_STOLEN_8M:
			stolen_size = MB(8);
			break;
		case I855_GMCH_GMS_STOLEN_16M:
			stolen_size = MB(16);
			break;
		case I855_GMCH_GMS_STOLEN_32M:
			stolen_size = MB(32);
			break;
		case I915_GMCH_GMS_STOLEN_48M:
			stolen_size = MB(48);
			break;
		case I915_GMCH_GMS_STOLEN_64M:
			stolen_size = MB(64);
			break;
		case G33_GMCH_GMS_STOLEN_128M:
			stolen_size = MB(128);
			break;
		case G33_GMCH_GMS_STOLEN_256M:
			stolen_size = MB(256);
			break;
		case INTEL_GMCH_GMS_STOLEN_96M:
			stolen_size = MB(96);
			break;
		case INTEL_GMCH_GMS_STOLEN_160M:
			stolen_size = MB(160);
			break;
		case INTEL_GMCH_GMS_STOLEN_224M:
			stolen_size = MB(224);
			break;
		case INTEL_GMCH_GMS_STOLEN_352M:
			stolen_size = MB(352);
			break;
		default:
			stolen_size = 0;
			break;
		}
	}

	if (!local && stolen_size > intel_max_stolen) {
		dev_info(&intel_private.bridge_dev->dev,
			 "detected %dK stolen memory, trimming to %dK\n",
			 stolen_size / KB(1), intel_max_stolen / KB(1));
		stolen_size = intel_max_stolen;
	} else if (stolen_size > 0) {
		dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
			 stolen_size / KB(1), local ? "local" : "stolen");
	} else {
		dev_info(&intel_private.bridge_dev->dev,
			 "no pre-allocated video memory detected\n");
		stolen_size = 0;
	}

	stolen_entries = stolen_size/KB(4) - overhead_entries;

	return stolen_entries;
}

static unsigned int intel_gtt_total_entries(void)
{
	int size;

	if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) {
		u32 pgetbl_ctl;
		pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

		switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
		case I965_PGETBL_SIZE_128KB:
			size = KB(128);
			break;
		case I965_PGETBL_SIZE_256KB:
			size = KB(256);
			break;
		case I965_PGETBL_SIZE_512KB:
			size = KB(512);
			break;
		case I965_PGETBL_SIZE_1MB:
			size = KB(1024);
			break;
		case I965_PGETBL_SIZE_2MB:
			size = KB(2048);
			break;
		case I965_PGETBL_SIZE_1_5MB:
			size = KB(1024 + 512);
			break;
		default:
			dev_info(&intel_private.pcidev->dev,
				 "unknown page table size, assuming 512KB\n");
			size = KB(512);
		}

		return size/4;
	} else if (INTEL_GTT_GEN == 6) {
		u16 snb_gmch_ctl;

		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
		default:
		case SNB_GTT_SIZE_0M:
			printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
			size = MB(0);
			break;
		case SNB_GTT_SIZE_1M:
			size = MB(1);
			break;
		case SNB_GTT_SIZE_2M:
			size = MB(2);
			break;
		}

		return size/4;
	} else {
		/* On previous hardware, the GTT size was just what was
		 * required to map the aperture.
		 */
		return intel_private.base.gtt_mappable_entries;
	}
}

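/* The mappable entry count is the number of GTT entries covered by the
 * CPU-visible graphics aperture: on gen2 the aperture size is encoded in
 * the GMCH control word, on 9xx+ it is simply the length of the
 * aperture BAR. */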
static unsigned int intel_gtt_mappable_entries(void)
{
	unsigned int aperture_size;

	if (INTEL_GTT_GEN == 2) {
		u16 gmch_ctrl;

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);

		if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
			aperture_size = MB(64);
		else
			aperture_size = MB(128);
	} else {
		/* 9xx supports large sizes, just look at the length */
		aperture_size = pci_resource_len(intel_private.pcidev, 2);
	}

	return aperture_size >> PAGE_SHIFT;
}

static void intel_gtt_teardown_scratch_page(void)
{
	set_pages_wb(intel_private.scratch_page, 1);
	pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	put_page(intel_private.scratch_page);
	__free_page(intel_private.scratch_page);
}

static void intel_gtt_cleanup(void)
{
	if (intel_private.i9xx_flush_page)
		iounmap(intel_private.i9xx_flush_page);
	if (intel_private.resource_valid)
		release_resource(&intel_private.ifp_resource);
	intel_private.ifp_resource.start = 0;
	intel_private.resource_valid = 0;
	iounmap(intel_private.gtt);
	iounmap(intel_private.registers);

	intel_gtt_teardown_scratch_page();
}

static int intel_gtt_init(void)
{
	u32 gtt_map_size;
	int ret;

	ret = intel_private.driver->setup();
	if (ret != 0)
		return ret;

	intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
	intel_private.base.gtt_total_entries = intel_gtt_total_entries();

	gtt_map_size = intel_private.base.gtt_total_entries * 4;

	intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
				    gtt_map_size);
	if (!intel_private.gtt) {
		iounmap(intel_private.registers);
		return -ENOMEM;
	}

	global_cache_flush();   /* FIXME: ? */

	/* we have to call this as early as possible after the MMIO base address is known */
	intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries();
	if (intel_private.base.gtt_stolen_entries == 0) {
		iounmap(intel_private.registers);
		iounmap(intel_private.gtt);
		return -ENOMEM;
	}

	ret = intel_gtt_setup_scratch_page();
	if (ret != 0) {
		intel_gtt_cleanup();
		return ret;
	}

	return 0;
}

static int intel_fake_agp_fetch_size(void)
{
	int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
	unsigned int aper_size;
	int i;

	aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
		    / MB(1);

	for (i = 0; i < num_sizes; i++) {
		if (aper_size == intel_fake_agp_sizes[i].size) {
			agp_bridge->current_size =
				(void *) (intel_fake_agp_sizes + i);
			return aper_size;
		}
	}

	return 0;
}

static void intel_i830_fini_flush(void)
{
	kunmap(intel_private.i8xx_page);
	intel_private.i8xx_flush_page = NULL;
	unmap_page_from_agp(intel_private.i8xx_page);

	__free_page(intel_private.i8xx_page);
	intel_private.i8xx_page = NULL;
}

static void intel_i830_setup_flush(void)
{
	/* return if we've already set the flush mechanism up */
	if (intel_private.i8xx_page)
		return;

	intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
	if (!intel_private.i8xx_page)
		return;

	intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
	if (!intel_private.i8xx_flush_page)
		intel_i830_fini_flush();
}

/* The chipset_flush interface needs to get data that has already been
 * flushed out of the CPU all the way out to main memory, because the GPU
 * doesn't snoop those buffers.
 *
 * The 8xx series doesn't have the same lovely interface for flushing the
 * chipset write buffers that the later chips do. According to the 865
 * specs, it's 64 octwords, or 1KB. So, to get those previous things in
 * that buffer out, we just fill 1KB and clflush it out, on the assumption
 * that it'll push whatever was in there out. It appears to work.
 */
static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
{
	unsigned int *pg = intel_private.i8xx_flush_page;

	memset(pg, 0, 1024);

	if (cpu_has_clflush)
		clflush_cache_range(pg, 1024);
	else if (wbinvd_on_all_cpus() != 0)
		printk(KERN_ERR "Timed out waiting for cache flush.\n");
}

static void i830_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	u32 pte_flags = I810_PTE_VALID;

	switch (flags) {
	case AGP_DCACHE_MEMORY:
		pte_flags |= I810_PTE_LOCAL;
		break;
	case AGP_USER_CACHED_MEMORY:
		pte_flags |= I830_PTE_SYSTEM_CACHED;
		break;
	}

	writel(addr | pte_flags, intel_private.gtt + entry);
}

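/* Latch the aperture base from config space, set the enable bit in the
 * GMCH control word and point PGETBL_CTL at the page table. */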
static void intel_enable_gtt(void)
{
	u32 gma_addr;
	u16 gmch_ctrl;

	if (INTEL_GTT_GEN == 2)
		pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
				      &gma_addr);
	else
		pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
				      &gma_addr);

	intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);

	pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl);
	gmch_ctrl |= I830_GMCH_ENABLED;
	pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl);

	writel(intel_private.pte_bus_addr|I810_PGETBL_ENABLED,
	       intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */
}

static int i830_setup(void)
{
	u32 reg_addr;

	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
	reg_addr &= 0xfff80000;

	intel_private.registers = ioremap(reg_addr, KB(64));
	if (!intel_private.registers)
		return -ENOMEM;

	intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
	intel_private.pte_bus_addr =
		readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;

	intel_i830_setup_flush();

	return 0;
}

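/* On these GMCHs the page table lives in stolen memory and is set up by
 * the BIOS, so the fake agp bridge has no gatt of its own to allocate
 * or free. */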
static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
{
	agp_bridge->gatt_table_real = NULL;
	agp_bridge->gatt_table = NULL;
	agp_bridge->gatt_bus_addr = 0;

	return 0;
}

static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
{
	return 0;
}

static int intel_fake_agp_configure(void)
{
	int i;

	intel_enable_gtt();

	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;

	for (i = intel_private.base.gtt_stolen_entries;
	     i < intel_private.base.gtt_total_entries; i++) {
		intel_private.driver->write_entry(intel_private.scratch_page_dma,
						  i, 0);
	}
	readl(intel_private.gtt+i-1);	/* PCI Posting. */

	global_cache_flush();

	return 0;
}

static bool i830_check_flags(unsigned int flags)
{
	switch (flags) {
	case 0:
	case AGP_PHYS_MEMORY:
	case AGP_USER_CACHED_MEMORY:
	case AGP_USER_MEMORY:
		return true;
	}

	return false;
}

static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
					unsigned int sg_len,
					unsigned int pg_start,
					unsigned int flags)
{
	struct scatterlist *sg;
	unsigned int len, m;
	int i, j;

	j = pg_start;

	/* sg may merge pages, but we have to separate
	 * per-page addr for GTT */
	for_each_sg(sg_list, sg, sg_len, i) {
		len = sg_dma_len(sg) >> PAGE_SHIFT;
		for (m = 0; m < len; m++) {
			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
			intel_private.driver->write_entry(addr,
							  j, flags);
			j++;
		}
	}
	readl(intel_private.gtt+j-1);
}

static int intel_fake_agp_insert_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
{
	int i, j;
	int ret = -EINVAL;

	if (mem->page_count == 0)
		goto out;

	if (pg_start < intel_private.base.gtt_stolen_entries) {
		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
			   "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n",
			   pg_start, intel_private.base.gtt_stolen_entries);

		dev_info(&intel_private.pcidev->dev,
			 "trying to insert into local/stolen memory\n");
		goto out_err;
	}

	if ((pg_start + mem->page_count) > intel_private.base.gtt_total_entries)
		goto out_err;

	if (type != mem->type)
		goto out_err;

	if (!intel_private.driver->check_flags(type))
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
		ret = intel_agp_map_memory(mem);
		if (ret != 0)
			return ret;

		intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
					    pg_start, type);
	} else {
		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
			dma_addr_t addr = page_to_phys(mem->pages[i]);
			intel_private.driver->write_entry(addr,
							  j, type);
		}
		readl(intel_private.gtt+j-1);
	}

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}

static int intel_fake_agp_remove_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
{
	int i;

	if (mem->page_count == 0)
		return 0;

	if (pg_start < intel_private.base.gtt_stolen_entries) {
		dev_info(&intel_private.pcidev->dev,
			 "trying to disable local/stolen memory\n");
		return -EINVAL;
	}

	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2)
		intel_agp_unmap_memory(mem);

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		intel_private.driver->write_entry(intel_private.scratch_page_dma,
						  i, 0);
	}
	readl(intel_private.gtt+i-1);

	return 0;
}

static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
						       int type)
{
	if (type == AGP_PHYS_MEMORY)
		return alloc_agpphysmem_i8xx(pg_count, type);
	/* always return NULL for other allocation types for now */
	return NULL;
}

static int intel_alloc_chipset_flush_resource(void)
{
	int ret;

	ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
				     PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
				     pcibios_align_resource, intel_private.bridge_dev);

	return ret;
}

static void intel_i915_setup_chipset_flush(void)
{
	int ret;
	u32 temp;

	pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
	if (!(temp & 0x1)) {
		intel_alloc_chipset_flush_resource();
		intel_private.resource_valid = 1;
		pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		temp &= ~1;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = temp;
		intel_private.ifp_resource.end = temp + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp node, some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}

static void intel_i965_g33_setup_chipset_flush(void)
{
	u32 temp_hi, temp_lo;
	int ret;

	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);

	if (!(temp_lo & 0x1)) {
		intel_alloc_chipset_flush_resource();

		intel_private.resource_valid = 1;
		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
			upper_32_bits(intel_private.ifp_resource.start));
		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		u64 l64;

		temp_lo &= ~0x1;
		l64 = ((u64)temp_hi << 32) | temp_lo;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = l64;
		intel_private.ifp_resource.end = l64 + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp node, some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}

static void intel_i9xx_setup_flush(void)
{
	/* return if already configured */
	if (intel_private.ifp_resource.start)
		return;

	if (INTEL_GTT_GEN == 6)
		return;

	/* setup a resource for this object */
	intel_private.ifp_resource.name = "Intel Flush Page";
	intel_private.ifp_resource.flags = IORESOURCE_MEM;

	/* Setup chipset flush for 915 */
	if (IS_G33 || INTEL_GTT_GEN >= 4) {
		intel_i965_g33_setup_chipset_flush();
	} else {
		intel_i915_setup_chipset_flush();
	}

	if (intel_private.ifp_resource.start)
		intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
	if (!intel_private.i9xx_flush_page)
		dev_err(&intel_private.pcidev->dev,
			"can't ioremap flush page - no chipset flushing\n");
}

static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
{
	if (intel_private.i9xx_flush_page)
		writel(1, intel_private.i9xx_flush_page);
}

static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if (pg_start < intel_private.base.gtt_stolen_entries) {
		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
			   "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n",
			   pg_start, intel_private.base.gtt_stolen_entries);

		dev_info(&intel_private.pcidev->dev,
			 "trying to insert into local/stolen memory\n");
		goto out_err;
	}

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;

	/* The i915 can't check the GTT for entries since it's read only;
	 * depend on the caller to make the correct offset decisions.
	 */

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	if (INTEL_GTT_GEN != 6 && mask_type != 0 &&
	    mask_type != AGP_PHYS_MEMORY &&
	    mask_type != INTEL_AGP_CACHED_MEMORY)
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	intel_agp_insert_sg_entries(mem, pg_start, mask_type);

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}

static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i;

	if (mem->page_count == 0)
		return 0;

	if (pg_start < intel_private.base.gtt_stolen_entries) {
		dev_info(&intel_private.pcidev->dev,
			 "trying to disable local/stolen memory\n");
		return -EINVAL;
	}

	for (i = pg_start; i < (mem->page_count + pg_start); i++)
		writel(agp_bridge->scratch_page, intel_private.gtt+i);

	readl(intel_private.gtt+i-1);

	return 0;
}

static void i965_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	/* Shift high bits down */
	addr |= (addr >> 28) & 0xf0;
	writel(addr | I810_PTE_VALID, intel_private.gtt + entry);
}

static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
	unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
	u32 pte_flags;

	if (type_mask == AGP_USER_UNCACHED_MEMORY)
		pte_flags = GEN6_PTE_UNCACHED;
	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
		pte_flags = GEN6_PTE_LLC;
		if (gfdt)
			pte_flags |= GEN6_PTE_GFDT;
	} else { /* set 'normal'/'cached' to LLC by default */
		pte_flags = GEN6_PTE_LLC_MLC;
		if (gfdt)
			pte_flags |= GEN6_PTE_GFDT;
	}

	/* gen6 has bit11-4 for physical addr bit39-32 */
	addr |= (addr >> 28) & 0xff0;
	writel(addr | pte_flags, intel_private.gtt + entry);
}

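/* On gen3 the GTT base lives in its own config register (I915_PTEADDR);
 * on gen4+ it sits at a fixed offset inside the MMIO BAR. */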
static int i9xx_setup(void)
{
	u32 reg_addr;

	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);

	reg_addr &= 0xfff80000;

	intel_private.registers = ioremap(reg_addr, 128 * 4096);
	if (!intel_private.registers)
		return -ENOMEM;

	if (INTEL_GTT_GEN == 3) {
		u32 gtt_addr;
		pci_read_config_dword(intel_private.pcidev,
				      I915_PTEADDR, &gtt_addr);
		intel_private.gtt_bus_addr = gtt_addr;
	} else {
		u32 gtt_offset;

		switch (INTEL_GTT_GEN) {
		case 5:
		case 6:
			gtt_offset = MB(2);
			break;
		case 4:
		default:
			gtt_offset = KB(512);
			break;
		}
		intel_private.gtt_bus_addr = reg_addr + gtt_offset;
	}

	intel_private.pte_bus_addr =
		readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;

	intel_i9xx_setup_flush();

	return 0;
}

/*
 * The i965 supports 36-bit physical addresses, but to keep
 * the format of the GTT the same, the bits that don't fit
 * in a 32-bit word are shifted down to bits 4..7.
 *
 * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
 * is always zero on 32-bit architectures, so no need to make
 * this conditional.
 */
static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
					    dma_addr_t addr, int type)
{
	/* Shift high bits down */
	addr |= (addr >> 28) & 0xf0;

	/* Type checking must be done elsewhere */
	return addr | bridge->driver->masks[type].mask;
}

static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge,
					    dma_addr_t addr, int type)
{
	/* gen6 has bit11-4 for physical addr bit39-32 */
	addr |= (addr >> 28) & 0xff0;

	/* Type checking must be done elsewhere */
	return addr | bridge->driver->masks[type].mask;
}

static const struct agp_bridge_driver intel_810_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i810_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 2,
	.needs_scratch_page	= true,
	.configure		= intel_i810_configure,
	.fetch_size		= intel_i810_fetch_size,
	.cleanup		= intel_i810_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= intel_i810_insert_entries,
	.remove_memory		= intel_i810_remove_entries,
	.alloc_by_type		= intel_i810_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
};

static const struct agp_bridge_driver intel_830_driver = {
	.owner			= THIS_MODULE,
	.size_type		= FIXED_APER_SIZE,
	.aperture_sizes		= intel_fake_agp_sizes,
	.num_aperture_sizes	= ARRAY_SIZE(intel_fake_agp_sizes),
	.configure		= intel_fake_agp_configure,
	.fetch_size		= intel_fake_agp_fetch_size,
	.cleanup		= intel_gtt_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_fake_agp_create_gatt_table,
	.free_gatt_table	= intel_fake_agp_free_gatt_table,
	.insert_memory		= intel_fake_agp_insert_entries,
	.remove_memory		= intel_fake_agp_remove_entries,
	.alloc_by_type		= intel_fake_agp_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i830_chipset_flush,
};

static const struct agp_bridge_driver intel_915_driver = {
	.owner			= THIS_MODULE,
	.size_type		= FIXED_APER_SIZE,
	.aperture_sizes		= intel_fake_agp_sizes,
	.num_aperture_sizes	= ARRAY_SIZE(intel_fake_agp_sizes),
	.configure		= intel_fake_agp_configure,
	.fetch_size		= intel_fake_agp_fetch_size,
	.cleanup		= intel_gtt_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_fake_agp_create_gatt_table,
	.free_gatt_table	= intel_fake_agp_free_gatt_table,
	.insert_memory		= intel_fake_agp_insert_entries,
	.remove_memory		= intel_fake_agp_remove_entries,
	.alloc_by_type		= intel_fake_agp_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
};

static const struct agp_bridge_driver intel_i965_driver = {
	.owner			= THIS_MODULE,
	.size_type		= FIXED_APER_SIZE,
	.aperture_sizes		= intel_fake_agp_sizes,
	.num_aperture_sizes	= ARRAY_SIZE(intel_fake_agp_sizes),
	.configure		= intel_fake_agp_configure,
	.fetch_size		= intel_fake_agp_fetch_size,
	.cleanup		= intel_gtt_cleanup,
	.mask_memory		= intel_i965_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_fake_agp_create_gatt_table,
	.free_gatt_table	= intel_fake_agp_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_fake_agp_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
#if USE_PCI_DMA_API
	.agp_map_memory		= intel_agp_map_memory,
	.agp_unmap_memory	= intel_agp_unmap_memory,
#endif
};

static const struct agp_bridge_driver intel_gen6_driver = {
	.owner			= THIS_MODULE,
	.size_type		= FIXED_APER_SIZE,
	.aperture_sizes		= intel_fake_agp_sizes,
	.num_aperture_sizes	= ARRAY_SIZE(intel_fake_agp_sizes),
	.configure		= intel_fake_agp_configure,
	.fetch_size		= intel_fake_agp_fetch_size,
	.cleanup		= intel_gtt_cleanup,
	.mask_memory		= intel_gen6_mask_memory,
	.masks			= intel_gen6_masks,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_fake_agp_create_gatt_table,
	.free_gatt_table	= intel_fake_agp_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_fake_agp_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_gen6_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
#if USE_PCI_DMA_API
	.agp_map_memory		= intel_agp_map_memory,
	.agp_unmap_memory	= intel_agp_unmap_memory,
#endif
};

static const struct agp_bridge_driver intel_g33_driver = {
	.owner			= THIS_MODULE,
	.size_type		= FIXED_APER_SIZE,
	.aperture_sizes		= intel_fake_agp_sizes,
	.num_aperture_sizes	= ARRAY_SIZE(intel_fake_agp_sizes),
	.configure		= intel_fake_agp_configure,
	.fetch_size		= intel_fake_agp_fetch_size,
	.cleanup		= intel_gtt_cleanup,
	.mask_memory		= intel_i965_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_fake_agp_create_gatt_table,
	.free_gatt_table	= intel_fake_agp_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_fake_agp_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
#if USE_PCI_DMA_API
	.agp_map_memory		= intel_agp_map_memory,
	.agp_unmap_memory	= intel_agp_unmap_memory,
#endif
};

static const struct intel_gtt_driver i8xx_gtt_driver = {
	.gen = 2,
	.setup = i830_setup,
	.write_entry = i830_write_entry,
	.check_flags = i830_check_flags,
};

static const struct intel_gtt_driver i915_gtt_driver = {
	.gen = 3,
	.setup = i9xx_setup,
	/* i945 is the last gpu to need phys mem (for overlay and cursors). */
	.write_entry = i830_write_entry,
	.check_flags = i830_check_flags,
};

static const struct intel_gtt_driver g33_gtt_driver = {
	.gen = 3,
	.is_g33 = 1,
	.setup = i9xx_setup,
	.write_entry = i965_write_entry,
};

static const struct intel_gtt_driver pineview_gtt_driver = {
	.gen = 3,
	.is_pineview = 1, .is_g33 = 1,
	.setup = i9xx_setup,
	.write_entry = i965_write_entry,
};

static const struct intel_gtt_driver i965_gtt_driver = {
	.gen = 4,
	.setup = i9xx_setup,
	.write_entry = i965_write_entry,
};

static const struct intel_gtt_driver g4x_gtt_driver = {
	.gen = 5,
	.setup = i9xx_setup,
	.write_entry = i965_write_entry,
};

static const struct intel_gtt_driver ironlake_gtt_driver = {
	.gen = 5,
	.is_ironlake = 1,
	.setup = i9xx_setup,
	.write_entry = i965_write_entry,
};

static const struct intel_gtt_driver sandybridge_gtt_driver = {
	.gen = 6,
	.setup = i9xx_setup,
	.write_entry = gen6_write_entry,
};


/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
 * gmch_driver and gtt_driver must be non-NULL; find_gmch() checks whether
 * the integrated graphics device matching gmch_chip_id is actually present.
 */
static const struct intel_gtt_driver_description {
	unsigned int gmch_chip_id;
	char *name;
	const struct agp_bridge_driver *gmch_driver;
	const struct intel_gtt_driver *gtt_driver;
} intel_gtt_chipsets[] = {
	{ PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver, NULL },
	{ PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver, NULL },
	{ PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver, NULL },
	{ PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver, NULL },
	{ PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
		&intel_830_driver, &i8xx_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
		&intel_830_driver, &i8xx_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82854_IG, "854",
		&intel_830_driver, &i8xx_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
		&intel_830_driver, &i8xx_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82865_IG, "865",
		&intel_830_driver, &i8xx_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
		&intel_915_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
		&intel_915_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
		&intel_915_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
		&intel_915_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
		&intel_915_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
		&intel_915_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
		&intel_i965_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
		&intel_i965_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
		&intel_i965_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
		&intel_i965_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
		&intel_i965_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
		&intel_i965_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G33_IG, "G33",
		&intel_g33_driver, &g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
		&intel_g33_driver, &g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
		&intel_g33_driver, &g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
		&intel_g33_driver, &pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
		&intel_g33_driver, &pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
		&intel_i965_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
		&intel_i965_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
		&intel_i965_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
		&intel_i965_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_IG, "B43",
		&intel_i965_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
		&intel_i965_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G41_IG, "G41",
		&intel_i965_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
		"HD Graphics", &intel_i965_driver, &ironlake_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
		"HD Graphics", &intel_i965_driver, &ironlake_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
		"Sandybridge", &intel_gen6_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
		"Sandybridge", &intel_gen6_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
		"Sandybridge", &intel_gen6_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
		"Sandybridge", &intel_gen6_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
		"Sandybridge", &intel_gen6_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
		"Sandybridge", &intel_gen6_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
		"Sandybridge", &intel_gen6_driver, &sandybridge_gtt_driver },
	{ 0, NULL, NULL }
};
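
/*
 * Check whether the integrated graphics device with the given PCI id is
 * present and, if so, stash a reference to it in intel_private.pcidev.
 * Returns 1 on success, 0 if the device was not found.
 */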
static int find_gmch(u16 device)
{
	struct pci_dev *gmch_device;

	gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
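	/* The IGD is not always the first function matched; if we hit a
	 * non-zero function, look for a second instance of the same id.
	 * pci_get_device() drops the reference on its 'from' argument, so
	 * the first match needs no explicit pci_dev_put(). */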
	if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
		gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
					     device, gmch_device);
	}

	if (!gmch_device)
		return 0;

	intel_private.pcidev = gmch_device;
	return 1;
}
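
/*
 * Probe for a supported GMCH: walk intel_gtt_chipsets[] until a matching
 * integrated graphics device is found, then wire up the bridge and GTT
 * drivers for that chipset. Returns 1 on success, 0 on failure.
 */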
int intel_gmch_probe(struct pci_dev *pdev,
		     struct agp_bridge_data *bridge)
{
	int i, mask;

	bridge->driver = NULL;

	for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
		if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
			bridge->driver =
				intel_gtt_chipsets[i].gmch_driver;
			intel_private.driver =
				intel_gtt_chipsets[i].gtt_driver;
			break;
		}
	}

	if (!bridge->driver)
		return 0;

	bridge->dev_private_data = &intel_private;
	bridge->dev = pdev;

	intel_private.bridge_dev = pci_dev_get(pdev);

	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
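
	/* The DMA mask width follows the PTE format: gen6 PTEs carry 40-bit
	 * addresses, i965-style PTEs 36-bit, and everything older 32-bit. */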
	if (bridge->driver->mask_memory == intel_gen6_mask_memory)
		mask = 40;
	else if (bridge->driver->mask_memory == intel_i965_mask_memory)
		mask = 36;
	else
		mask = 32;

	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
		dev_err(&intel_private.pcidev->dev,
			"set gfx device dma mask %d-bit failed!\n", mask);
	else
		pci_set_consistent_dma_mask(intel_private.pcidev,
					    DMA_BIT_MASK(mask));
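
	/* i810/i815 are handled entirely by the old AGP bridge driver and
	 * do not use the common GTT setup below. */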
	if (bridge->driver == &intel_810_driver)
		return 1;

	if (intel_gtt_init() != 0)
		return 0;

	return 1;
}
EXPORT_SYMBOL(intel_gmch_probe);
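
/*
 * Accessor for the probed GTT description. A minimal sketch of how an
 * external caller might consume these exports (hypothetical code, not
 * part of this file; bridge_pdev/bridge are assumed to come from the
 * caller's AGP bridge setup):
 *
 *	struct intel_gtt *gtt;
 *
 *	if (!intel_gmch_probe(bridge_pdev, bridge))
 *		return -ENODEV;
 *	gtt = intel_gtt_get();
 *	... read e.g. gtt->gtt_total_entries and gtt->stolen_size ...
 */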
struct intel_gtt *intel_gtt_get(void)
{
	return &intel_private.base;
}
EXPORT_SYMBOL(intel_gtt_get);
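
/*
 * Drop the device references taken in find_gmch() and intel_gmch_probe().
 */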
void intel_gmch_remove(struct pci_dev *pdev)
{
	if (intel_private.pcidev)
		pci_dev_put(intel_private.pcidev);
	if (intel_private.bridge_dev)
		pci_dev_put(intel_private.bridge_dev);
}
EXPORT_SYMBOL(intel_gmch_remove);

MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_LICENSE("GPL and additional rights");