generic.c 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451
  1. /*
  2. * AGPGART driver.
  3. * Copyright (C) 2004 Silicon Graphics, Inc.
  4. * Copyright (C) 2002-2005 Dave Jones.
  5. * Copyright (C) 1999 Jeff Hartmann.
  6. * Copyright (C) 1999 Precision Insight, Inc.
  7. * Copyright (C) 1999 Xi Graphics, Inc.
  8. *
  9. * Permission is hereby granted, free of charge, to any person obtaining a
  10. * copy of this software and associated documentation files (the "Software"),
  11. * to deal in the Software without restriction, including without limitation
  12. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  13. * and/or sell copies of the Software, and to permit persons to whom the
  14. * Software is furnished to do so, subject to the following conditions:
  15. *
  16. * The above copyright notice and this permission notice shall be included
  17. * in all copies or substantial portions of the Software.
  18. *
  19. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  20. * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  21. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  22. * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
  23. * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  24. * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
  25. * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  26. *
  27. * TODO:
  28. * - Allocate more than order 0 pages to avoid too much linear map splitting.
  29. */
  30. #include <linux/module.h>
  31. #include <linux/pci.h>
  32. #include <linux/init.h>
  33. #include <linux/pagemap.h>
  34. #include <linux/miscdevice.h>
  35. #include <linux/pm.h>
  36. #include <linux/agp_backend.h>
  37. #include <linux/vmalloc.h>
  38. #include <linux/dma-mapping.h>
  39. #include <linux/mm.h>
  40. #include <linux/sched.h>
  41. #include <asm/io.h>
  42. #include <asm/cacheflush.h>
  43. #include <asm/pgtable.h>
  44. #include "agp.h"
/* GATT shadow pointer; defined here, filled in by chipset drivers (see agp.h). */
__u32 *agp_gatt_table;
/* Bytes of aperture held back from users; subtracted from reported size/entries. */
int agp_memory_reserved;

/*
 * Needed by the Nforce GART driver for the time being. Would be
 * nice to do this some other way instead of needing this export.
 */
EXPORT_SYMBOL_GPL(agp_memory_reserved);
  52. /*
  53. * Generic routines for handling agp_memory structures -
  54. * They use the basic page allocation routines to do the brunt of the work.
  55. */
  56. void agp_free_key(int key)
  57. {
  58. if (key < 0)
  59. return;
  60. if (key < MAXKEY)
  61. clear_bit(key, agp_bridge->key_list);
  62. }
  63. EXPORT_SYMBOL(agp_free_key);
  64. static int agp_get_key(void)
  65. {
  66. int bit;
  67. bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
  68. if (bit < MAXKEY) {
  69. set_bit(bit, agp_bridge->key_list);
  70. return bit;
  71. }
  72. return -1;
  73. }
  74. void agp_flush_chipset(struct agp_bridge_data *bridge)
  75. {
  76. if (bridge->driver->chipset_flush)
  77. bridge->driver->chipset_flush(bridge);
  78. }
  79. EXPORT_SYMBOL(agp_flush_chipset);
  80. /*
  81. * Use kmalloc if possible for the page list. Otherwise fall back to
  82. * vmalloc. This speeds things up and also saves memory for small AGP
  83. * regions.
  84. */
  85. void agp_alloc_page_array(size_t size, struct agp_memory *mem)
  86. {
  87. mem->pages = NULL;
  88. mem->vmalloc_flag = false;
  89. if (size <= 2*PAGE_SIZE)
  90. mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NORETRY);
  91. if (mem->pages == NULL) {
  92. mem->pages = vmalloc(size);
  93. mem->vmalloc_flag = true;
  94. }
  95. }
  96. EXPORT_SYMBOL(agp_alloc_page_array);
  97. void agp_free_page_array(struct agp_memory *mem)
  98. {
  99. if (mem->vmalloc_flag) {
  100. vfree(mem->pages);
  101. } else {
  102. kfree(mem->pages);
  103. }
  104. }
  105. EXPORT_SYMBOL(agp_free_page_array);
  106. static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
  107. {
  108. struct agp_memory *new;
  109. unsigned long alloc_size = num_agp_pages*sizeof(struct page *);
  110. new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
  111. if (new == NULL)
  112. return NULL;
  113. new->key = agp_get_key();
  114. if (new->key < 0) {
  115. kfree(new);
  116. return NULL;
  117. }
  118. agp_alloc_page_array(alloc_size, new);
  119. if (new->pages == NULL) {
  120. agp_free_key(new->key);
  121. kfree(new);
  122. return NULL;
  123. }
  124. new->num_scratch_pages = 0;
  125. return new;
  126. }
  127. struct agp_memory *agp_create_memory(int scratch_pages)
  128. {
  129. struct agp_memory *new;
  130. new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
  131. if (new == NULL)
  132. return NULL;
  133. new->key = agp_get_key();
  134. if (new->key < 0) {
  135. kfree(new);
  136. return NULL;
  137. }
  138. agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);
  139. if (new->pages == NULL) {
  140. agp_free_key(new->key);
  141. kfree(new);
  142. return NULL;
  143. }
  144. new->num_scratch_pages = scratch_pages;
  145. new->type = AGP_NORMAL_MEMORY;
  146. return new;
  147. }
  148. EXPORT_SYMBOL(agp_create_memory);
/**
 * agp_free_memory - free memory associated with an agp_memory pointer.
 *
 * @curr: agp_memory pointer to be freed.
 *
 * It is the only function that can be called when the backend is not owned
 * by the caller. (So it can free memory on client death.)
 */
void agp_free_memory(struct agp_memory *curr)
{
	size_t i;

	if (curr == NULL)
		return;

	/* Unbind first so no GATT entry points at pages we are about to free. */
	if (curr->is_bound)
		agp_unbind_memory(curr);

	/* User types are tracked generically rather than by the driver. */
	if (curr->type >= AGP_USER_TYPES) {
		agp_generic_free_by_type(curr);
		return;
	}

	/* Non-zero driver-specific types own their own teardown path. */
	if (curr->type != 0) {
		curr->bridge->driver->free_by_type(curr);
		return;
	}

	if (curr->page_count != 0) {
		if (curr->bridge->driver->agp_destroy_pages) {
			/* Batch destroy, when the driver offers it. */
			curr->bridge->driver->agp_destroy_pages(curr);
		} else {
			/*
			 * Two passes: unmap every page before any page is
			 * freed, then free them all.
			 */
			for (i = 0; i < curr->page_count; i++) {
				curr->bridge->driver->agp_destroy_page(
					curr->pages[i],
					AGP_PAGE_DESTROY_UNMAP);
			}
			for (i = 0; i < curr->page_count; i++) {
				curr->bridge->driver->agp_destroy_page(
					curr->pages[i],
					AGP_PAGE_DESTROY_FREE);
			}
		}
	}
	agp_free_key(curr->key);
	agp_free_page_array(curr);
	kfree(curr);
}
EXPORT_SYMBOL(agp_free_memory);
/* GATT entries that fit in one scratch page. */
#define ENTRIES_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long))

/**
 * agp_allocate_memory  -  allocate a group of pages of a certain type.
 *
 * @bridge: bridge to allocate the memory from.
 * @page_count: size_t argument of the number of pages
 * @type: u32 argument of the type of memory to be allocated.
 *
 * Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY which
 * maps to physical ram.  Any other type is device dependent.
 *
 * It returns NULL whenever memory is unavailable.
 */
struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
				       size_t page_count, u32 type)
{
	int scratch_pages;
	struct agp_memory *new;
	size_t i;

	if (!bridge)
		return NULL;

	/* Enforce the per-bridge AGP memory budget. */
	if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp)
		return NULL;

	if (type >= AGP_USER_TYPES) {
		new = agp_generic_alloc_user(page_count, type);
		if (new)
			new->bridge = bridge;
		return new;
	}

	if (type != 0) {
		/* Device-dependent types are delegated to the driver. */
		new = bridge->driver->alloc_by_type(page_count, type);
		if (new)
			new->bridge = bridge;
		return new;
	}

	/* Round up: one scratch page covers ENTRIES_PER_PAGE entries. */
	scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;

	new = agp_create_memory(scratch_pages);
	if (new == NULL)
		return NULL;

	if (bridge->driver->agp_alloc_pages) {
		/* Batch-allocation path, when the driver supports it. */
		if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) {
			agp_free_memory(new);
			return NULL;
		}
		new->bridge = bridge;
		return new;
	}

	for (i = 0; i < page_count; i++) {
		struct page *page = bridge->driver->agp_alloc_page(bridge);

		if (page == NULL) {
			/* page_count reflects pages so far, so this frees them. */
			agp_free_memory(new);
			return NULL;
		}
		new->pages[i] = page;
		new->page_count++;
	}
	new->bridge = bridge;

	return new;
}
EXPORT_SYMBOL(agp_allocate_memory);
  252. /* End - Generic routines for handling agp_memory structures */
  253. static int agp_return_size(void)
  254. {
  255. int current_size;
  256. void *temp;
  257. temp = agp_bridge->current_size;
  258. switch (agp_bridge->driver->size_type) {
  259. case U8_APER_SIZE:
  260. current_size = A_SIZE_8(temp)->size;
  261. break;
  262. case U16_APER_SIZE:
  263. current_size = A_SIZE_16(temp)->size;
  264. break;
  265. case U32_APER_SIZE:
  266. current_size = A_SIZE_32(temp)->size;
  267. break;
  268. case LVL2_APER_SIZE:
  269. current_size = A_SIZE_LVL2(temp)->size;
  270. break;
  271. case FIXED_APER_SIZE:
  272. current_size = A_SIZE_FIX(temp)->size;
  273. break;
  274. default:
  275. current_size = 0;
  276. break;
  277. }
  278. current_size -= (agp_memory_reserved / (1024*1024));
  279. if (current_size <0)
  280. current_size = 0;
  281. return current_size;
  282. }
  283. int agp_num_entries(void)
  284. {
  285. int num_entries;
  286. void *temp;
  287. temp = agp_bridge->current_size;
  288. switch (agp_bridge->driver->size_type) {
  289. case U8_APER_SIZE:
  290. num_entries = A_SIZE_8(temp)->num_entries;
  291. break;
  292. case U16_APER_SIZE:
  293. num_entries = A_SIZE_16(temp)->num_entries;
  294. break;
  295. case U32_APER_SIZE:
  296. num_entries = A_SIZE_32(temp)->num_entries;
  297. break;
  298. case LVL2_APER_SIZE:
  299. num_entries = A_SIZE_LVL2(temp)->num_entries;
  300. break;
  301. case FIXED_APER_SIZE:
  302. num_entries = A_SIZE_FIX(temp)->num_entries;
  303. break;
  304. default:
  305. num_entries = 0;
  306. break;
  307. }
  308. num_entries -= agp_memory_reserved>>PAGE_SHIFT;
  309. if (num_entries<0)
  310. num_entries = 0;
  311. return num_entries;
  312. }
  313. EXPORT_SYMBOL_GPL(agp_num_entries);
/**
 * agp_copy_info - copy bridge state information
 *
 * @bridge: agp_bridge_data to copy from; may be NULL if no bridge is present.
 * @info: agp_kern_info pointer. The caller should insure that this pointer is valid.
 *
 * This function copies information about the agp bridge device and the state of
 * the agp backend into an agp_kern_info pointer.
 *
 * Returns 0 on success, or -EIO with info->chipset set to NOT_SUPPORTED
 * when @bridge is NULL.
 */
int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
{
	memset(info, 0, sizeof(struct agp_kern_info));
	if (!bridge) {
		info->chipset = NOT_SUPPORTED;
		return -EIO;
	}

	info->version.major = bridge->version->major;
	info->version.minor = bridge->version->minor;
	info->chipset = SUPPORTED;
	info->device = bridge->dev;
	/* Report the mode with the reserved bits of the active AGP rev masked. */
	if (bridge->mode & AGPSTAT_MODE_3_0)
		info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
	else
		info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
	info->aper_base = bridge->gart_bus_addr;
	info->aper_size = agp_return_size();
	info->max_memory = bridge->max_memory_agp;
	info->current_memory = atomic_read(&bridge->current_memory_agp);
	info->cant_use_aperture = bridge->driver->cant_use_aperture;
	info->vm_ops = bridge->vm_ops;
	info->page_mask = ~0UL;
	return 0;
}
EXPORT_SYMBOL(agp_copy_info);
  347. /* End - Routine to copy over information structure */
  348. /*
  349. * Routines for handling swapping of agp_memory into the GATT -
  350. * These routines take agp_memory and insert them into the GATT.
  351. * They call device specific routines to actually write to the GATT.
  352. */
  353. /**
  354. * agp_bind_memory - Bind an agp_memory structure into the GATT.
  355. *
  356. * @curr: agp_memory pointer
  357. * @pg_start: an offset into the graphics aperture translation table
  358. *
  359. * It returns -EINVAL if the pointer == NULL.
  360. * It returns -EBUSY if the area of the table requested is already in use.
  361. */
  362. int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
  363. {
  364. int ret_val;
  365. if (curr == NULL)
  366. return -EINVAL;
  367. if (curr->is_bound) {
  368. printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
  369. return -EINVAL;
  370. }
  371. if (!curr->is_flushed) {
  372. curr->bridge->driver->cache_flush();
  373. curr->is_flushed = true;
  374. }
  375. ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);
  376. if (ret_val != 0)
  377. return ret_val;
  378. curr->is_bound = true;
  379. curr->pg_start = pg_start;
  380. spin_lock(&agp_bridge->mapped_lock);
  381. list_add(&curr->mapped_list, &agp_bridge->mapped_list);
  382. spin_unlock(&agp_bridge->mapped_lock);
  383. return 0;
  384. }
  385. EXPORT_SYMBOL(agp_bind_memory);
/**
 * agp_unbind_memory - Removes an agp_memory structure from the GATT
 *
 * @curr: agp_memory pointer to be removed from the GATT.
 *
 * It returns -EINVAL if this piece of agp_memory is not currently bound to
 * the graphics aperture translation table or if the agp_memory pointer == NULL
 */
int agp_unbind_memory(struct agp_memory *curr)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (!curr->is_bound) {
		printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
		return -EINVAL;
	}

	ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);
	if (ret_val != 0)
		return ret_val;

	curr->is_bound = false;
	curr->pg_start = 0;

	/* Drop the mapping from the owning bridge's mapped list. */
	spin_lock(&curr->bridge->mapped_lock);
	list_del(&curr->mapped_list);
	spin_unlock(&curr->bridge->mapped_lock);

	return 0;
}
EXPORT_SYMBOL(agp_unbind_memory);
/**
 * agp_rebind_memory - Rewrite the entire GATT, useful on resume
 *
 * Walks the default bridge's mapped list and re-inserts every bound
 * agp_memory at its recorded offset. Stops at, and returns, the first
 * driver error; returns 0 on success.
 */
int agp_rebind_memory(void)
{
	struct agp_memory *curr;
	int ret_val = 0;

	spin_lock(&agp_bridge->mapped_lock);
	list_for_each_entry(curr, &agp_bridge->mapped_list, mapped_list) {
		ret_val = curr->bridge->driver->insert_memory(curr,
							      curr->pg_start,
							      curr->type);
		if (ret_val != 0)
			break;
	}
	spin_unlock(&agp_bridge->mapped_lock);
	return ret_val;
}
EXPORT_SYMBOL(agp_rebind_memory);
  433. /* End - Routines for handling swapping of agp_memory into the GATT */
  434. /* Generic Agp routines - Start */
  435. static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
  436. {
  437. u32 tmp;
  438. if (*requested_mode & AGP2_RESERVED_MASK) {
  439. printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
  440. *requested_mode & AGP2_RESERVED_MASK, *requested_mode);
  441. *requested_mode &= ~AGP2_RESERVED_MASK;
  442. }
  443. /*
  444. * Some dumb bridges are programmed to disobey the AGP2 spec.
  445. * This is likely a BIOS misprogramming rather than poweron default, or
  446. * it would be a lot more common.
  447. * https://bugs.freedesktop.org/show_bug.cgi?id=8816
  448. * AGPv2 spec 6.1.9 states:
  449. * The RATE field indicates the data transfer rates supported by this
  450. * device. A.G.P. devices must report all that apply.
  451. * Fix them up as best we can.
  452. */
  453. switch (*bridge_agpstat & 7) {
  454. case 4:
  455. *bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
  456. printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate"
  457. "Fixing up support for x2 & x1\n");
  458. break;
  459. case 2:
  460. *bridge_agpstat |= AGPSTAT2_1X;
  461. printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate"
  462. "Fixing up support for x1\n");
  463. break;
  464. default:
  465. break;
  466. }
  467. /* Check the speed bits make sense. Only one should be set. */
  468. tmp = *requested_mode & 7;
  469. switch (tmp) {
  470. case 0:
  471. printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm);
  472. *requested_mode |= AGPSTAT2_1X;
  473. break;
  474. case 1:
  475. case 2:
  476. break;
  477. case 3:
  478. *requested_mode &= ~(AGPSTAT2_1X); /* rate=2 */
  479. break;
  480. case 4:
  481. break;
  482. case 5:
  483. case 6:
  484. case 7:
  485. *requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X); /* rate=4*/
  486. break;
  487. }
  488. /* disable SBA if it's not supported */
  489. if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA)))
  490. *bridge_agpstat &= ~AGPSTAT_SBA;
  491. /* Set rate */
  492. if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X)))
  493. *bridge_agpstat &= ~AGPSTAT2_4X;
  494. if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X)))
  495. *bridge_agpstat &= ~AGPSTAT2_2X;
  496. if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X)))
  497. *bridge_agpstat &= ~AGPSTAT2_1X;
  498. /* Now we know what mode it should be, clear out the unwanted bits. */
  499. if (*bridge_agpstat & AGPSTAT2_4X)
  500. *bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X); /* 4X */
  501. if (*bridge_agpstat & AGPSTAT2_2X)
  502. *bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X); /* 2X */
  503. if (*bridge_agpstat & AGPSTAT2_1X)
  504. *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X); /* 1X */
  505. /* Apply any errata. */
  506. if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
  507. *bridge_agpstat &= ~AGPSTAT_FW;
  508. if (agp_bridge->flags & AGP_ERRATA_SBA)
  509. *bridge_agpstat &= ~AGPSTAT_SBA;
  510. if (agp_bridge->flags & AGP_ERRATA_1X) {
  511. *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
  512. *bridge_agpstat |= AGPSTAT2_1X;
  513. }
  514. /* If we've dropped down to 1X, disable fast writes. */
  515. if (*bridge_agpstat & AGPSTAT2_1X)
  516. *bridge_agpstat &= ~AGPSTAT_FW;
  517. }
/*
 * requested_mode = Mode requested by (typically) X.
 * bridge_agpstat = PCI_AGP_STATUS from agp bridge.
 * vga_agpstat = PCI_AGP_STATUS from graphic card.
 *
 * Negotiates an AGP 3.0 mode (x4/x8) between the three values, fixing
 * up broken requests, and settles ARQSZ, calibration cycle and SBA.
 */
static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	/* Keep the originals for diagnostics in the fallback messages below. */
	u32 origbridge=*bridge_agpstat, origvga=*vga_agpstat;
	u32 tmp;

	if (*requested_mode & AGP3_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
			*requested_mode & AGP3_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP3_RESERVED_MASK;
	}

	/* Check the speed bits make sense. */
	tmp = *requested_mode & 7;
	if (tmp == 0) {
		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm);
		*requested_mode |= AGPSTAT3_4X;
	}
	if (tmp >= 3) {
		printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4);
		*requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
	}

	/* ARQSZ - Set the value to the maximum one.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
		max_t(u32,(*bridge_agpstat & AGPSTAT_ARQSZ),(*vga_agpstat & AGPSTAT_ARQSZ)));

	/* Calibration cycle.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
		min_t(u32,(*bridge_agpstat & AGPSTAT_CAL_MASK),(*vga_agpstat & AGPSTAT_CAL_MASK)));

	/* SBA *must* be supported for AGP v3 */
	*bridge_agpstat |= AGPSTAT_SBA;

	/*
	 * Set speed.
	 * Check for invalid speeds. This can happen when applications
	 * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware
	 */
	if (*requested_mode & AGPSTAT_MODE_3_0) {
		/*
		 * Caller hasn't a clue what it is doing. Bridge is in 3.0 mode,
		 * have been passed a 3.0 mode, but with 2.x speed bits set.
		 * AGP2.x 4x -> AGP3.0 4x.
		 */
		if (*requested_mode & AGPSTAT2_4X) {
			printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
				current->comm, *requested_mode);
			*requested_mode &= ~AGPSTAT2_4X;
			*requested_mode |= AGPSTAT3_4X;
		}
	} else {
		/*
		 * The caller doesn't know what they are doing. We are in 3.0 mode,
		 * but have been passed an AGP 2.x mode.
		 * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
		 */
		printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
			current->comm, *requested_mode);
		*requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
		*requested_mode |= AGPSTAT3_4X;
	}

	if (*requested_mode & AGPSTAT3_8X) {
		/* x8 requested: both ends must be capable, else fall to x4. */
		if (!(*bridge_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm);
			return;
		}
		if (!(*vga_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but graphic card not capable.\n", current->comm);
			return;
		}
		/* All set, bridge & device can do AGP x8*/
		*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		goto done;

	} else if (*requested_mode & AGPSTAT3_4X) {
		*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
		*bridge_agpstat |= AGPSTAT3_4X;
		goto done;

	} else {
		/*
		 * If we didn't specify an AGP mode, we see if both
		 * the graphics card, and the bridge can do x8, and use if so.
		 * If not, we fall back to x4 mode.
		 */
		if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
			printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode "
				"supported by bridge & card (x8).\n");
			*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
			*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		} else {
			printk(KERN_INFO PFX "Fell back to AGPx4 mode because");
			if (!(*bridge_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
					*bridge_agpstat, origbridge);
				*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*bridge_agpstat |= AGPSTAT3_4X;
			}
			if (!(*vga_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n",
					*vga_agpstat, origvga);
				*vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*vga_agpstat |= AGPSTAT3_4X;
			}
		}
	}

done:
	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	/*
	 * NOTE(review): this errata path uses AGPSTAT2_* rate bits even though
	 * we are in the v3 parser — presumably intentional since the bit
	 * positions overlap the v3 rate field, but worth confirming.
	 */
	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}
}
/**
 * agp_collect_device_status - determine correct agp_cmd from various agp_stat's
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @requested_mode: requested agp_stat from userspace (Typically from X)
 * @bridge_agpstat: current agp_stat from AGP bridge.
 *
 * This function will hunt for an AGP graphics card, and try to match
 * the requested mode to the capabilities of both the bridge and the card.
 *
 * Returns the negotiated agpstat value, or 0 when no AGP-capable VGA
 * device is found.
 */
u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat)
{
	struct pci_dev *device = NULL;
	u32 vga_agpstat;
	u8 cap_ptr;

	/* Walk VGA-class devices until one with an AGP capability appears. */
	for (;;) {
		device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device);
		if (!device) {
			printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
			return 0;
		}
		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (cap_ptr)
			break;
	}

	/*
	 * Ok, here we have a AGP device. Disable impossible
	 * settings, and adjust the readqueue to the minimum.
	 */
	pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat);

	/* adjust RQ depth: minimum of request, bridge and card. */
	bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) |
		min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH),
			min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH))));

	/* disable FW if it's not supported */
	if (!((bridge_agpstat & AGPSTAT_FW) &&
		 (vga_agpstat & AGPSTAT_FW) &&
		 (requested_mode & AGPSTAT_FW)))
		bridge_agpstat &= ~AGPSTAT_FW;

	/*
	 * Check to see if we are operating in 3.0 mode.
	 * NOTE(review): reads the global agp_bridge->mode rather than the
	 * @bridge parameter — looks intentional for the single-bridge case,
	 * but verify on multi-bridge configurations.
	 */
	if (agp_bridge->mode & AGPSTAT_MODE_3_0)
		agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
	else
		agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);

	pci_dev_put(device);
	return bridge_agpstat;
}
EXPORT_SYMBOL(agp_collect_device_status);
  685. void agp_device_command(u32 bridge_agpstat, bool agp_v3)
  686. {
  687. struct pci_dev *device = NULL;
  688. int mode;
  689. mode = bridge_agpstat & 0x7;
  690. if (agp_v3)
  691. mode *= 4;
  692. for_each_pci_dev(device) {
  693. u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
  694. if (!agp)
  695. continue;
  696. dev_info(&device->dev, "putting AGP V%d device into %dx mode\n",
  697. agp_v3 ? 3 : 2, mode);
  698. pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
  699. }
  700. }
  701. EXPORT_SYMBOL(agp_device_command);
  702. void get_agp_version(struct agp_bridge_data *bridge)
  703. {
  704. u32 ncapid;
  705. /* Exit early if already set by errata workarounds. */
  706. if (bridge->major_version != 0)
  707. return;
  708. pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid);
  709. bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
  710. bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
  711. }
  712. EXPORT_SYMBOL(get_agp_version);
/*
 * Negotiate and program an AGP mode on the bridge and all AGP devices.
 * NOTE(review): mixes the @bridge parameter with the global agp_bridge
 * (version read, status read, negotiation) — presumably they are the same
 * object in the single-bridge case; confirm before relying on @bridge.
 */
void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
{
	u32 bridge_agpstat, temp;

	get_agp_version(agp_bridge);

	dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
		 agp_bridge->major_version, agp_bridge->minor_version);

	pci_read_config_dword(agp_bridge->dev,
		      agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);

	bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat);
	if (bridge_agpstat == 0)
		/* Something bad happened. FIXME: Return error code? */
		return;

	bridge_agpstat |= AGPSTAT_AGP_ENABLE;

	/* Do AGP version specific frobbing. */
	if (bridge->major_version >= 3) {
		if (bridge->mode & AGPSTAT_MODE_3_0) {
			/* If we have 3.5, we can do the isoch stuff. */
			if (bridge->minor_version >= 5)
				agp_3_5_enable(bridge);
			agp_device_command(bridge_agpstat, true);
			return;
		} else {
			/* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation.*/
			bridge_agpstat &= ~(7<<10) ;
			pci_read_config_dword(bridge->dev,
					bridge->capndx+AGPCTRL, &temp);
			temp |= (1<<9);
			pci_write_config_dword(bridge->dev,
					bridge->capndx+AGPCTRL, temp);

			dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n");
		}
	}

	/* AGP v<3 */
	agp_device_command(bridge_agpstat, false);
}
EXPORT_SYMBOL(agp_generic_enable);
/*
 * Allocate and initialise a single-level GATT for @bridge: pick an
 * aperture size, allocate the table pages, map them uncached, and fill
 * every entry with the scratch page.  Returns 0 or a -errno.
 */
int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
{
	char *table;
	char *table_end;
	int size;
	int page_order;
	int num_entries;
	int i;
	void *temp;
	struct page *page;

	/* The generic routines can't handle 2 level gatt's */
	if (bridge->driver->size_type == LVL2_APER_SIZE)
		return -EINVAL;

	table = NULL;
	i = bridge->aperture_size_idx;
	temp = bridge->current_size;
	size = page_order = num_entries = 0;

	if (bridge->driver->size_type != FIXED_APER_SIZE) {
		/* Walk down the driver's aperture-size table: whenever the
		 * allocation fails, advance to the next (smaller) size and
		 * retry until a table fits or the sizes are exhausted. */
		do {
			switch (bridge->driver->size_type) {
			case U8_APER_SIZE:
				size = A_SIZE_8(temp)->size;
				page_order =
				    A_SIZE_8(temp)->page_order;
				num_entries =
				    A_SIZE_8(temp)->num_entries;
				break;
			case U16_APER_SIZE:
				size = A_SIZE_16(temp)->size;
				page_order = A_SIZE_16(temp)->page_order;
				num_entries = A_SIZE_16(temp)->num_entries;
				break;
			case U32_APER_SIZE:
				size = A_SIZE_32(temp)->size;
				page_order = A_SIZE_32(temp)->page_order;
				num_entries = A_SIZE_32(temp)->num_entries;
				break;
			/* This case will never really happen. */
			case FIXED_APER_SIZE:
			case LVL2_APER_SIZE:
			default:
				size = page_order = num_entries = 0;
				break;
			}

			table = alloc_gatt_pages(page_order);

			if (table == NULL) {
				/* Allocation failed: select the next smaller
				 * aperture size and loop. */
				i++;
				switch (bridge->driver->size_type) {
				case U8_APER_SIZE:
					bridge->current_size = A_IDX8(bridge);
					break;
				case U16_APER_SIZE:
					bridge->current_size = A_IDX16(bridge);
					break;
				case U32_APER_SIZE:
					bridge->current_size = A_IDX32(bridge);
					break;
				/* These cases will never really happen. */
				case FIXED_APER_SIZE:
				case LVL2_APER_SIZE:
				default:
					break;
				}
				temp = bridge->current_size;
			} else {
				bridge->aperture_size_idx = i;
			}
		} while (!table && (i < bridge->driver->num_aperture_sizes));
	} else {
		size = ((struct aper_size_info_fixed *) temp)->size;
		page_order = ((struct aper_size_info_fixed *) temp)->page_order;
		num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
		table = alloc_gatt_pages(page_order);
	}

	if (table == NULL)
		return -ENOMEM;

	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	/* Reserve the table pages so they are left alone by the VM. */
	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		SetPageReserved(page);

	bridge->gatt_table_real = (u32 *) table;
	agp_gatt_table = (void *)table;

	bridge->driver->cache_flush();
#ifdef CONFIG_X86
	/* On x86 the GART reads the table via the kernel direct map,
	 * so mark it uncached in place rather than remapping it. */
	set_memory_uc((unsigned long)table, 1 << page_order);
	bridge->gatt_table = (void *)table;
#else
	bridge->gatt_table = ioremap_nocache(virt_to_gart(table),
					(PAGE_SIZE * (1 << page_order)));
	bridge->driver->cache_flush();
#endif

	if (bridge->gatt_table == NULL) {
		for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
			ClearPageReserved(page);

		free_gatt_pages(table, page_order);

		return -ENOMEM;
	}
	bridge->gatt_bus_addr = virt_to_gart(bridge->gatt_table_real);

	/* AK: bogus, should encode addresses > 4GB */
	for (i = 0; i < num_entries; i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
		readl(bridge->gatt_table+i);	/* PCI Posting. */
	}

	return 0;
}
EXPORT_SYMBOL(agp_generic_create_gatt_table);
/*
 * Tear down the GATT built by agp_generic_create_gatt_table(): undo the
 * uncached mapping, unreserve the table pages and free them, then clear
 * the bridge's table pointers.  Returns 0 or -EINVAL for 2-level GATTs.
 */
int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	char *table, *table_end;
	void *temp;
	struct page *page;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		page_order = A_SIZE_8(temp)->page_order;
		break;
	case U16_APER_SIZE:
		page_order = A_SIZE_16(temp)->page_order;
		break;
	case U32_APER_SIZE:
		page_order = A_SIZE_32(temp)->page_order;
		break;
	case FIXED_APER_SIZE:
		page_order = A_SIZE_FIX(temp)->page_order;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2 level gatt's */
		return -EINVAL;
		break;
	default:
		page_order = 0;
		break;
	}

	/* Do not worry about freeing memory, because if this is
	 * called, then all agp memory is deallocated and removed
	 * from the table. */

#ifdef CONFIG_X86
	/* Mirror of the create path: restore write-back caching instead
	 * of unmapping, since x86 used the direct map. */
	set_memory_wb((unsigned long)bridge->gatt_table, 1 << page_order);
#else
	iounmap(bridge->gatt_table);
#endif
	table = (char *) bridge->gatt_table_real;
	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		ClearPageReserved(page);

	free_gatt_pages(bridge->gatt_table_real, page_order);

	agp_gatt_table = NULL;
	bridge->gatt_table = NULL;
	bridge->gatt_table_real = NULL;
	bridge->gatt_bus_addr = 0;

	return 0;
}
EXPORT_SYMBOL(agp_generic_free_gatt_table);
  902. int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
  903. {
  904. int num_entries;
  905. size_t i;
  906. off_t j;
  907. void *temp;
  908. struct agp_bridge_data *bridge;
  909. int mask_type;
  910. bridge = mem->bridge;
  911. if (!bridge)
  912. return -EINVAL;
  913. if (mem->page_count == 0)
  914. return 0;
  915. temp = bridge->current_size;
  916. switch (bridge->driver->size_type) {
  917. case U8_APER_SIZE:
  918. num_entries = A_SIZE_8(temp)->num_entries;
  919. break;
  920. case U16_APER_SIZE:
  921. num_entries = A_SIZE_16(temp)->num_entries;
  922. break;
  923. case U32_APER_SIZE:
  924. num_entries = A_SIZE_32(temp)->num_entries;
  925. break;
  926. case FIXED_APER_SIZE:
  927. num_entries = A_SIZE_FIX(temp)->num_entries;
  928. break;
  929. case LVL2_APER_SIZE:
  930. /* The generic routines can't deal with 2 level gatt's */
  931. return -EINVAL;
  932. break;
  933. default:
  934. num_entries = 0;
  935. break;
  936. }
  937. num_entries -= agp_memory_reserved/PAGE_SIZE;
  938. if (num_entries < 0) num_entries = 0;
  939. if (type != mem->type)
  940. return -EINVAL;
  941. mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
  942. if (mask_type != 0) {
  943. /* The generic routines know nothing of memory types */
  944. return -EINVAL;
  945. }
  946. /* AK: could wrap */
  947. if ((pg_start + mem->page_count) > num_entries)
  948. return -EINVAL;
  949. j = pg_start;
  950. while (j < (pg_start + mem->page_count)) {
  951. if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
  952. return -EBUSY;
  953. j++;
  954. }
  955. if (!mem->is_flushed) {
  956. bridge->driver->cache_flush();
  957. mem->is_flushed = true;
  958. }
  959. for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
  960. writel(bridge->driver->mask_memory(bridge, mem->pages[i], mask_type),
  961. bridge->gatt_table+j);
  962. }
  963. readl(bridge->gatt_table+j-1); /* PCI Posting. */
  964. bridge->driver->tlb_flush(mem);
  965. return 0;
  966. }
  967. EXPORT_SYMBOL(agp_generic_insert_memory);
  968. int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
  969. {
  970. size_t i;
  971. struct agp_bridge_data *bridge;
  972. int mask_type;
  973. bridge = mem->bridge;
  974. if (!bridge)
  975. return -EINVAL;
  976. if (mem->page_count == 0)
  977. return 0;
  978. if (type != mem->type)
  979. return -EINVAL;
  980. mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
  981. if (mask_type != 0) {
  982. /* The generic routines know nothing of memory types */
  983. return -EINVAL;
  984. }
  985. /* AK: bogus, should encode addresses > 4GB */
  986. for (i = pg_start; i < (mem->page_count + pg_start); i++) {
  987. writel(bridge->scratch_page, bridge->gatt_table+i);
  988. }
  989. readl(bridge->gatt_table+i-1); /* PCI Posting. */
  990. bridge->driver->tlb_flush(mem);
  991. return 0;
  992. }
  993. EXPORT_SYMBOL(agp_generic_remove_memory);
/*
 * Generic bridges support no driver-specific memory types, so a by-type
 * allocation always fails.  Drivers with special types override this.
 */
struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
{
	return NULL;
}
EXPORT_SYMBOL(agp_generic_alloc_by_type);
/*
 * Release an agp_memory object: free its page array and its key before
 * freeing the container itself.
 */
void agp_generic_free_by_type(struct agp_memory *curr)
{
	agp_free_page_array(curr);
	agp_free_key(curr->key);
	kfree(curr);
}
EXPORT_SYMBOL(agp_generic_free_by_type);
  1006. struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
  1007. {
  1008. struct agp_memory *new;
  1009. int i;
  1010. int pages;
  1011. pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
  1012. new = agp_create_user_memory(page_count);
  1013. if (new == NULL)
  1014. return NULL;
  1015. for (i = 0; i < page_count; i++)
  1016. new->pages[i] = 0;
  1017. new->page_count = 0;
  1018. new->type = type;
  1019. new->num_scratch_pages = pages;
  1020. return new;
  1021. }
  1022. EXPORT_SYMBOL(agp_generic_alloc_user);
/*
 * Basic Page Allocation Routines -
 * These routines handle page allocation and by default they reserve the
 * allocated memory. They also handle incrementing the current_memory_agp
 * value, which is checked against a maximum value.
 */
/*
 * Allocate @num_pages zeroed DMA32 pages into @mem, bumping the global
 * usage counter for each.  On partial failure the pages already placed
 * in mem->pages (and counted in mem->page_count) are kept, so the
 * caller's normal free path can release them.  Returns 0 or -ENOMEM.
 */
int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *mem, size_t num_pages)
{
	struct page * page;
	int i, ret = -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
		/* agp_free_memory() needs gart address */
		if (page == NULL)
			goto out;

#ifndef CONFIG_X86
		map_page_into_agp(page);
#endif
		get_page(page);
		/* NOTE(review): counts against the global agp_bridge, not the
		 * bridge parameter -- presumably a single shared budget. */
		atomic_inc(&agp_bridge->current_memory_agp);

		mem->pages[i] = page;
		mem->page_count++;
	}

#ifdef CONFIG_X86
	/* x86 batches the uncached conversion over the whole array. */
	set_pages_array_uc(mem->pages, num_pages);
#endif
	ret = 0;
out:
	return ret;
}
EXPORT_SYMBOL(agp_generic_alloc_pages);
  1054. struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge)
  1055. {
  1056. struct page * page;
  1057. page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
  1058. if (page == NULL)
  1059. return NULL;
  1060. map_page_into_agp(page);
  1061. get_page(page);
  1062. atomic_inc(&agp_bridge->current_memory_agp);
  1063. return page;
  1064. }
  1065. EXPORT_SYMBOL(agp_generic_alloc_page);
/*
 * Release every page held in @mem: restore caching attributes, drop the
 * extra reference taken at allocation time plus the page itself, and
 * decrement the global usage counter.  Leaves mem->pages[] NULLed.
 */
void agp_generic_destroy_pages(struct agp_memory *mem)
{
	int i;
	struct page *page;

	if (!mem)
		return;

#ifdef CONFIG_X86
	/* Undo set_pages_array_uc() from the allocation path. */
	set_pages_array_wb(mem->pages, mem->page_count);
#endif

	for (i = 0; i < mem->page_count; i++) {
		page = mem->pages[i];

#ifndef CONFIG_X86
		unmap_page_from_agp(page);
#endif
		/* put_page() drops the get_page() reference taken at
		 * allocation; __free_page() returns the page itself. */
		put_page(page);
		__free_page(page);
		atomic_dec(&agp_bridge->current_memory_agp);
		mem->pages[i] = NULL;
	}
}
EXPORT_SYMBOL(agp_generic_destroy_pages);
/*
 * Release a single AGP page.  @flags lets callers split the teardown:
 * AGP_PAGE_DESTROY_UNMAP only unmaps it from the GART, while
 * AGP_PAGE_DESTROY_FREE drops both references (the get_page() taken at
 * allocation and the page itself) and uncounts it.
 */
void agp_generic_destroy_page(struct page *page, int flags)
{
	if (page == NULL)
		return;

	if (flags & AGP_PAGE_DESTROY_UNMAP)
		unmap_page_from_agp(page);

	if (flags & AGP_PAGE_DESTROY_FREE) {
		put_page(page);
		__free_page(page);
		atomic_dec(&agp_bridge->current_memory_agp);
	}
}
EXPORT_SYMBOL(agp_generic_destroy_page);
  1100. /* End Basic Page Allocation Routines */
/**
 * agp_enable - initialise the agp point-to-point connection.
 *
 * @bridge: bridge to enable; a NULL bridge is silently ignored.
 * @mode: agp mode register value to configure with.
 */
void agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	if (!bridge)
		return;
	/* Dispatch to the driver's enable hook (often agp_generic_enable). */
	bridge->driver->agp_enable(bridge, mode);
}
EXPORT_SYMBOL(agp_enable);
  1113. /* When we remove the global variable agp_bridge from all drivers
  1114. * then agp_alloc_bridge and agp_generic_find_bridge need to be updated
  1115. */
  1116. struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev)
  1117. {
  1118. if (list_empty(&agp_bridges))
  1119. return NULL;
  1120. return agp_bridge;
  1121. }
/* Cross-CPU callback for global_cache_flush(): flush this CPU's caches. */
static void ipi_handler(void *null)
{
	flush_agp_cache();
}
/*
 * Run ipi_handler() on every CPU, waiting for completion, so all CPUs
 * flush their caches before the GATT or AGP pages are touched by DMA.
 */
void global_cache_flush(void)
{
	if (on_each_cpu(ipi_handler, NULL, 1) != 0)
		panic(PFX "timed out waiting for the other CPUs!\n");
}
EXPORT_SYMBOL(global_cache_flush);
  1132. unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
  1133. struct page *page, int type)
  1134. {
  1135. unsigned long addr = phys_to_gart(page_to_phys(page));
  1136. /* memory type is ignored in the generic routine */
  1137. if (bridge->driver->masks)
  1138. return addr | bridge->driver->masks[0].mask;
  1139. else
  1140. return addr;
  1141. }
  1142. EXPORT_SYMBOL(agp_generic_mask_memory);
  1143. int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
  1144. int type)
  1145. {
  1146. if (type >= AGP_USER_TYPES)
  1147. return 0;
  1148. return type;
  1149. }
  1150. EXPORT_SYMBOL(agp_generic_type_to_mask_type);
  1151. /*
  1152. * These functions are implemented according to the AGPv3 spec,
  1153. * which covers implementation details that had previously been
  1154. * left open.
  1155. */
  1156. int agp3_generic_fetch_size(void)
  1157. {
  1158. u16 temp_size;
  1159. int i;
  1160. struct aper_size_info_16 *values;
  1161. pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
  1162. values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
  1163. for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
  1164. if (temp_size == values[i].size_value) {
  1165. agp_bridge->previous_size =
  1166. agp_bridge->current_size = (void *) (values + i);
  1167. agp_bridge->aperture_size_idx = i;
  1168. return values[i].size;
  1169. }
  1170. }
  1171. return 0;
  1172. }
  1173. EXPORT_SYMBOL(agp3_generic_fetch_size);
/*
 * Flush the bridge's GART TLB per the AGPv3 scheme: clear the
 * GTLB-enable bit in AGPCTRL (invalidating cached translations), then
 * restore the original control value.  The two writes must stay in this
 * order.
 */
void agp3_generic_tlbflush(struct agp_memory *mem)
{
	u32 ctrl;
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
}
EXPORT_SYMBOL(agp3_generic_tlbflush);
/*
 * Program an AGPv3 bridge: record the aperture bus address, write the
 * selected aperture size and the GATT base, then turn on the aperture
 * and the GART TLB.  Always returns 0.
 */
int agp3_generic_configure(void)
{
	u32 temp;
	struct aper_size_info_16 *current_size;

	current_size = A_SIZE_16(agp_bridge->current_size);

	/* NOTE(review): masks only the low dword of APBASE -- assumes a
	 * 32-bit (or suitably programmed) aperture BAR; confirm. */
	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* set aperture size */
	pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
	/* set gart pointer */
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
	/* enable aperture and GTLB */
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);

	return 0;
}
EXPORT_SYMBOL(agp3_generic_configure);
/*
 * Disable the AGPv3 aperture by clearing the aperture-enable bit in
 * AGPCTRL; the inverse of the enable done in agp3_generic_configure().
 */
void agp3_generic_cleanup(void)
{
	u32 ctrl;
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
}
EXPORT_SYMBOL(agp3_generic_cleanup);
/*
 * AGPv3 aperture-size table: { size, num_entries, page_order, size_value }.
 * size_value is matched against the AGPAPSIZE register by
 * agp3_generic_fetch_size(); size is what that function returns to the
 * core (presumably in MB -- confirm against aper_size_info_16 users).
 */
const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
{
	{4096, 1048576, 10,0x000},
	{2048,  524288, 9, 0x800},
	{1024,  262144, 8, 0xc00},
	{ 512,  131072, 7, 0xe00},
	{ 256,   65536, 6, 0xf00},
	{ 128,   32768, 5, 0xf20},
	{  64,   16384, 4, 0xf30},
	{  32,    8192, 3, 0xf38},
	{  16,    4096, 2, 0xf3c},
	{   8,    2048, 1, 0xf3e},
	{   4,    1024, 0, 0xf3f}
};
EXPORT_SYMBOL(agp3_generic_sizes);