/*
 * AGPGART driver.
 * Copyright (C) 2004 Silicon Graphics, Inc.
 * Copyright (C) 2002-2005 Dave Jones.
 * Copyright (C) 1999 Jeff Hartmann.
 * Copyright (C) 1999 Precision Insight, Inc.
 * Copyright (C) 1999 Xi Graphics, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * TODO:
 * - Allocate more than order 0 pages to avoid too much linear map splitting.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/miscdevice.h>
#include <linux/pm.h>
#include <linux/agp_backend.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include "agp.h"
__u32 *agp_gatt_table;
int agp_memory_reserved;

/*
 * Needed by the Nforce GART driver for the time being. Would be
 * nice to do this some other way instead of needing this export.
 */
EXPORT_SYMBOL_GPL(agp_memory_reserved);

/*
 * Generic routines for handling agp_memory structures -
 * They use the basic page allocation routines to do the brunt of the work.
 */

void agp_free_key(int key)
{
	if (key < 0)
		return;

	if (key < MAXKEY)
		clear_bit(key, agp_bridge->key_list);
}
EXPORT_SYMBOL(agp_free_key);

static int agp_get_key(void)
{
	int bit;

	bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
	if (bit < MAXKEY) {
		set_bit(bit, agp_bridge->key_list);
		return bit;
	}
	return -1;
}

void agp_flush_chipset(struct agp_bridge_data *bridge)
{
	if (bridge->driver->chipset_flush)
		bridge->driver->chipset_flush(bridge);
}
EXPORT_SYMBOL(agp_flush_chipset);

/*
 * Use kmalloc if possible for the page list. Otherwise fall back to
 * vmalloc. This speeds things up and also saves memory for small AGP
 * regions.
 */

void agp_alloc_page_array(size_t size, struct agp_memory *mem)
{
	mem->pages = NULL;

	if (size <= 2*PAGE_SIZE)
		mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (mem->pages == NULL) {
		mem->pages = vmalloc(size);
	}
}
EXPORT_SYMBOL(agp_alloc_page_array);
void agp_free_page_array(struct agp_memory *mem)
{
	if (is_vmalloc_addr(mem->pages)) {
		vfree(mem->pages);
	} else {
		kfree(mem->pages);
	}
}
EXPORT_SYMBOL(agp_free_page_array);

static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
{
	struct agp_memory *new;
	unsigned long alloc_size = num_agp_pages*sizeof(struct page *);

	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
	if (new == NULL)
		return NULL;

	new->key = agp_get_key();

	if (new->key < 0) {
		kfree(new);
		return NULL;
	}

	agp_alloc_page_array(alloc_size, new);

	if (new->pages == NULL) {
		agp_free_key(new->key);
		kfree(new);
		return NULL;
	}
	new->num_scratch_pages = 0;
	return new;
}

struct agp_memory *agp_create_memory(int scratch_pages)
{
	struct agp_memory *new;

	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
	if (new == NULL)
		return NULL;

	new->key = agp_get_key();

	if (new->key < 0) {
		kfree(new);
		return NULL;
	}

	agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);

	if (new->pages == NULL) {
		agp_free_key(new->key);
		kfree(new);
		return NULL;
	}
	new->num_scratch_pages = scratch_pages;
	new->type = AGP_NORMAL_MEMORY;
	return new;
}
EXPORT_SYMBOL(agp_create_memory);
/**
 * agp_free_memory - free memory associated with an agp_memory pointer.
 *
 * @curr: agp_memory pointer to be freed.
 *
 * It is the only function that can be called when the backend is not owned
 * by the caller. (So it can free memory on client death.)
 */
void agp_free_memory(struct agp_memory *curr)
{
	size_t i;

	if (curr == NULL)
		return;

	if (curr->is_bound)
		agp_unbind_memory(curr);

	if (curr->type >= AGP_USER_TYPES) {
		agp_generic_free_by_type(curr);
		return;
	}

	if (curr->type != 0) {
		curr->bridge->driver->free_by_type(curr);
		return;
	}
	if (curr->page_count != 0) {
		if (curr->bridge->driver->agp_destroy_pages) {
			curr->bridge->driver->agp_destroy_pages(curr);
		} else {
			for (i = 0; i < curr->page_count; i++) {
				curr->bridge->driver->agp_destroy_page(
					curr->pages[i],
					AGP_PAGE_DESTROY_UNMAP);
			}
			for (i = 0; i < curr->page_count; i++) {
				curr->bridge->driver->agp_destroy_page(
					curr->pages[i],
					AGP_PAGE_DESTROY_FREE);
			}
		}
	}
	agp_free_key(curr->key);
	agp_free_page_array(curr);
	kfree(curr);
}
EXPORT_SYMBOL(agp_free_memory);

#define ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))
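
/*
 * Illustrative note (not in the original source): on a 64-bit build with
 * 4 KiB pages and 8-byte pointers, ENTRIES_PER_PAGE evaluates to 512, so the
 * scratch_pages calculation in agp_allocate_memory() below needs
 * (1000 + 511) / 512 = 2 scratch pages for a 1000-page request.
 */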
/**
 * agp_allocate_memory - allocate a group of pages of a certain type.
 *
 * @page_count: size_t argument of the number of pages
 * @type: u32 argument of the type of memory to be allocated.
 *
 * Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY which
 * maps to physical ram. Any other type is device dependent.
 *
 * It returns NULL whenever memory is unavailable.
 */
struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
				       size_t page_count, u32 type)
{
	int scratch_pages;
	struct agp_memory *new;
	size_t i;

	if (!bridge)
		return NULL;

	if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp)
		return NULL;

	if (type >= AGP_USER_TYPES) {
		new = agp_generic_alloc_user(page_count, type);
		if (new)
			new->bridge = bridge;
		return new;
	}

	if (type != 0) {
		new = bridge->driver->alloc_by_type(page_count, type);
		if (new)
			new->bridge = bridge;
		return new;
	}

	scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;

	new = agp_create_memory(scratch_pages);

	if (new == NULL)
		return NULL;

	if (bridge->driver->agp_alloc_pages) {
		if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) {
			agp_free_memory(new);
			return NULL;
		}
		new->bridge = bridge;
		return new;
	}

	for (i = 0; i < page_count; i++) {
		struct page *page = bridge->driver->agp_alloc_page(bridge);

		if (page == NULL) {
			agp_free_memory(new);
			return NULL;
		}
		new->pages[i] = page;
		new->page_count++;
	}
	new->bridge = bridge;

	return new;
}
EXPORT_SYMBOL(agp_allocate_memory);
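
/*
 * Illustrative sketch (not part of the original file): a typical in-kernel
 * caller pairs these helpers roughly as follows; the bridge pointer, page
 * count and pg_start are placeholders.
 *
 *	struct agp_memory *mem;
 *
 *	mem = agp_allocate_memory(bridge, 16, AGP_NORMAL_MEMORY);
 *	if (mem && agp_bind_memory(mem, pg_start) == 0) {
 *		... use the bound aperture range ...
 *		agp_unbind_memory(mem);
 *	}
 *	agp_free_memory(mem);
 */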
/* End - Generic routines for handling agp_memory structures */

static int agp_return_size(void)
{
	int current_size;
	void *temp;

	temp = agp_bridge->current_size;

	switch (agp_bridge->driver->size_type) {
	case U8_APER_SIZE:
		current_size = A_SIZE_8(temp)->size;
		break;
	case U16_APER_SIZE:
		current_size = A_SIZE_16(temp)->size;
		break;
	case U32_APER_SIZE:
		current_size = A_SIZE_32(temp)->size;
		break;
	case LVL2_APER_SIZE:
		current_size = A_SIZE_LVL2(temp)->size;
		break;
	case FIXED_APER_SIZE:
		current_size = A_SIZE_FIX(temp)->size;
		break;
	default:
		current_size = 0;
		break;
	}

	current_size -= (agp_memory_reserved / (1024*1024));
	if (current_size < 0)
		current_size = 0;

	return current_size;
}

int agp_num_entries(void)
{
	int num_entries;
	void *temp;

	temp = agp_bridge->current_size;

	switch (agp_bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		num_entries = A_SIZE_LVL2(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	default:
		num_entries = 0;
		break;
	}

	num_entries -= agp_memory_reserved >> PAGE_SHIFT;
	if (num_entries < 0)
		num_entries = 0;
	return num_entries;
}
EXPORT_SYMBOL_GPL(agp_num_entries);
/**
 * agp_copy_info - copy bridge state information
 *
 * @info: agp_kern_info pointer. The caller should ensure that this pointer is valid.
 *
 * This function copies information about the agp bridge device and the state of
 * the agp backend into an agp_kern_info pointer.
 */
int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
{
	memset(info, 0, sizeof(struct agp_kern_info));
	if (!bridge) {
		info->chipset = NOT_SUPPORTED;
		return -EIO;
	}

	info->version.major = bridge->version->major;
	info->version.minor = bridge->version->minor;
	info->chipset = SUPPORTED;
	info->device = bridge->dev;
	if (bridge->mode & AGPSTAT_MODE_3_0)
		info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
	else
		info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
	info->aper_base = bridge->gart_bus_addr;
	info->aper_size = agp_return_size();
	info->max_memory = bridge->max_memory_agp;
	info->current_memory = atomic_read(&bridge->current_memory_agp);
	info->cant_use_aperture = bridge->driver->cant_use_aperture;
	info->vm_ops = bridge->vm_ops;
	info->page_mask = ~0UL;
	return 0;
}
EXPORT_SYMBOL(agp_copy_info);
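
/*
 * Illustrative sketch (not part of the original file): a caller that only
 * needs the aperture geometry can read it out of a stack-allocated
 * struct agp_kern_info.
 *
 *	struct agp_kern_info info;
 *
 *	if (agp_copy_info(bridge, &info) == 0)
 *		printk(KERN_INFO PFX "aperture at %#lx, %lu MB\n",
 *			(unsigned long)info.aper_base,
 *			(unsigned long)info.aper_size);
 */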
/* End - Routine to copy over information structure */

/*
 * Routines for handling swapping of agp_memory into the GATT -
 * These routines take agp_memory and insert them into the GATT.
 * They call device specific routines to actually write to the GATT.
 */

/**
 * agp_bind_memory - Bind an agp_memory structure into the GATT.
 *
 * @curr: agp_memory pointer
 * @pg_start: an offset into the graphics aperture translation table
 *
 * It returns -EINVAL if the pointer == NULL.
 * It returns -EBUSY if the area of the table requested is already in use.
 */
int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (curr->is_bound) {
		printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
		return -EINVAL;
	}
	if (!curr->is_flushed) {
		curr->bridge->driver->cache_flush();
		curr->is_flushed = true;
	}

	ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);

	if (ret_val != 0)
		return ret_val;

	curr->is_bound = true;
	curr->pg_start = pg_start;
	spin_lock(&agp_bridge->mapped_lock);
	list_add(&curr->mapped_list, &agp_bridge->mapped_list);
	spin_unlock(&agp_bridge->mapped_lock);

	return 0;
}
EXPORT_SYMBOL(agp_bind_memory);
/**
 * agp_unbind_memory - Removes an agp_memory structure from the GATT
 *
 * @curr: agp_memory pointer to be removed from the GATT.
 *
 * It returns -EINVAL if this piece of agp_memory is not currently bound to
 * the graphics aperture translation table or if the agp_memory pointer == NULL
 */
int agp_unbind_memory(struct agp_memory *curr)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (!curr->is_bound) {
		printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
		return -EINVAL;
	}

	ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);

	if (ret_val != 0)
		return ret_val;

	curr->is_bound = false;
	curr->pg_start = 0;
	spin_lock(&curr->bridge->mapped_lock);
	list_del(&curr->mapped_list);
	spin_unlock(&curr->bridge->mapped_lock);
	return 0;
}
EXPORT_SYMBOL(agp_unbind_memory);
/**
 * agp_rebind_memory - Rewrite the entire GATT, useful on resume
 */
int agp_rebind_memory(void)
{
	struct agp_memory *curr;
	int ret_val = 0;

	spin_lock(&agp_bridge->mapped_lock);
	list_for_each_entry(curr, &agp_bridge->mapped_list, mapped_list) {
		ret_val = curr->bridge->driver->insert_memory(curr,
							      curr->pg_start,
							      curr->type);
		if (ret_val != 0)
			break;
	}
	spin_unlock(&agp_bridge->mapped_lock);
	return ret_val;
}
EXPORT_SYMBOL(agp_rebind_memory);
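
/*
 * Illustrative sketch (not part of the original file): chipset drivers whose
 * GATT contents are lost across suspend typically call this from their PCI
 * resume path after re-programming the bridge. example_agp_resume() is a
 * hypothetical name used only for illustration.
 *
 *	static int example_agp_resume(struct pci_dev *pdev)
 *	{
 *		... re-program the bridge registers ...
 *		return agp_rebind_memory();
 *	}
 */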
/* End - Routines for handling swapping of agp_memory into the GATT */

/* Generic Agp routines - Start */
static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	u32 tmp;

	if (*requested_mode & AGP2_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
			*requested_mode & AGP2_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP2_RESERVED_MASK;
	}

	/*
	 * Some dumb bridges are programmed to disobey the AGP2 spec.
	 * This is likely a BIOS misprogramming rather than poweron default, or
	 * it would be a lot more common.
	 * https://bugs.freedesktop.org/show_bug.cgi?id=8816
	 * AGPv2 spec 6.1.9 states:
	 *   The RATE field indicates the data transfer rates supported by this
	 *   device. A.G.P. devices must report all that apply.
	 * Fix them up as best we can.
	 */
	switch (*bridge_agpstat & 7) {
	case 4:
		*bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. "
			"Fixing up support for x2 & x1\n");
		break;
	case 2:
		*bridge_agpstat |= AGPSTAT2_1X;
		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. "
			"Fixing up support for x1\n");
		break;
	default:
		break;
	}

	/* Check the speed bits make sense. Only one should be set. */
	tmp = *requested_mode & 7;
	switch (tmp) {
	case 0:
		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm);
		*requested_mode |= AGPSTAT2_1X;
		break;
	case 1:
	case 2:
		break;
	case 3:
		*requested_mode &= ~(AGPSTAT2_1X);	/* rate=2 */
		break;
	case 4:
		break;
	case 5:
	case 6:
	case 7:
		*requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X); /* rate=4 */
		break;
	}

	/* disable SBA if it's not supported */
	if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA)))
		*bridge_agpstat &= ~AGPSTAT_SBA;

	/* Set rate */
	if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X)))
		*bridge_agpstat &= ~AGPSTAT2_4X;

	if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X)))
		*bridge_agpstat &= ~AGPSTAT2_2X;

	if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X)))
		*bridge_agpstat &= ~AGPSTAT2_1X;

	/* Now we know what mode it should be, clear out the unwanted bits. */
	if (*bridge_agpstat & AGPSTAT2_4X)
		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X);	/* 4X */

	if (*bridge_agpstat & AGPSTAT2_2X)
		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X);	/* 2X */

	if (*bridge_agpstat & AGPSTAT2_1X)
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);	/* 1X */

	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}

	/* If we've dropped down to 1X, disable fast writes. */
	if (*bridge_agpstat & AGPSTAT2_1X)
		*bridge_agpstat &= ~AGPSTAT_FW;
}
/*
 * requested_mode = Mode requested by (typically) X.
 * bridge_agpstat = PCI_AGP_STATUS from agp bridge.
 * vga_agpstat = PCI_AGP_STATUS from graphic card.
 */
static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	u32 origbridge = *bridge_agpstat, origvga = *vga_agpstat;
	u32 tmp;

	if (*requested_mode & AGP3_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
			*requested_mode & AGP3_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP3_RESERVED_MASK;
	}

	/* Check the speed bits make sense. */
	tmp = *requested_mode & 7;
	if (tmp == 0) {
		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm);
		*requested_mode |= AGPSTAT3_4X;
	}
	if (tmp >= 3) {
		printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4);
		*requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
	}

	/* ARQSZ - Set the value to the maximum one.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
		max_t(u32, (*bridge_agpstat & AGPSTAT_ARQSZ), (*vga_agpstat & AGPSTAT_ARQSZ)));

	/* Calibration cycle.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
		min_t(u32, (*bridge_agpstat & AGPSTAT_CAL_MASK), (*vga_agpstat & AGPSTAT_CAL_MASK)));

	/* SBA *must* be supported for AGP v3 */
	*bridge_agpstat |= AGPSTAT_SBA;

	/*
	 * Set speed.
	 * Check for invalid speeds. This can happen when applications
	 * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware
	 */
	if (*requested_mode & AGPSTAT_MODE_3_0) {
		/*
		 * Caller hasn't a clue what it is doing. Bridge is in 3.0 mode,
		 * have been passed a 3.0 mode, but with 2.x speed bits set.
		 * AGP2.x 4x -> AGP3.0 4x.
		 */
		if (*requested_mode & AGPSTAT2_4X) {
			printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
				current->comm, *requested_mode);
			*requested_mode &= ~AGPSTAT2_4X;
			*requested_mode |= AGPSTAT3_4X;
		}
	} else {
		/*
		 * The caller doesn't know what they are doing. We are in 3.0 mode,
		 * but have been passed an AGP 2.x mode.
		 * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
		 */
		printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
			current->comm, *requested_mode);
		*requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
		*requested_mode |= AGPSTAT3_4X;
	}

	if (*requested_mode & AGPSTAT3_8X) {
		if (!(*bridge_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm);
			return;
		}
		if (!(*vga_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but graphic card not capable.\n", current->comm);
			return;
		}
		/* All set, bridge & device can do AGP x8 */
		*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		goto done;

	} else if (*requested_mode & AGPSTAT3_4X) {
		*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
		*bridge_agpstat |= AGPSTAT3_4X;
		goto done;

	} else {
		/*
		 * If we didn't specify an AGP mode, we see if both
		 * the graphics card, and the bridge can do x8, and use if so.
		 * If not, we fall back to x4 mode.
		 */
		if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
			printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode "
				"supported by bridge & card (x8).\n");
			*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
			*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		} else {
			printk(KERN_INFO PFX "Fell back to AGPx4 mode because");
			if (!(*bridge_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
					*bridge_agpstat, origbridge);
				*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*bridge_agpstat |= AGPSTAT3_4X;
			}
			if (!(*vga_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n",
					*vga_agpstat, origvga);
				*vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*vga_agpstat |= AGPSTAT3_4X;
			}
		}
	}

done:
	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}
}
/**
 * agp_collect_device_status - determine correct agp_cmd from various agp_stat's
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @requested_mode: requested agp_stat from userspace (Typically from X)
 * @bridge_agpstat: current agp_stat from AGP bridge.
 *
 * This function will hunt for an AGP graphics card, and try to match
 * the requested mode to the capabilities of both the bridge and the card.
 */
u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat)
{
	struct pci_dev *device = NULL;
	u32 vga_agpstat;
	u8 cap_ptr;

	for (;;) {
		device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device);
		if (!device) {
			printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
			return 0;
		}
		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (cap_ptr)
			break;
	}

	/*
	 * Ok, here we have an AGP device. Disable impossible
	 * settings, and adjust the readqueue to the minimum.
	 */
	pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat);

	/* adjust RQ depth */
	bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) |
		min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH),
			min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH))));

	/* disable FW if it's not supported */
	if (!((bridge_agpstat & AGPSTAT_FW) &&
	      (vga_agpstat & AGPSTAT_FW) &&
	      (requested_mode & AGPSTAT_FW)))
		bridge_agpstat &= ~AGPSTAT_FW;

	/* Check to see if we are operating in 3.0 mode */
	if (agp_bridge->mode & AGPSTAT_MODE_3_0)
		agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
	else
		agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);

	pci_dev_put(device);
	return bridge_agpstat;
}
EXPORT_SYMBOL(agp_collect_device_status);

void agp_device_command(u32 bridge_agpstat, bool agp_v3)
{
	struct pci_dev *device = NULL;
	int mode;

	mode = bridge_agpstat & 0x7;
	if (agp_v3)
		mode *= 4;

	for_each_pci_dev(device) {
		u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (!agp)
			continue;

		dev_info(&device->dev, "putting AGP V%d device into %dx mode\n",
			 agp_v3 ? 3 : 2, mode);
		pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
	}
}
EXPORT_SYMBOL(agp_device_command);
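
/*
 * Illustrative note (not part of the original file): the low three bits of
 * the AGP status/command registers encode the rate. In AGP 2.x they are a
 * bitmask (bit 0 = x1, bit 1 = x2, bit 2 = x4), so a field value of 4 means
 * x4. In AGP 3.0 the field is rescaled, which is why agp_device_command()
 * multiplies by four for the log message: a field value of 2 in 3.0 mode
 * is reported as x8.
 */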
void get_agp_version(struct agp_bridge_data *bridge)
{
	u32 ncapid;

	/* Exit early if already set by errata workarounds. */
	if (bridge->major_version != 0)
		return;

	pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid);
	bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
	bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
}
EXPORT_SYMBOL(get_agp_version);

void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
{
	u32 bridge_agpstat, temp;

	get_agp_version(agp_bridge);

	dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
		 agp_bridge->major_version, agp_bridge->minor_version);

	pci_read_config_dword(agp_bridge->dev,
			      agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);

	bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat);
	if (bridge_agpstat == 0)
		/* Something bad happened. FIXME: Return error code? */
		return;

	bridge_agpstat |= AGPSTAT_AGP_ENABLE;

	/* Do AGP version specific frobbing. */
	if (bridge->major_version >= 3) {
		if (bridge->mode & AGPSTAT_MODE_3_0) {
			/* If we have 3.5, we can do the isoch stuff. */
			if (bridge->minor_version >= 5)
				agp_3_5_enable(bridge);
			agp_device_command(bridge_agpstat, true);
			return;
		} else {
			/* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation. */
			bridge_agpstat &= ~(7<<10);
			pci_read_config_dword(bridge->dev,
					      bridge->capndx+AGPCTRL, &temp);
			temp |= (1<<9);
			pci_write_config_dword(bridge->dev,
					       bridge->capndx+AGPCTRL, temp);

			dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n");
		}
	}

	/* AGP v<3 */
	agp_device_command(bridge_agpstat, false);
}
EXPORT_SYMBOL(agp_generic_enable);
int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
{
	char *table;
	char *table_end;
	int size;
	int page_order;
	int num_entries;
	int i;
	void *temp;
	struct page *page;

	/* The generic routines can't handle 2 level gatt's */
	if (bridge->driver->size_type == LVL2_APER_SIZE)
		return -EINVAL;

	table = NULL;
	i = bridge->aperture_size_idx;
	temp = bridge->current_size;
	size = page_order = num_entries = 0;

	if (bridge->driver->size_type != FIXED_APER_SIZE) {
		do {
			switch (bridge->driver->size_type) {
			case U8_APER_SIZE:
				size = A_SIZE_8(temp)->size;
				page_order =
				    A_SIZE_8(temp)->page_order;
				num_entries =
				    A_SIZE_8(temp)->num_entries;
				break;
			case U16_APER_SIZE:
				size = A_SIZE_16(temp)->size;
				page_order = A_SIZE_16(temp)->page_order;
				num_entries = A_SIZE_16(temp)->num_entries;
				break;
			case U32_APER_SIZE:
				size = A_SIZE_32(temp)->size;
				page_order = A_SIZE_32(temp)->page_order;
				num_entries = A_SIZE_32(temp)->num_entries;
				break;
			/* This case will never really happen. */
			case FIXED_APER_SIZE:
			case LVL2_APER_SIZE:
			default:
				size = page_order = num_entries = 0;
				break;
			}

			table = alloc_gatt_pages(page_order);

			if (table == NULL) {
				i++;
				switch (bridge->driver->size_type) {
				case U8_APER_SIZE:
					bridge->current_size = A_IDX8(bridge);
					break;
				case U16_APER_SIZE:
					bridge->current_size = A_IDX16(bridge);
					break;
				case U32_APER_SIZE:
					bridge->current_size = A_IDX32(bridge);
					break;
				/* These cases will never really happen. */
				case FIXED_APER_SIZE:
				case LVL2_APER_SIZE:
				default:
					break;
				}
				temp = bridge->current_size;
			} else {
				bridge->aperture_size_idx = i;
			}
		} while (!table && (i < bridge->driver->num_aperture_sizes));
	} else {
		size = ((struct aper_size_info_fixed *) temp)->size;
		page_order = ((struct aper_size_info_fixed *) temp)->page_order;
		num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
		table = alloc_gatt_pages(page_order);
	}

	if (table == NULL)
		return -ENOMEM;

	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		SetPageReserved(page);

	bridge->gatt_table_real = (u32 *) table;
	agp_gatt_table = (void *)table;

	bridge->driver->cache_flush();
#ifdef CONFIG_X86
	if (set_memory_uc((unsigned long)table, 1 << page_order))
		printk(KERN_WARNING "Could not set GATT table memory to UC!\n");

	bridge->gatt_table = (void *)table;
#else
	bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
					(PAGE_SIZE * (1 << page_order)));
	bridge->driver->cache_flush();
#endif

	if (bridge->gatt_table == NULL) {
		for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
			ClearPageReserved(page);

		free_gatt_pages(table, page_order);

		return -ENOMEM;
	}
	bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real);

	/* AK: bogus, should encode addresses > 4GB */
	for (i = 0; i < num_entries; i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
		readl(bridge->gatt_table+i);	/* PCI Posting. */
	}

	return 0;
}
EXPORT_SYMBOL(agp_generic_create_gatt_table);
int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	char *table, *table_end;
	void *temp;
	struct page *page;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		page_order = A_SIZE_8(temp)->page_order;
		break;
	case U16_APER_SIZE:
		page_order = A_SIZE_16(temp)->page_order;
		break;
	case U32_APER_SIZE:
		page_order = A_SIZE_32(temp)->page_order;
		break;
	case FIXED_APER_SIZE:
		page_order = A_SIZE_FIX(temp)->page_order;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2 level gatt's */
		return -EINVAL;
		break;
	default:
		page_order = 0;
		break;
	}

	/* Do not worry about freeing memory, because if this is
	 * called, then all agp memory is deallocated and removed
	 * from the table. */

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)bridge->gatt_table, 1 << page_order);
#else
	iounmap(bridge->gatt_table);
#endif
	table = (char *) bridge->gatt_table_real;
	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		ClearPageReserved(page);

	free_gatt_pages(bridge->gatt_table_real, page_order);

	agp_gatt_table = NULL;
	bridge->gatt_table = NULL;
	bridge->gatt_table_real = NULL;
	bridge->gatt_bus_addr = 0;

	return 0;
}
EXPORT_SYMBOL(agp_generic_free_gatt_table);
int agp_generic_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int num_entries;
	size_t i;
	off_t j;
	void *temp;
	struct agp_bridge_data *bridge;
	int mask_type;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2 level gatt's */
		return -EINVAL;
		break;
	default:
		num_entries = 0;
		break;
	}

	num_entries -= agp_memory_reserved/PAGE_SIZE;
	if (num_entries < 0) num_entries = 0;

	if (type != mem->type)
		return -EINVAL;

	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}

	/* AK: could wrap */
	if ((pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	j = pg_start;

	while (j < (pg_start + mem->page_count)) {
		if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
			return -EBUSY;
		j++;
	}

	if (!mem->is_flushed) {
		bridge->driver->cache_flush();
		mem->is_flushed = true;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(bridge->driver->mask_memory(bridge,
						   page_to_phys(mem->pages[i]),
						   mask_type),
		       bridge->gatt_table+j);
	}
	readl(bridge->gatt_table+j-1);	/* PCI Posting. */

	bridge->driver->tlb_flush(mem);
	return 0;
}
EXPORT_SYMBOL(agp_generic_insert_memory);
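
/*
 * Illustrative walk-through (not part of the original file): for a bind at
 * pg_start = 16 of a 4-page agp_memory, the loop above writes GATT entries
 * 16..19 with mask_memory(page_to_phys(page), 0), then reads entry 19 back
 * to force PCI posting before the chipset TLB flush.
 */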
int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	size_t i;
	struct agp_bridge_data *bridge;
	int mask_type;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	if (type != mem->type)
		return -EINVAL;

	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}

	/* AK: bogus, should encode addresses > 4GB */
	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
	}
	readl(bridge->gatt_table+i-1);	/* PCI Posting. */

	bridge->driver->tlb_flush(mem);
	return 0;
}
EXPORT_SYMBOL(agp_generic_remove_memory);
struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
{
	return NULL;
}
EXPORT_SYMBOL(agp_generic_alloc_by_type);

void agp_generic_free_by_type(struct agp_memory *curr)
{
	agp_free_page_array(curr);
	agp_free_key(curr->key);
	kfree(curr);
}
EXPORT_SYMBOL(agp_generic_free_by_type);

struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
{
	struct agp_memory *new;
	int i;
	int pages;

	pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
	new = agp_create_user_memory(page_count);
	if (new == NULL)
		return NULL;

	for (i = 0; i < page_count; i++)
		new->pages[i] = NULL;
	new->page_count = 0;
	new->type = type;
	new->num_scratch_pages = pages;

	return new;
}
EXPORT_SYMBOL(agp_generic_alloc_user);
/*
 * Basic Page Allocation Routines -
 * These routines handle page allocation and by default they reserve the allocated
 * memory. They also handle incrementing the current_memory_agp value, which is checked
 * against a maximum value.
 */

int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *mem, size_t num_pages)
{
	struct page *page;
	int i, ret = -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
		/* agp_free_memory() needs gart address */
		if (page == NULL)
			goto out;

#ifndef CONFIG_X86
		map_page_into_agp(page);
#endif
		get_page(page);
		atomic_inc(&agp_bridge->current_memory_agp);

		mem->pages[i] = page;
		mem->page_count++;
	}

#ifdef CONFIG_X86
	set_pages_array_uc(mem->pages, num_pages);
#endif
	ret = 0;
out:
	return ret;
}
EXPORT_SYMBOL(agp_generic_alloc_pages);
struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return NULL;

	map_page_into_agp(page);

	get_page(page);
	atomic_inc(&agp_bridge->current_memory_agp);
	return page;
}
EXPORT_SYMBOL(agp_generic_alloc_page);

void agp_generic_destroy_pages(struct agp_memory *mem)
{
	int i;
	struct page *page;

	if (!mem)
		return;

#ifdef CONFIG_X86
	set_pages_array_wb(mem->pages, mem->page_count);
#endif

	for (i = 0; i < mem->page_count; i++) {
		page = mem->pages[i];

#ifndef CONFIG_X86
		unmap_page_from_agp(page);
#endif
		put_page(page);
		__free_page(page);
		atomic_dec(&agp_bridge->current_memory_agp);
		mem->pages[i] = NULL;
	}
}
EXPORT_SYMBOL(agp_generic_destroy_pages);

void agp_generic_destroy_page(struct page *page, int flags)
{
	if (page == NULL)
		return;

	if (flags & AGP_PAGE_DESTROY_UNMAP)
		unmap_page_from_agp(page);

	if (flags & AGP_PAGE_DESTROY_FREE) {
		put_page(page);
		__free_page(page);
		atomic_dec(&agp_bridge->current_memory_agp);
	}
}
EXPORT_SYMBOL(agp_generic_destroy_page);
/* End Basic Page Allocation Routines */

/**
 * agp_enable - initialise the agp point-to-point connection.
 *
 * @mode: agp mode register value to configure with.
 */
void agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	if (!bridge)
		return;
	bridge->driver->agp_enable(bridge, mode);
}
EXPORT_SYMBOL(agp_enable);

/* When we remove the global variable agp_bridge from all drivers
 * then agp_alloc_bridge and agp_generic_find_bridge need to be updated
 */
struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev)
{
	if (list_empty(&agp_bridges))
		return NULL;
	return agp_bridge;
}

static void ipi_handler(void *null)
{
	flush_agp_cache();
}

void global_cache_flush(void)
{
	if (on_each_cpu(ipi_handler, NULL, 1) != 0)
		panic(PFX "timed out waiting for the other CPUs!\n");
}
EXPORT_SYMBOL(global_cache_flush);

unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
				      dma_addr_t addr, int type)
{
	/* memory type is ignored in the generic routine */
	if (bridge->driver->masks)
		return addr | bridge->driver->masks[0].mask;
	else
		return addr;
}
EXPORT_SYMBOL(agp_generic_mask_memory);
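
/*
 * Illustrative example (not part of the original file): with a hypothetical
 * driver mask of 0x00000001 and a page at physical address 0x12345000, the
 * generic routine returns 0x12345001, i.e. the GATT entry is simply the
 * physical address with the chipset's mask bits ORed in.
 */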
int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
				  int type)
{
	if (type >= AGP_USER_TYPES)
		return 0;
	return type;
}
EXPORT_SYMBOL(agp_generic_type_to_mask_type);

/*
 * These functions are implemented according to the AGPv3 spec,
 * which covers implementation details that had previously been
 * left open.
 */

int agp3_generic_fetch_size(void)
{
	u16 temp_size;
	int i;
	struct aper_size_info_16 *values;

	pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
	values = A_SIZE_16(agp_bridge->driver->aperture_sizes);

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp_size == values[i].size_value) {
			agp_bridge->previous_size =
				agp_bridge->current_size = (void *) (values + i);

			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}
	return 0;
}
EXPORT_SYMBOL(agp3_generic_fetch_size);

void agp3_generic_tlbflush(struct agp_memory *mem)
{
	u32 ctrl;
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
}
EXPORT_SYMBOL(agp3_generic_tlbflush);

int agp3_generic_configure(void)
{
	u32 temp;
	struct aper_size_info_16 *current_size;

	current_size = A_SIZE_16(agp_bridge->current_size);

	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* set aperture size */
	pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
	/* set gart pointer */
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
	/* enable aperture and GTLB */
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);
	return 0;
}
EXPORT_SYMBOL(agp3_generic_configure);

void agp3_generic_cleanup(void)
{
	u32 ctrl;
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
}
EXPORT_SYMBOL(agp3_generic_cleanup);
const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
{
	{4096, 1048576, 10, 0x000},
	{2048,  524288,  9, 0x800},
	{1024,  262144,  8, 0xc00},
	{ 512,  131072,  7, 0xe00},
	{ 256,   65536,  6, 0xf00},
	{ 128,   32768,  5, 0xf20},
	{  64,   16384,  4, 0xf30},
	{  32,    8192,  3, 0xf38},
	{  16,    4096,  2, 0xf3c},
	{   8,    2048,  1, 0xf3e},
	{   4,    1024,  0, 0xf3f}
};
EXPORT_SYMBOL(agp3_generic_sizes);
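
/*
 * Illustrative note (not part of the original file): each agp3_generic_sizes
 * entry is {aperture size in MB, number of GATT entries, page order of the
 * GATT allocation, AGPAPSIZE register value}. For example the 256 MB row
 * provides 65536 entries (256 MB / 4 KB pages), needs an order-6 GATT
 * allocation (65536 * 4 bytes = 64 pages), and programs 0xf00 into the
 * aperture size register.
 */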