generic.c 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452
  1. /*
  2. * AGPGART driver.
  3. * Copyright (C) 2004 Silicon Graphics, Inc.
  4. * Copyright (C) 2002-2005 Dave Jones.
  5. * Copyright (C) 1999 Jeff Hartmann.
  6. * Copyright (C) 1999 Precision Insight, Inc.
  7. * Copyright (C) 1999 Xi Graphics, Inc.
  8. *
  9. * Permission is hereby granted, free of charge, to any person obtaining a
  10. * copy of this software and associated documentation files (the "Software"),
  11. * to deal in the Software without restriction, including without limitation
  12. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  13. * and/or sell copies of the Software, and to permit persons to whom the
  14. * Software is furnished to do so, subject to the following conditions:
  15. *
  16. * The above copyright notice and this permission notice shall be included
  17. * in all copies or substantial portions of the Software.
  18. *
  19. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  20. * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  21. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  22. * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
  23. * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  24. * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
  25. * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  26. *
  27. * TODO:
  28. * - Allocate more than order 0 pages to avoid too much linear map splitting.
  29. */
  30. #include <linux/module.h>
  31. #include <linux/pci.h>
  32. #include <linux/init.h>
  33. #include <linux/pagemap.h>
  34. #include <linux/miscdevice.h>
  35. #include <linux/pm.h>
  36. #include <linux/agp_backend.h>
  37. #include <linux/vmalloc.h>
  38. #include <linux/dma-mapping.h>
  39. #include <linux/mm.h>
  40. #include <linux/sched.h>
  41. #include <asm/io.h>
  42. #include <asm/cacheflush.h>
  43. #include <asm/pgtable.h>
  44. #include "agp.h"
/* Kernel-virtual pointer to the GATT; set up by the bridge driver. */
__u32 *agp_gatt_table;
/* Bytes of aperture reserved from userspace allocation (see agp_return_size()). */
int agp_memory_reserved;

/*
 * Needed by the Nforce GART driver for the time being. Would be
 * nice to do this some other way instead of needing this export.
 */
EXPORT_SYMBOL_GPL(agp_memory_reserved);
  52. /*
  53. * Generic routines for handling agp_memory structures -
  54. * They use the basic page allocation routines to do the brunt of the work.
  55. */
  56. void agp_free_key(int key)
  57. {
  58. if (key < 0)
  59. return;
  60. if (key < MAXKEY)
  61. clear_bit(key, agp_bridge->key_list);
  62. }
  63. EXPORT_SYMBOL(agp_free_key);
  64. static int agp_get_key(void)
  65. {
  66. int bit;
  67. bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
  68. if (bit < MAXKEY) {
  69. set_bit(bit, agp_bridge->key_list);
  70. return bit;
  71. }
  72. return -1;
  73. }
  74. void agp_flush_chipset(struct agp_bridge_data *bridge)
  75. {
  76. if (bridge->driver->chipset_flush)
  77. bridge->driver->chipset_flush(bridge);
  78. }
  79. EXPORT_SYMBOL(agp_flush_chipset);
  80. /*
  81. * Use kmalloc if possible for the page list. Otherwise fall back to
  82. * vmalloc. This speeds things up and also saves memory for small AGP
  83. * regions.
  84. */
  85. void agp_alloc_page_array(size_t size, struct agp_memory *mem)
  86. {
  87. mem->pages = NULL;
  88. mem->vmalloc_flag = false;
  89. if (size <= 2*PAGE_SIZE)
  90. mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NORETRY);
  91. if (mem->pages == NULL) {
  92. mem->pages = vmalloc(size);
  93. mem->vmalloc_flag = true;
  94. }
  95. }
  96. EXPORT_SYMBOL(agp_alloc_page_array);
  97. void agp_free_page_array(struct agp_memory *mem)
  98. {
  99. if (mem->vmalloc_flag) {
  100. vfree(mem->pages);
  101. } else {
  102. kfree(mem->pages);
  103. }
  104. }
  105. EXPORT_SYMBOL(agp_free_page_array);
  106. static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
  107. {
  108. struct agp_memory *new;
  109. unsigned long alloc_size = num_agp_pages*sizeof(struct page *);
  110. new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
  111. if (new == NULL)
  112. return NULL;
  113. new->key = agp_get_key();
  114. if (new->key < 0) {
  115. kfree(new);
  116. return NULL;
  117. }
  118. agp_alloc_page_array(alloc_size, new);
  119. if (new->pages == NULL) {
  120. agp_free_key(new->key);
  121. kfree(new);
  122. return NULL;
  123. }
  124. new->num_scratch_pages = 0;
  125. return new;
  126. }
  127. struct agp_memory *agp_create_memory(int scratch_pages)
  128. {
  129. struct agp_memory *new;
  130. new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
  131. if (new == NULL)
  132. return NULL;
  133. new->key = agp_get_key();
  134. if (new->key < 0) {
  135. kfree(new);
  136. return NULL;
  137. }
  138. agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);
  139. if (new->pages == NULL) {
  140. agp_free_key(new->key);
  141. kfree(new);
  142. return NULL;
  143. }
  144. new->num_scratch_pages = scratch_pages;
  145. new->type = AGP_NORMAL_MEMORY;
  146. return new;
  147. }
  148. EXPORT_SYMBOL(agp_create_memory);
  149. /**
  150. * agp_free_memory - free memory associated with an agp_memory pointer.
  151. *
  152. * @curr: agp_memory pointer to be freed.
  153. *
  154. * It is the only function that can be called when the backend is not owned
  155. * by the caller. (So it can free memory on client death.)
  156. */
  157. void agp_free_memory(struct agp_memory *curr)
  158. {
  159. size_t i;
  160. if (curr == NULL)
  161. return;
  162. if (curr->is_bound)
  163. agp_unbind_memory(curr);
  164. if (curr->type >= AGP_USER_TYPES) {
  165. agp_generic_free_by_type(curr);
  166. return;
  167. }
  168. if (curr->type != 0) {
  169. curr->bridge->driver->free_by_type(curr);
  170. return;
  171. }
  172. if (curr->page_count != 0) {
  173. if (curr->bridge->driver->agp_destroy_pages) {
  174. curr->bridge->driver->agp_destroy_pages(curr);
  175. } else {
  176. for (i = 0; i < curr->page_count; i++) {
  177. curr->bridge->driver->agp_destroy_page(
  178. curr->pages[i],
  179. AGP_PAGE_DESTROY_UNMAP);
  180. }
  181. for (i = 0; i < curr->page_count; i++) {
  182. curr->bridge->driver->agp_destroy_page(
  183. curr->pages[i],
  184. AGP_PAGE_DESTROY_FREE);
  185. }
  186. }
  187. }
  188. agp_free_key(curr->key);
  189. agp_free_page_array(curr);
  190. kfree(curr);
  191. }
  192. EXPORT_SYMBOL(agp_free_memory);
  193. #define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
  194. /**
  195. * agp_allocate_memory - allocate a group of pages of a certain type.
  196. *
  197. * @page_count: size_t argument of the number of pages
  198. * @type: u32 argument of the type of memory to be allocated.
  199. *
  200. * Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY which
  201. * maps to physical ram. Any other type is device dependent.
  202. *
  203. * It returns NULL whenever memory is unavailable.
  204. */
  205. struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
  206. size_t page_count, u32 type)
  207. {
  208. int scratch_pages;
  209. struct agp_memory *new;
  210. size_t i;
  211. if (!bridge)
  212. return NULL;
  213. if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp)
  214. return NULL;
  215. if (type >= AGP_USER_TYPES) {
  216. new = agp_generic_alloc_user(page_count, type);
  217. if (new)
  218. new->bridge = bridge;
  219. return new;
  220. }
  221. if (type != 0) {
  222. new = bridge->driver->alloc_by_type(page_count, type);
  223. if (new)
  224. new->bridge = bridge;
  225. return new;
  226. }
  227. scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
  228. new = agp_create_memory(scratch_pages);
  229. if (new == NULL)
  230. return NULL;
  231. if (bridge->driver->agp_alloc_pages) {
  232. if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) {
  233. agp_free_memory(new);
  234. return NULL;
  235. }
  236. new->bridge = bridge;
  237. return new;
  238. }
  239. for (i = 0; i < page_count; i++) {
  240. struct page *page = bridge->driver->agp_alloc_page(bridge);
  241. if (page == NULL) {
  242. agp_free_memory(new);
  243. return NULL;
  244. }
  245. new->pages[i] = page;
  246. new->page_count++;
  247. }
  248. new->bridge = bridge;
  249. return new;
  250. }
  251. EXPORT_SYMBOL(agp_allocate_memory);
  252. /* End - Generic routines for handling agp_memory structures */
  253. static int agp_return_size(void)
  254. {
  255. int current_size;
  256. void *temp;
  257. temp = agp_bridge->current_size;
  258. switch (agp_bridge->driver->size_type) {
  259. case U8_APER_SIZE:
  260. current_size = A_SIZE_8(temp)->size;
  261. break;
  262. case U16_APER_SIZE:
  263. current_size = A_SIZE_16(temp)->size;
  264. break;
  265. case U32_APER_SIZE:
  266. current_size = A_SIZE_32(temp)->size;
  267. break;
  268. case LVL2_APER_SIZE:
  269. current_size = A_SIZE_LVL2(temp)->size;
  270. break;
  271. case FIXED_APER_SIZE:
  272. current_size = A_SIZE_FIX(temp)->size;
  273. break;
  274. default:
  275. current_size = 0;
  276. break;
  277. }
  278. current_size -= (agp_memory_reserved / (1024*1024));
  279. if (current_size <0)
  280. current_size = 0;
  281. return current_size;
  282. }
  283. int agp_num_entries(void)
  284. {
  285. int num_entries;
  286. void *temp;
  287. temp = agp_bridge->current_size;
  288. switch (agp_bridge->driver->size_type) {
  289. case U8_APER_SIZE:
  290. num_entries = A_SIZE_8(temp)->num_entries;
  291. break;
  292. case U16_APER_SIZE:
  293. num_entries = A_SIZE_16(temp)->num_entries;
  294. break;
  295. case U32_APER_SIZE:
  296. num_entries = A_SIZE_32(temp)->num_entries;
  297. break;
  298. case LVL2_APER_SIZE:
  299. num_entries = A_SIZE_LVL2(temp)->num_entries;
  300. break;
  301. case FIXED_APER_SIZE:
  302. num_entries = A_SIZE_FIX(temp)->num_entries;
  303. break;
  304. default:
  305. num_entries = 0;
  306. break;
  307. }
  308. num_entries -= agp_memory_reserved>>PAGE_SHIFT;
  309. if (num_entries<0)
  310. num_entries = 0;
  311. return num_entries;
  312. }
  313. EXPORT_SYMBOL_GPL(agp_num_entries);
  314. /**
  315. * agp_copy_info - copy bridge state information
  316. *
  317. * @info: agp_kern_info pointer. The caller should insure that this pointer is valid.
  318. *
  319. * This function copies information about the agp bridge device and the state of
  320. * the agp backend into an agp_kern_info pointer.
  321. */
  322. int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
  323. {
  324. memset(info, 0, sizeof(struct agp_kern_info));
  325. if (!bridge) {
  326. info->chipset = NOT_SUPPORTED;
  327. return -EIO;
  328. }
  329. info->version.major = bridge->version->major;
  330. info->version.minor = bridge->version->minor;
  331. info->chipset = SUPPORTED;
  332. info->device = bridge->dev;
  333. if (bridge->mode & AGPSTAT_MODE_3_0)
  334. info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
  335. else
  336. info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
  337. info->aper_base = bridge->gart_bus_addr;
  338. info->aper_size = agp_return_size();
  339. info->max_memory = bridge->max_memory_agp;
  340. info->current_memory = atomic_read(&bridge->current_memory_agp);
  341. info->cant_use_aperture = bridge->driver->cant_use_aperture;
  342. info->vm_ops = bridge->vm_ops;
  343. info->page_mask = ~0UL;
  344. return 0;
  345. }
  346. EXPORT_SYMBOL(agp_copy_info);
  347. /* End - Routine to copy over information structure */
  348. /*
  349. * Routines for handling swapping of agp_memory into the GATT -
  350. * These routines take agp_memory and insert them into the GATT.
  351. * They call device specific routines to actually write to the GATT.
  352. */
  353. /**
  354. * agp_bind_memory - Bind an agp_memory structure into the GATT.
  355. *
  356. * @curr: agp_memory pointer
  357. * @pg_start: an offset into the graphics aperture translation table
  358. *
  359. * It returns -EINVAL if the pointer == NULL.
  360. * It returns -EBUSY if the area of the table requested is already in use.
  361. */
  362. int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
  363. {
  364. int ret_val;
  365. if (curr == NULL)
  366. return -EINVAL;
  367. if (curr->is_bound) {
  368. printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
  369. return -EINVAL;
  370. }
  371. if (!curr->is_flushed) {
  372. curr->bridge->driver->cache_flush();
  373. curr->is_flushed = true;
  374. }
  375. ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);
  376. if (ret_val != 0)
  377. return ret_val;
  378. curr->is_bound = true;
  379. curr->pg_start = pg_start;
  380. spin_lock(&agp_bridge->mapped_lock);
  381. list_add(&curr->mapped_list, &agp_bridge->mapped_list);
  382. spin_unlock(&agp_bridge->mapped_lock);
  383. return 0;
  384. }
  385. EXPORT_SYMBOL(agp_bind_memory);
  386. /**
  387. * agp_unbind_memory - Removes an agp_memory structure from the GATT
  388. *
  389. * @curr: agp_memory pointer to be removed from the GATT.
  390. *
  391. * It returns -EINVAL if this piece of agp_memory is not currently bound to
  392. * the graphics aperture translation table or if the agp_memory pointer == NULL
  393. */
  394. int agp_unbind_memory(struct agp_memory *curr)
  395. {
  396. int ret_val;
  397. if (curr == NULL)
  398. return -EINVAL;
  399. if (!curr->is_bound) {
  400. printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
  401. return -EINVAL;
  402. }
  403. ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);
  404. if (ret_val != 0)
  405. return ret_val;
  406. curr->is_bound = false;
  407. curr->pg_start = 0;
  408. spin_lock(&curr->bridge->mapped_lock);
  409. list_del(&curr->mapped_list);
  410. spin_unlock(&curr->bridge->mapped_lock);
  411. return 0;
  412. }
  413. EXPORT_SYMBOL(agp_unbind_memory);
/**
 * agp_rebind_memory - Rewrite the entire GATT, useful on resume
 *
 * Walks every agp_memory on the global bridge's mapped list and re-inserts
 * it at its recorded pg_start/type. Stops at the first insert_memory()
 * failure and returns that error; returns 0 on success.
 */
int agp_rebind_memory(void)
{
	struct agp_memory *curr;
	int ret_val = 0;

	spin_lock(&agp_bridge->mapped_lock);
	list_for_each_entry(curr, &agp_bridge->mapped_list, mapped_list) {
		ret_val = curr->bridge->driver->insert_memory(curr,
							      curr->pg_start,
							      curr->type);
		if (ret_val != 0)
			break;
	}
	spin_unlock(&agp_bridge->mapped_lock);
	return ret_val;
}
EXPORT_SYMBOL(agp_rebind_memory);
  433. /* End - Routines for handling swapping of agp_memory into the GATT */
  434. /* Generic Agp routines - Start */
  435. static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
  436. {
  437. u32 tmp;
  438. if (*requested_mode & AGP2_RESERVED_MASK) {
  439. printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
  440. *requested_mode & AGP2_RESERVED_MASK, *requested_mode);
  441. *requested_mode &= ~AGP2_RESERVED_MASK;
  442. }
  443. /*
  444. * Some dumb bridges are programmed to disobey the AGP2 spec.
  445. * This is likely a BIOS misprogramming rather than poweron default, or
  446. * it would be a lot more common.
  447. * https://bugs.freedesktop.org/show_bug.cgi?id=8816
  448. * AGPv2 spec 6.1.9 states:
  449. * The RATE field indicates the data transfer rates supported by this
  450. * device. A.G.P. devices must report all that apply.
  451. * Fix them up as best we can.
  452. */
  453. switch (*bridge_agpstat & 7) {
  454. case 4:
  455. *bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
  456. printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate"
  457. "Fixing up support for x2 & x1\n");
  458. break;
  459. case 2:
  460. *bridge_agpstat |= AGPSTAT2_1X;
  461. printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate"
  462. "Fixing up support for x1\n");
  463. break;
  464. default:
  465. break;
  466. }
  467. /* Check the speed bits make sense. Only one should be set. */
  468. tmp = *requested_mode & 7;
  469. switch (tmp) {
  470. case 0:
  471. printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm);
  472. *requested_mode |= AGPSTAT2_1X;
  473. break;
  474. case 1:
  475. case 2:
  476. break;
  477. case 3:
  478. *requested_mode &= ~(AGPSTAT2_1X); /* rate=2 */
  479. break;
  480. case 4:
  481. break;
  482. case 5:
  483. case 6:
  484. case 7:
  485. *requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X); /* rate=4*/
  486. break;
  487. }
  488. /* disable SBA if it's not supported */
  489. if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA)))
  490. *bridge_agpstat &= ~AGPSTAT_SBA;
  491. /* Set rate */
  492. if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X)))
  493. *bridge_agpstat &= ~AGPSTAT2_4X;
  494. if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X)))
  495. *bridge_agpstat &= ~AGPSTAT2_2X;
  496. if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X)))
  497. *bridge_agpstat &= ~AGPSTAT2_1X;
  498. /* Now we know what mode it should be, clear out the unwanted bits. */
  499. if (*bridge_agpstat & AGPSTAT2_4X)
  500. *bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X); /* 4X */
  501. if (*bridge_agpstat & AGPSTAT2_2X)
  502. *bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X); /* 2X */
  503. if (*bridge_agpstat & AGPSTAT2_1X)
  504. *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X); /* 1X */
  505. /* Apply any errata. */
  506. if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
  507. *bridge_agpstat &= ~AGPSTAT_FW;
  508. if (agp_bridge->flags & AGP_ERRATA_SBA)
  509. *bridge_agpstat &= ~AGPSTAT_SBA;
  510. if (agp_bridge->flags & AGP_ERRATA_1X) {
  511. *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
  512. *bridge_agpstat |= AGPSTAT2_1X;
  513. }
  514. /* If we've dropped down to 1X, disable fast writes. */
  515. if (*bridge_agpstat & AGPSTAT2_1X)
  516. *bridge_agpstat &= ~AGPSTAT_FW;
  517. }
/*
 * requested_mode = Mode requested by (typically) X.
 * bridge_agpstat = PCI_AGP_STATUS from agp bridge.
 * vga_agpstat = PCI_AGP_STATUS from graphic card.
 *
 * Reconciles an AGP 3.0 mode request with bridge/card capabilities; on
 * return *bridge_agpstat (and possibly *vga_agpstat) carry the agreed
 * x4/x8 rate, ARQSZ, calibration-cycle and SBA settings. The fix-ups
 * below are strictly order-dependent.
 */
static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	/* Keep the original values for diagnostics printed further down. */
	u32 origbridge=*bridge_agpstat, origvga=*vga_agpstat;
	u32 tmp;

	if (*requested_mode & AGP3_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
			*requested_mode & AGP3_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP3_RESERVED_MASK;
	}

	/* Check the speed bits make sense. */
	tmp = *requested_mode & 7;
	if (tmp == 0) {
		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm);
		*requested_mode |= AGPSTAT3_4X;
	}
	if (tmp >= 3) {
		printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4);
		*requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
	}

	/* ARQSZ - Set the value to the maximum one.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
		max_t(u32,(*bridge_agpstat & AGPSTAT_ARQSZ),(*vga_agpstat & AGPSTAT_ARQSZ)));

	/* Calibration cycle.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
		min_t(u32,(*bridge_agpstat & AGPSTAT_CAL_MASK),(*vga_agpstat & AGPSTAT_CAL_MASK)));

	/* SBA *must* be supported for AGP v3 */
	*bridge_agpstat |= AGPSTAT_SBA;

	/*
	 * Set speed.
	 * Check for invalid speeds. This can happen when applications
	 * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware
	 */
	if (*requested_mode & AGPSTAT_MODE_3_0) {
		/*
		 * Caller hasn't a clue what it is doing. Bridge is in 3.0 mode,
		 * have been passed a 3.0 mode, but with 2.x speed bits set.
		 * AGP2.x 4x -> AGP3.0 4x.
		 */
		if (*requested_mode & AGPSTAT2_4X) {
			printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
				current->comm, *requested_mode);
			*requested_mode &= ~AGPSTAT2_4X;
			*requested_mode |= AGPSTAT3_4X;
		}
	} else {
		/*
		 * The caller doesn't know what they are doing. We are in 3.0 mode,
		 * but have been passed an AGP 2.x mode.
		 * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
		 */
		printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
			current->comm, *requested_mode);
		*requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
		*requested_mode |= AGPSTAT3_4X;
	}

	if (*requested_mode & AGPSTAT3_8X) {
		/* x8 requested: downgrade to x4 if either side can't do it. */
		if (!(*bridge_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm);
			return;
		}
		if (!(*vga_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but graphic card not capable.\n", current->comm);
			return;
		}
		/* All set, bridge & device can do AGP x8*/
		*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		goto done;

	} else if (*requested_mode & AGPSTAT3_4X) {
		*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
		*bridge_agpstat |= AGPSTAT3_4X;
		goto done;

	} else {
		/*
		 * If we didn't specify an AGP mode, we see if both
		 * the graphics card, and the bridge can do x8, and use if so.
		 * If not, we fall back to x4 mode.
		 */
		if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
			printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode "
				"supported by bridge & card (x8).\n");
			*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
			*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		} else {
			printk(KERN_INFO PFX "Fell back to AGPx4 mode because");
			if (!(*bridge_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
					*bridge_agpstat, origbridge);
				*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*bridge_agpstat |= AGPSTAT3_4X;
			}
			if (!(*vga_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n",
					*vga_agpstat, origvga);
				*vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*vga_agpstat |= AGPSTAT3_4X;
			}
		}
	}

done:
	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	/* NOTE(review): this errata path sets AGPSTAT2_* rate bits even
	 * though we are in v3 mode — looks intentional for the affected
	 * chipsets, but confirm against the errata definitions in agp.h. */
	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}
}
  638. /**
  639. * agp_collect_device_status - determine correct agp_cmd from various agp_stat's
  640. * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
  641. * @requested_mode: requested agp_stat from userspace (Typically from X)
  642. * @bridge_agpstat: current agp_stat from AGP bridge.
  643. *
  644. * This function will hunt for an AGP graphics card, and try to match
  645. * the requested mode to the capabilities of both the bridge and the card.
  646. */
  647. u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat)
  648. {
  649. struct pci_dev *device = NULL;
  650. u32 vga_agpstat;
  651. u8 cap_ptr;
  652. for (;;) {
  653. device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device);
  654. if (!device) {
  655. printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
  656. return 0;
  657. }
  658. cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
  659. if (cap_ptr)
  660. break;
  661. }
  662. /*
  663. * Ok, here we have a AGP device. Disable impossible
  664. * settings, and adjust the readqueue to the minimum.
  665. */
  666. pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat);
  667. /* adjust RQ depth */
  668. bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) |
  669. min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH),
  670. min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH))));
  671. /* disable FW if it's not supported */
  672. if (!((bridge_agpstat & AGPSTAT_FW) &&
  673. (vga_agpstat & AGPSTAT_FW) &&
  674. (requested_mode & AGPSTAT_FW)))
  675. bridge_agpstat &= ~AGPSTAT_FW;
  676. /* Check to see if we are operating in 3.0 mode */
  677. if (agp_bridge->mode & AGPSTAT_MODE_3_0)
  678. agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
  679. else
  680. agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
  681. pci_dev_put(device);
  682. return bridge_agpstat;
  683. }
  684. EXPORT_SYMBOL(agp_collect_device_status);
  685. void agp_device_command(u32 bridge_agpstat, bool agp_v3)
  686. {
  687. struct pci_dev *device = NULL;
  688. int mode;
  689. mode = bridge_agpstat & 0x7;
  690. if (agp_v3)
  691. mode *= 4;
  692. for_each_pci_dev(device) {
  693. u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
  694. if (!agp)
  695. continue;
  696. dev_info(&device->dev, "putting AGP V%d device into %dx mode\n",
  697. agp_v3 ? 3 : 2, mode);
  698. pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
  699. }
  700. }
  701. EXPORT_SYMBOL(agp_device_command);
  702. void get_agp_version(struct agp_bridge_data *bridge)
  703. {
  704. u32 ncapid;
  705. /* Exit early if already set by errata workarounds. */
  706. if (bridge->major_version != 0)
  707. return;
  708. pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid);
  709. bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
  710. bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
  711. }
  712. EXPORT_SYMBOL(get_agp_version);
  713. void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
  714. {
  715. u32 bridge_agpstat, temp;
  716. get_agp_version(agp_bridge);
  717. dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
  718. agp_bridge->major_version, agp_bridge->minor_version);
  719. pci_read_config_dword(agp_bridge->dev,
  720. agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);
  721. bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat);
  722. if (bridge_agpstat == 0)
  723. /* Something bad happened. FIXME: Return error code? */
  724. return;
  725. bridge_agpstat |= AGPSTAT_AGP_ENABLE;
  726. /* Do AGP version specific frobbing. */
  727. if (bridge->major_version >= 3) {
  728. if (bridge->mode & AGPSTAT_MODE_3_0) {
  729. /* If we have 3.5, we can do the isoch stuff. */
  730. if (bridge->minor_version >= 5)
  731. agp_3_5_enable(bridge);
  732. agp_device_command(bridge_agpstat, true);
  733. return;
  734. } else {
  735. /* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation.*/
  736. bridge_agpstat &= ~(7<<10) ;
  737. pci_read_config_dword(bridge->dev,
  738. bridge->capndx+AGPCTRL, &temp);
  739. temp |= (1<<9);
  740. pci_write_config_dword(bridge->dev,
  741. bridge->capndx+AGPCTRL, temp);
  742. dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n");
  743. }
  744. }
  745. /* AGP v<3 */
  746. agp_device_command(bridge_agpstat, false);
  747. }
  748. EXPORT_SYMBOL(agp_generic_enable);
/**
 * agp_generic_create_gatt_table - allocate and initialise a one-level GATT.
 * @bridge: the bridge to build the table for
 *
 * Allocates the GATT pages (retrying with successively smaller aperture
 * sizes unless the driver uses a fixed aperture), marks the pages
 * reserved, maps the table uncached, and points every entry at the
 * scratch page.  Returns 0 on success or a negative errno.
 */
int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
{
	char *table;
	char *table_end;
	int size;
	int page_order;
	int num_entries;
	int i;
	void *temp;
	struct page *page;

	/* The generic routines can't handle 2 level gatt's */
	if (bridge->driver->size_type == LVL2_APER_SIZE)
		return -EINVAL;

	table = NULL;
	i = bridge->aperture_size_idx;
	temp = bridge->current_size;
	size = page_order = num_entries = 0;

	if (bridge->driver->size_type != FIXED_APER_SIZE) {
		/*
		 * Walk down the driver's aperture-size list: if the
		 * allocation for the current size fails, advance to the
		 * next (smaller) entry and try again.
		 */
		do {
			switch (bridge->driver->size_type) {
			case U8_APER_SIZE:
				size = A_SIZE_8(temp)->size;
				page_order =
				    A_SIZE_8(temp)->page_order;
				num_entries =
				    A_SIZE_8(temp)->num_entries;
				break;
			case U16_APER_SIZE:
				size = A_SIZE_16(temp)->size;
				page_order = A_SIZE_16(temp)->page_order;
				num_entries = A_SIZE_16(temp)->num_entries;
				break;
			case U32_APER_SIZE:
				size = A_SIZE_32(temp)->size;
				page_order = A_SIZE_32(temp)->page_order;
				num_entries = A_SIZE_32(temp)->num_entries;
				break;
				/* This case will never really happen. */
			case FIXED_APER_SIZE:
			case LVL2_APER_SIZE:
			default:
				size = page_order = num_entries = 0;
				break;
			}

			table = alloc_gatt_pages(page_order);

			if (table == NULL) {
				/* Allocation failed: select the next smaller size. */
				i++;
				switch (bridge->driver->size_type) {
				case U8_APER_SIZE:
					bridge->current_size = A_IDX8(bridge);
					break;
				case U16_APER_SIZE:
					bridge->current_size = A_IDX16(bridge);
					break;
				case U32_APER_SIZE:
					bridge->current_size = A_IDX32(bridge);
					break;
					/* These cases will never really happen. */
				case FIXED_APER_SIZE:
				case LVL2_APER_SIZE:
				default:
					break;
				}
				temp = bridge->current_size;
			} else {
				bridge->aperture_size_idx = i;
			}
		} while (!table && (i < bridge->driver->num_aperture_sizes));
	} else {
		size = ((struct aper_size_info_fixed *) temp)->size;
		page_order = ((struct aper_size_info_fixed *) temp)->page_order;
		num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
		table = alloc_gatt_pages(page_order);
	}

	if (table == NULL)
		return -ENOMEM;

	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	/* Keep the table pages out of normal page-allocator handling. */
	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		SetPageReserved(page);

	bridge->gatt_table_real = (u32 *) table;
	agp_gatt_table = (void *)table;

	bridge->driver->cache_flush();
#ifdef CONFIG_X86
	/* On x86 the table stays in the linear mapping, switched to uncached. */
	set_memory_uc((unsigned long)table, 1 << page_order);
	bridge->gatt_table = (void *)table;
#else
	/* Elsewhere, remap the table uncached through the GART address. */
	bridge->gatt_table = ioremap_nocache(virt_to_gart(table),
					(PAGE_SIZE * (1 << page_order)));
	bridge->driver->cache_flush();
#endif

	if (bridge->gatt_table == NULL) {
		/* Mapping failed: undo the reservation and free the pages. */
		for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
			ClearPageReserved(page);

		free_gatt_pages(table, page_order);

		return -ENOMEM;
	}
	bridge->gatt_bus_addr = virt_to_gart(bridge->gatt_table_real);

	/* AK: bogus, should encode addresses > 4GB */
	/* Point every entry at the scratch page so stray accesses are harmless. */
	for (i = 0; i < num_entries; i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
		readl(bridge->gatt_table+i);	/* PCI Posting. */
	}

	return 0;
}
EXPORT_SYMBOL(agp_generic_create_gatt_table);
/**
 * agp_generic_free_gatt_table - tear down a GATT built by
 *	agp_generic_create_gatt_table().
 * @bridge: the bridge whose table is being freed
 *
 * Restores cache attributes (x86) or unmaps the table (other arches),
 * clears the page reservations and releases the pages.  Returns 0 on
 * success, -EINVAL for two-level GATTs which the generic code cannot
 * handle.
 */
int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	char *table, *table_end;
	void *temp;
	struct page *page;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		page_order = A_SIZE_8(temp)->page_order;
		break;
	case U16_APER_SIZE:
		page_order = A_SIZE_16(temp)->page_order;
		break;
	case U32_APER_SIZE:
		page_order = A_SIZE_32(temp)->page_order;
		break;
	case FIXED_APER_SIZE:
		page_order = A_SIZE_FIX(temp)->page_order;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2 level gatt's */
		return -EINVAL;
		break;
	default:
		page_order = 0;
		break;
	}

	/* Do not worry about freeing memory, because if this is
	 * called, then all agp memory is deallocated and removed
	 * from the table. */

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)bridge->gatt_table, 1 << page_order);
#else
	iounmap(bridge->gatt_table);
#endif
	table = (char *) bridge->gatt_table_real;
	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	/* Undo the SetPageReserved() done at creation time. */
	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		ClearPageReserved(page);

	free_gatt_pages(bridge->gatt_table_real, page_order);

	agp_gatt_table = NULL;
	bridge->gatt_table = NULL;
	bridge->gatt_table_real = NULL;
	bridge->gatt_bus_addr = 0;

	return 0;
}
EXPORT_SYMBOL(agp_generic_free_gatt_table);
  902. int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
  903. {
  904. int num_entries;
  905. size_t i;
  906. off_t j;
  907. void *temp;
  908. struct agp_bridge_data *bridge;
  909. int mask_type;
  910. bridge = mem->bridge;
  911. if (!bridge)
  912. return -EINVAL;
  913. if (mem->page_count == 0)
  914. return 0;
  915. temp = bridge->current_size;
  916. switch (bridge->driver->size_type) {
  917. case U8_APER_SIZE:
  918. num_entries = A_SIZE_8(temp)->num_entries;
  919. break;
  920. case U16_APER_SIZE:
  921. num_entries = A_SIZE_16(temp)->num_entries;
  922. break;
  923. case U32_APER_SIZE:
  924. num_entries = A_SIZE_32(temp)->num_entries;
  925. break;
  926. case FIXED_APER_SIZE:
  927. num_entries = A_SIZE_FIX(temp)->num_entries;
  928. break;
  929. case LVL2_APER_SIZE:
  930. /* The generic routines can't deal with 2 level gatt's */
  931. return -EINVAL;
  932. break;
  933. default:
  934. num_entries = 0;
  935. break;
  936. }
  937. num_entries -= agp_memory_reserved/PAGE_SIZE;
  938. if (num_entries < 0) num_entries = 0;
  939. if (type != mem->type)
  940. return -EINVAL;
  941. mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
  942. if (mask_type != 0) {
  943. /* The generic routines know nothing of memory types */
  944. return -EINVAL;
  945. }
  946. /* AK: could wrap */
  947. if ((pg_start + mem->page_count) > num_entries)
  948. return -EINVAL;
  949. j = pg_start;
  950. while (j < (pg_start + mem->page_count)) {
  951. if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
  952. return -EBUSY;
  953. j++;
  954. }
  955. if (!mem->is_flushed) {
  956. bridge->driver->cache_flush();
  957. mem->is_flushed = true;
  958. }
  959. for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
  960. writel(bridge->driver->mask_memory(bridge,
  961. phys_to_gart(page_to_phys(mem->pages[i])),
  962. mask_type),
  963. bridge->gatt_table+j);
  964. }
  965. readl(bridge->gatt_table+j-1); /* PCI Posting. */
  966. bridge->driver->tlb_flush(mem);
  967. return 0;
  968. }
  969. EXPORT_SYMBOL(agp_generic_insert_memory);
  970. int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
  971. {
  972. size_t i;
  973. struct agp_bridge_data *bridge;
  974. int mask_type;
  975. bridge = mem->bridge;
  976. if (!bridge)
  977. return -EINVAL;
  978. if (mem->page_count == 0)
  979. return 0;
  980. if (type != mem->type)
  981. return -EINVAL;
  982. mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
  983. if (mask_type != 0) {
  984. /* The generic routines know nothing of memory types */
  985. return -EINVAL;
  986. }
  987. /* AK: bogus, should encode addresses > 4GB */
  988. for (i = pg_start; i < (mem->page_count + pg_start); i++) {
  989. writel(bridge->scratch_page, bridge->gatt_table+i);
  990. }
  991. readl(bridge->gatt_table+i-1); /* PCI Posting. */
  992. bridge->driver->tlb_flush(mem);
  993. return 0;
  994. }
  995. EXPORT_SYMBOL(agp_generic_remove_memory);
/*
 * Stub for bridges without driver-private memory types: the generic code
 * never allocates typed memory, so always report failure.
 */
struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
{
	return NULL;
}
EXPORT_SYMBOL(agp_generic_alloc_by_type);
/*
 * Generic destructor for typed agp_memory: release the page array,
 * return the key to the pool, then free the structure itself.
 */
void agp_generic_free_by_type(struct agp_memory *curr)
{
	agp_free_page_array(curr);
	agp_free_key(curr->key);
	kfree(curr);
}
EXPORT_SYMBOL(agp_generic_free_by_type);
  1008. struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
  1009. {
  1010. struct agp_memory *new;
  1011. int i;
  1012. int pages;
  1013. pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
  1014. new = agp_create_user_memory(page_count);
  1015. if (new == NULL)
  1016. return NULL;
  1017. for (i = 0; i < page_count; i++)
  1018. new->pages[i] = 0;
  1019. new->page_count = 0;
  1020. new->type = type;
  1021. new->num_scratch_pages = pages;
  1022. return new;
  1023. }
  1024. EXPORT_SYMBOL(agp_generic_alloc_user);
/*
 * Basic Page Allocation Routines -
 * These routines handle page allocation and by default they reserve the
 * allocated memory.  They also handle incrementing the current_memory_agp
 * value, which is checked against a maximum value.
 */
/**
 * agp_generic_alloc_pages - allocate @num_pages pages for AGP use.
 * @bridge: bridge the pages are for.  NOTE(review): unused here — the
 *	accounting below still increments the global agp_bridge counter;
 *	presumably it should use @bridge once the global goes away.
 * @mem: agp_memory to fill; mem->pages must hold @num_pages entries
 * @num_pages: number of pages to allocate
 *
 * Returns 0 on success or -ENOMEM.  On failure the pages allocated so
 * far stay recorded in @mem (page_count reflects them) so the caller can
 * release them through the normal free path.
 */
int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *mem, size_t num_pages)
{
	struct page * page;
	int i, ret = -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
		/* agp_free_memory() needs gart address */
		if (page == NULL)
			goto out;

#ifndef CONFIG_X86
		map_page_into_agp(page);
#endif
		/* Extra reference pins the page while it is GART-mapped. */
		get_page(page);
		atomic_inc(&agp_bridge->current_memory_agp);

		mem->pages[i] = page;
		mem->page_count++;
	}

#ifdef CONFIG_X86
	/* Switch the whole array uncached in one call (cheaper than per page). */
	set_pages_array_uc(mem->pages, num_pages);
#endif
	ret = 0;
out:
	return ret;
}
EXPORT_SYMBOL(agp_generic_alloc_pages);
  1056. struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge)
  1057. {
  1058. struct page * page;
  1059. page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
  1060. if (page == NULL)
  1061. return NULL;
  1062. map_page_into_agp(page);
  1063. get_page(page);
  1064. atomic_inc(&agp_bridge->current_memory_agp);
  1065. return page;
  1066. }
  1067. EXPORT_SYMBOL(agp_generic_alloc_page);
/**
 * agp_generic_destroy_pages - release every page attached to @mem.
 * @mem: the agp_memory whose pages are freed; may be NULL
 *
 * Restores cache attributes (x86) or unmaps each page (other arches),
 * drops the extra reference taken at allocation time, frees the page and
 * updates the global accounting.
 */
void agp_generic_destroy_pages(struct agp_memory *mem)
{
	int i;
	struct page *page;

	if (!mem)
		return;

#ifdef CONFIG_X86
	/* Undo set_pages_array_uc() for the whole array at once. */
	set_pages_array_wb(mem->pages, mem->page_count);
#endif

	for (i = 0; i < mem->page_count; i++) {
		page = mem->pages[i];

#ifndef CONFIG_X86
		unmap_page_from_agp(page);
#endif
		/* Drop the pinning reference from allocation, then free. */
		put_page(page);
		__free_page(page);
		atomic_dec(&agp_bridge->current_memory_agp);
		mem->pages[i] = NULL;
	}
}
EXPORT_SYMBOL(agp_generic_destroy_pages);
  1089. void agp_generic_destroy_page(struct page *page, int flags)
  1090. {
  1091. if (page == NULL)
  1092. return;
  1093. if (flags & AGP_PAGE_DESTROY_UNMAP)
  1094. unmap_page_from_agp(page);
  1095. if (flags & AGP_PAGE_DESTROY_FREE) {
  1096. put_page(page);
  1097. __free_page(page);
  1098. atomic_dec(&agp_bridge->current_memory_agp);
  1099. }
  1100. }
  1101. EXPORT_SYMBOL(agp_generic_destroy_page);
  1102. /* End Basic Page Allocation Routines */
/**
 * agp_enable - initialise the agp point-to-point connection.
 *
 * @bridge: the bridge to enable; a NULL bridge is silently ignored.
 * @mode: agp mode register value to configure with.
 *
 * Delegates to the driver's agp_enable hook.
 */
void agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	if (!bridge)
		return;
	bridge->driver->agp_enable(bridge, mode);
}
EXPORT_SYMBOL(agp_enable);
  1115. /* When we remove the global variable agp_bridge from all drivers
  1116. * then agp_alloc_bridge and agp_generic_find_bridge need to be updated
  1117. */
  1118. struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev)
  1119. {
  1120. if (list_empty(&agp_bridges))
  1121. return NULL;
  1122. return agp_bridge;
  1123. }
/* Runs on each CPU via on_each_cpu() to flush that CPU's AGP cache. */
static void ipi_handler(void *null)
{
	flush_agp_cache();
}
/*
 * Flush the AGP-related caches on every CPU in the system; panics if the
 * cross-CPU call reports failure.
 */
void global_cache_flush(void)
{
	if (on_each_cpu(ipi_handler, NULL, 1) != 0)
		panic(PFX "timed out waiting for the other CPUs!\n");
}
EXPORT_SYMBOL(global_cache_flush);
  1134. unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
  1135. dma_addr_t addr, int type)
  1136. {
  1137. /* memory type is ignored in the generic routine */
  1138. if (bridge->driver->masks)
  1139. return addr | bridge->driver->masks[0].mask;
  1140. else
  1141. return addr;
  1142. }
  1143. EXPORT_SYMBOL(agp_generic_mask_memory);
  1144. int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
  1145. int type)
  1146. {
  1147. if (type >= AGP_USER_TYPES)
  1148. return 0;
  1149. return type;
  1150. }
  1151. EXPORT_SYMBOL(agp_generic_type_to_mask_type);
  1152. /*
  1153. * These functions are implemented according to the AGPv3 spec,
  1154. * which covers implementation details that had previously been
  1155. * left open.
  1156. */
/*
 * Read the AGPv3 aperture-size register and look the value up in the
 * driver's size table.  On a match, record the entry as the bridge's
 * current (and previous) size and return its aperture size; returns 0
 * if the register matches no table entry.
 */
int agp3_generic_fetch_size(void)
{
	u16 temp_size;
	int i;
	struct aper_size_info_16 *values;

	pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
	values = A_SIZE_16(agp_bridge->driver->aperture_sizes);

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp_size == values[i].size_value) {
			agp_bridge->previous_size =
				agp_bridge->current_size = (void *) (values + i);

			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}
	return 0;
}
EXPORT_SYMBOL(agp3_generic_fetch_size);
/*
 * Flush the bridge's GART TLB by toggling the GTLB-enable bit in AGPCTRL
 * off and back on.  @mem is unused; the flush always covers the whole TLB.
 */
void agp3_generic_tlbflush(struct agp_memory *mem)
{
	u32 ctrl;
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
}
EXPORT_SYMBOL(agp3_generic_tlbflush);
/*
 * Program an AGPv3 bridge: record the aperture bus address, write the
 * aperture size and GATT base into the capability block, then enable the
 * aperture and the GART TLB.  Always returns 0.
 */
int agp3_generic_configure(void)
{
	u32 temp;
	struct aper_size_info_16 *current_size;

	current_size = A_SIZE_16(agp_bridge->current_size);

	/* NOTE(review): reads the raw BAR and assumes it holds a usable bus
	 * address — confirm against platforms with address translation. */
	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* set aperture size */
	pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
	/* set gart pointer */
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
	/* enable aperture and GTLB */
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);

	return 0;
}
EXPORT_SYMBOL(agp3_generic_configure);
/* Disable the AGPv3 aperture on teardown by clearing APERENB in AGPCTRL. */
void agp3_generic_cleanup(void)
{
	u32 ctrl;
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
}
EXPORT_SYMBOL(agp3_generic_cleanup);
/*
 * Standard AGPv3 aperture sizes.
 * Fields per entry: {size, num_entries, page_order, size_value}.
 * size appears to be in MB (size * 1MB == num_entries * 4KB for every
 * row — TODO confirm); size_value is the AGPAPSIZE register encoding
 * matched by agp3_generic_fetch_size().
 */
const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
{
	{4096, 1048576, 10,0x000},
	{2048,  524288, 9, 0x800},
	{1024,  262144, 8, 0xc00},
	{ 512,  131072, 7, 0xe00},
	{ 256,   65536, 6, 0xf00},
	{ 128,   32768, 5, 0xf20},
	{  64,   16384, 4, 0xf30},
	{  32,    8192, 3, 0xf38},
	{  16,    4096, 2, 0xf3c},
	{   8,    2048, 1, 0xf3e},
	{   4,    1024, 0, 0xf3f}
};
EXPORT_SYMBOL(agp3_generic_sizes);