/*
 * Serverworks AGPGART routines.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/agp_backend.h>
#include "agp.h"

#define SVWRKS_COMMAND		0x04
#define SVWRKS_APSIZE		0x10
#define SVWRKS_MMBASE		0x14
#define SVWRKS_CACHING		0x4b
#define SVWRKS_AGP_ENABLE	0x60
#define SVWRKS_FEATURE		0x68

#define SVWRKS_SIZE_MASK	0xfe000000

/* Memory mapped registers */
#define SVWRKS_GART_CACHE	0x02
#define SVWRKS_GATTBASE		0x04
#define SVWRKS_TLBFLUSH		0x10
#define SVWRKS_POSTFLUSH	0x14
#define SVWRKS_DIRFLUSH		0x0c

struct serverworks_page_map {
	unsigned long *real;
	unsigned long __iomem *remapped;
};

static struct _serverworks_private {
	struct pci_dev *svrwrks_dev;	/* device one */
	volatile u8 __iomem *registers;
	struct serverworks_page_map **gatt_pages;
	int num_tables;
	struct serverworks_page_map scratch_dir;

	int gart_addr_ofs;
	int mm_addr_ofs;
} serverworks_private;

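/*
 * Allocate one kernel page for use as a GATT page (or as the page
 * directory): reserve it, remap it uncached so the bridge observes the
 * writes, and seed every entry with the scratch page so unused slots
 * point somewhere harmless.
 */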
static int serverworks_create_page_map(struct serverworks_page_map *page_map)
{
	int i;

	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
	if (page_map->real == NULL) {
		return -ENOMEM;
	}
	SetPageReserved(virt_to_page(page_map->real));
	global_cache_flush();
	page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
					     PAGE_SIZE);
	if (page_map->remapped == NULL) {
		ClearPageReserved(virt_to_page(page_map->real));
		free_page((unsigned long) page_map->real);
		page_map->real = NULL;
		return -ENOMEM;
	}
	global_cache_flush();

	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
		writel(agp_bridge->scratch_page, page_map->remapped+i);

	return 0;
}

static void serverworks_free_page_map(struct serverworks_page_map *page_map)
{
	iounmap(page_map->remapped);
	ClearPageReserved(virt_to_page(page_map->real));
	free_page((unsigned long) page_map->real);
}

static void serverworks_free_gatt_pages(void)
{
	int i;
	struct serverworks_page_map **tables;
	struct serverworks_page_map *entry;

	tables = serverworks_private.gatt_pages;
	for (i = 0; i < serverworks_private.num_tables; i++) {
		entry = tables[i];
		if (entry != NULL) {
			if (entry->real != NULL) {
				serverworks_free_page_map(entry);
			}
			kfree(entry);
		}
	}
	kfree(tables);
}

static int serverworks_create_gatt_pages(int nr_tables)
{
	struct serverworks_page_map **tables;
	struct serverworks_page_map *entry;
	int retval = 0;
	int i;

	tables = kmalloc((nr_tables + 1) * sizeof(struct serverworks_page_map *),
			 GFP_KERNEL);
	if (tables == NULL) {
		return -ENOMEM;
	}
	memset(tables, 0, sizeof(struct serverworks_page_map *) * (nr_tables + 1));
	for (i = 0; i < nr_tables; i++) {
		entry = kmalloc(sizeof(struct serverworks_page_map), GFP_KERNEL);
		if (entry == NULL) {
			retval = -ENOMEM;
			break;
		}
		memset(entry, 0, sizeof(struct serverworks_page_map));
		tables[i] = entry;
		retval = serverworks_create_page_map(entry);
		if (retval != 0)
			break;
	}
	serverworks_private.num_tables = nr_tables;
	serverworks_private.gatt_pages = tables;

	if (retval != 0)
		serverworks_free_gatt_pages();

	return retval;
}

#define SVRWRKS_GET_GATT(addr) (serverworks_private.gatt_pages[\
	GET_PAGE_DIR_IDX(addr)]->remapped)

#ifndef GET_PAGE_DIR_OFF
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#endif

#ifndef GET_PAGE_DIR_IDX
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
	GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
#endif

#ifndef GET_GATT_OFF
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#endif

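/*
 * Two-level GATT lookup: bits 22 and up of a GART bus address select a
 * page-directory slot (each GATT page maps 4 MB), bits 12-21 select one
 * of the 1024 entries inside that GATT page, and bits 0-11 are the
 * offset within the 4 KB page.  As an illustrative example, with a
 * 4 MB aligned aperture base, an address 0x00501000 bytes into the
 * aperture resolves to directory slot 1, GATT entry 0x101.
 */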
static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
{
	struct aper_size_info_lvl2 *value;
	struct serverworks_page_map page_dir;
	int retval;
	u32 temp;
	int i;

	value = A_SIZE_LVL2(agp_bridge->current_size);
	retval = serverworks_create_page_map(&page_dir);
	if (retval != 0) {
		return retval;
	}
	retval = serverworks_create_page_map(&serverworks_private.scratch_dir);
	if (retval != 0) {
		serverworks_free_page_map(&page_dir);
		return retval;
	}
	/* Create a fake scratch directory */
	for (i = 0; i < 1024; i++) {
		writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i);
		writel(virt_to_gart(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
	}

	retval = serverworks_create_gatt_pages(value->num_entries / 1024);
	if (retval != 0) {
		serverworks_free_page_map(&page_dir);
		serverworks_free_page_map(&serverworks_private.scratch_dir);
		return retval;
	}

	agp_bridge->gatt_table_real = (u32 *)page_dir.real;
	agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
	agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);

	/* Get the address of the GART region.
	 * This is a bus address even on the Alpha, because it is
	 * used to program the AGP master, not the CPU.
	 */
	pci_read_config_dword(agp_bridge->dev, serverworks_private.gart_addr_ofs, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* Point each page-directory entry at its GATT page */
	for (i = 0; i < value->num_entries / 1024; i++)
		writel(virt_to_gart(serverworks_private.gatt_pages[i]->real) | 1, page_dir.remapped+i);

	return 0;
}

static int serverworks_free_gatt_table(struct agp_bridge_data *bridge)
{
	struct serverworks_page_map page_dir;

	page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
	page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;

	serverworks_free_gatt_pages();
	serverworks_free_page_map(&page_dir);
	serverworks_free_page_map(&serverworks_private.scratch_dir);
	return 0;
}

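/*
 * Determine the aperture size the same way generic BAR sizing works:
 * save the aperture base register, write the all-ones size mask, read
 * back which bits stick, restore the original value, and match the
 * result against the size table.
 */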
static int serverworks_fetch_size(void)
{
	int i;
	u32 temp;
	u32 temp2;
	struct aper_size_info_lvl2 *values;

	values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
	pci_read_config_dword(agp_bridge->dev, serverworks_private.gart_addr_ofs, &temp);
	pci_write_config_dword(agp_bridge->dev, serverworks_private.gart_addr_ofs,
			       SVWRKS_SIZE_MASK);
	pci_read_config_dword(agp_bridge->dev, serverworks_private.gart_addr_ofs, &temp2);
	pci_write_config_dword(agp_bridge->dev, serverworks_private.gart_addr_ofs, temp);
	temp2 &= SVWRKS_SIZE_MASK;

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp2 == values[i].size_value) {
			agp_bridge->previous_size =
				agp_bridge->current_size = (void *) (values + i);

			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}

	return 0;
}

/*
 * This routine could be implemented by taking the addresses
 * written to the GATT and flushing them individually.  However,
 * currently it just flushes the whole table, which is probably
 * more efficient, since agp_memory blocks can cover a large number
 * of entries.
 */
static void serverworks_tlbflush(struct agp_memory *temp)
{
	writeb(1, serverworks_private.registers+SVWRKS_POSTFLUSH);
	while (readb(serverworks_private.registers+SVWRKS_POSTFLUSH) == 1)
		cpu_relax();

	writel(1, serverworks_private.registers+SVWRKS_DIRFLUSH);
	while (readl(serverworks_private.registers+SVWRKS_DIRFLUSH) == 1)
		cpu_relax();
}

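/*
 * Bring the bridge up: map its 4 KB memory-mapped register window,
 * program the GATT directory base, enable AGP on the companion
 * (function 1) device and flush the GART TLB.  The writes to the
 * CACHING and FEATURE config registers at the end follow the register
 * names only; the exact bit semantics are chipset-specific.
 */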
static int serverworks_configure(void)
{
	struct aper_size_info_lvl2 *current_size;
	u32 temp;
	u8 enable_reg;
	u16 cap_reg;

	current_size = A_SIZE_LVL2(agp_bridge->current_size);

	/* Get the memory mapped registers */
	pci_read_config_dword(agp_bridge->dev, serverworks_private.mm_addr_ofs, &temp);
	temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	serverworks_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
	if (!serverworks_private.registers) {
		printk(KERN_ERR PFX "Unable to ioremap() memory.\n");
		return -ENOMEM;
	}

	writeb(0xA, serverworks_private.registers+SVWRKS_GART_CACHE);
	readb(serverworks_private.registers+SVWRKS_GART_CACHE);	/* PCI Posting. */

	writel(agp_bridge->gatt_bus_addr, serverworks_private.registers+SVWRKS_GATTBASE);
	readl(serverworks_private.registers+SVWRKS_GATTBASE);	/* PCI Posting. */

	cap_reg = readw(serverworks_private.registers+SVWRKS_COMMAND);
	cap_reg &= ~0x0007;
	cap_reg |= 0x4;
	writew(cap_reg, serverworks_private.registers+SVWRKS_COMMAND);
	readw(serverworks_private.registers+SVWRKS_COMMAND);	/* PCI Posting. */

	pci_read_config_byte(serverworks_private.svrwrks_dev, SVWRKS_AGP_ENABLE, &enable_reg);
	enable_reg |= 0x1; /* AGP Enable bit */
	pci_write_config_byte(serverworks_private.svrwrks_dev, SVWRKS_AGP_ENABLE, enable_reg);
	serverworks_tlbflush(NULL);

	agp_bridge->capndx = pci_find_capability(serverworks_private.svrwrks_dev, PCI_CAP_ID_AGP);

	/* Fill in the mode register */
	pci_read_config_dword(serverworks_private.svrwrks_dev,
			      agp_bridge->capndx+PCI_AGP_STATUS, &agp_bridge->mode);

	pci_read_config_byte(agp_bridge->dev, SVWRKS_CACHING, &enable_reg);
	enable_reg &= ~0x3;
	pci_write_config_byte(agp_bridge->dev, SVWRKS_CACHING, enable_reg);

	pci_read_config_byte(agp_bridge->dev, SVWRKS_FEATURE, &enable_reg);
	enable_reg |= (1<<6);
	pci_write_config_byte(agp_bridge->dev, SVWRKS_FEATURE, enable_reg);

	return 0;
}

static void serverworks_cleanup(void)
{
	iounmap((void __iomem *) serverworks_private.registers);
}

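/*
 * Bind mem->page_count pages starting at pg_start: verify the target
 * GATT range is empty, flush CPU caches once if the pages have not
 * been flushed yet, write each masked physical address into the right
 * GATT page, and finish with a GART TLB flush.
 */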
static int serverworks_insert_memory(struct agp_memory *mem,
				     off_t pg_start, int type)
{
	int i, j, num_entries;
	unsigned long __iomem *cur_gatt;
	unsigned long addr;

	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}
	if ((pg_start + mem->page_count) > num_entries) {
		return -EINVAL;
	}

	j = pg_start;
	while (j < (pg_start + mem->page_count)) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = SVRWRKS_GET_GATT(addr);
		if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr))))
			return -EBUSY;
		j++;
	}

	if (mem->is_flushed == FALSE) {
		global_cache_flush();
		mem->is_flushed = TRUE;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = SVRWRKS_GET_GATT(addr);
		writel(agp_bridge->driver->mask_memory(agp_bridge, mem->memory[i], mem->type),
		       cur_gatt+GET_GATT_OFF(addr));
	}
	serverworks_tlbflush(mem);
	return 0;
}

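/*
 * Unbind pages by pointing their GATT entries back at the scratch
 * page; the GART TLB is flushed before and after the rewrite.
 */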
static int serverworks_remove_memory(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i;
	unsigned long __iomem *cur_gatt;
	unsigned long addr;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}

	global_cache_flush();
	serverworks_tlbflush(mem);

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = SVRWRKS_GET_GATT(addr);
		writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
	}

	serverworks_tlbflush(mem);
	return 0;
}

static struct gatt_mask serverworks_masks[] =
{
	{.mask = 1, .type = 0}
};

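/*
 * Each entry below is {aperture size in MB, number of 4 KB GATT
 * entries, expected aperture-size register value}; e.g. a 2048 MB
 * aperture needs 2048 MB / 4 KB = 524288 entries.  The register
 * encoding is taken on trust from serverworks_fetch_size().
 */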
static struct aper_size_info_lvl2 serverworks_sizes[7] =
{
	{2048, 524288, 0x80000000},
	{1024, 262144, 0xc0000000},
	{512, 131072, 0xe0000000},
	{256, 65536, 0xf0000000},
	{128, 32768, 0xf8000000},
	{64, 16384, 0xfc000000},
	{32, 8192, 0xfe000000}
};

static void serverworks_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	u32 command;

	pci_read_config_dword(serverworks_private.svrwrks_dev,
			      bridge->capndx + PCI_AGP_STATUS,
			      &command);

	command = agp_collect_device_status(bridge, mode, command);

	command &= ~0x10;	/* disable FW */
	command &= ~0x08;

	command |= 0x100;

	pci_write_config_dword(serverworks_private.svrwrks_dev,
			       bridge->capndx + PCI_AGP_COMMAND,
			       command);

	agp_device_command(command, 0);
}

static struct agp_bridge_driver sworks_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= serverworks_sizes,
	.size_type		= LVL2_APER_SIZE,
	.num_aperture_sizes	= 7,
	.configure		= serverworks_configure,
	.fetch_size		= serverworks_fetch_size,
	.cleanup		= serverworks_cleanup,
	.tlb_flush		= serverworks_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= serverworks_masks,
	.agp_enable		= serverworks_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= serverworks_create_gatt_table,
	.free_gatt_table	= serverworks_free_gatt_table,
	.insert_memory		= serverworks_insert_memory,
	.remove_memory		= serverworks_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
};

static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
					   const struct pci_device_id *ent)
{
	struct agp_bridge_data *bridge;
	struct pci_dev *bridge_dev;
	u32 temp, temp2;
	u8 cap_ptr = 0;

	/* Everything is on func 1 here so we are hardcoding function one */
	bridge_dev = pci_find_slot((unsigned int)pdev->bus->number,
				   PCI_DEVFN(0, 1));
	if (!bridge_dev) {
		printk(KERN_INFO PFX "Detected a Serverworks chipset "
		       "but could not find the secondary device.\n");
		return -ENODEV;
	}

	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);

	switch (pdev->device) {
	case 0x0006:
		/* ServerWorks CNB20HE: no AGP on this chipset. */
		printk(KERN_ERR PFX "Detected ServerWorks CNB20HE chipset: No AGP present.\n");
		return -ENODEV;

	case PCI_DEVICE_ID_SERVERWORKS_HE:
	case PCI_DEVICE_ID_SERVERWORKS_LE:
	case 0x0007:
		break;

	default:
		if (cap_ptr)
			printk(KERN_ERR PFX "Unsupported Serverworks chipset "
			       "(device id: %04x)\n", pdev->device);
		return -ENODEV;
	}

	serverworks_private.svrwrks_dev = bridge_dev;
	serverworks_private.gart_addr_ofs = 0x10;

	pci_read_config_dword(pdev, SVWRKS_APSIZE, &temp);
	if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2);
		if (temp2 != 0) {
			printk(KERN_INFO PFX "Detected 64 bit aperture address, "
			       "but top bits are not zero.  Disabling AGP.\n");
			return -ENODEV;
		}
		serverworks_private.mm_addr_ofs = 0x18;
	} else
		serverworks_private.mm_addr_ofs = 0x14;

	pci_read_config_dword(pdev, serverworks_private.mm_addr_ofs, &temp);
	if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		pci_read_config_dword(pdev,
				      serverworks_private.mm_addr_ofs + 4, &temp2);
		if (temp2 != 0) {
			printk(KERN_INFO PFX "Detected 64 bit MMIO address, "
			       "but top bits are not zero.  Disabling AGP.\n");
			return -ENODEV;
		}
	}

	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	bridge->driver = &sworks_driver;
	bridge->dev_private_data = &serverworks_private;
	bridge->dev = pdev;

	pci_set_drvdata(pdev, bridge);
	return agp_add_bridge(bridge);
}

static void __devexit agp_serverworks_remove(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

	agp_remove_bridge(bridge);
	agp_put_bridge(bridge);
}

static struct pci_device_id agp_serverworks_pci_table[] = {
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_SERVERWORKS,
	.device		= PCI_ANY_ID,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{ }
};

MODULE_DEVICE_TABLE(pci, agp_serverworks_pci_table);

static struct pci_driver agp_serverworks_pci_driver = {
	.name		= "agpgart-serverworks",
	.id_table	= agp_serverworks_pci_table,
	.probe		= agp_serverworks_probe,
	.remove		= agp_serverworks_remove,
};

static int __init agp_serverworks_init(void)
{
	if (agp_off)
		return -EINVAL;
	return pci_register_driver(&agp_serverworks_pci_driver);
}

static void __exit agp_serverworks_cleanup(void)
{
	pci_unregister_driver(&agp_serverworks_pci_driver);
}

module_init(agp_serverworks_init);
module_exit(agp_serverworks_cleanup);

MODULE_LICENSE("GPL and additional rights");