mspec.c

/*
 * Copyright (C) 2001-2006 Silicon Graphics, Inc.  All rights
 * reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

/*
 * SN Platform Special Memory (mspec) Support
 *
 * This driver exports the SN special memory (mspec) facility to user
 * processes.
 * There are three types of memory made available through this driver:
 * fetchops, uncached and cached.
 *
 * Fetchops are atomic memory operations that are implemented in the
 * memory controller on SGI SN hardware.
 *
 * Uncached pages are used for the memory write-combining feature of the
 * ia64 cpu.
 *
 * Cached pages are used for areas of memory that are used as cached
 * addresses on our partition and as uncached addresses from other
 * partitions.  Due to a design constraint of the SN2 Shub, processors on
 * the same FSB cannot perform both a cached and an uncached reference to
 * the same cache line.  These special memory cached regions prevent the
 * kernel from ever dropping in a TLB entry and therefore prevent the
 * processor from ever speculating a cache line from this page.
 */
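
/*
 * Illustrative user-space usage (a minimal sketch, not part of the
 * driver): each memory type is obtained by mmap()ing the corresponding
 * misc device registered below.  mspec_mmap() requires a shared,
 * writable mapping starting at offset 0.  The "/dev/mspec_uncached"
 * path assumes the usual udev naming for the miscdevice declared in
 * this file; error handling is omitted for brevity.
 *
 *        #include <fcntl.h>
 *        #include <sys/mman.h>
 *        #include <unistd.h>
 *
 *        int fd = open("/dev/mspec_uncached", O_RDWR);
 *        void *p = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd, 0);
 *
 * The uncached page backing p is allocated on first touch by
 * mspec_nopfn() and returned to the uncached pool when the mapping
 * is closed.
 */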

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/miscdevice.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/numa.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/uncached.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/mspec.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/io.h>
#include <asm/sn/bte.h>
#include <asm/sn/shubio.h>

#define FETCHOP_ID      "SGI Fetchop,"
#define CACHED_ID       "Cached,"
#define UNCACHED_ID     "Uncached"
#define REVISION        "4.0"
#define MSPEC_BASENAME  "mspec"

/*
 * Page types allocated by the device.
 */
enum {
        MSPEC_FETCHOP = 1,
        MSPEC_CACHED,
        MSPEC_UNCACHED
};

#ifdef CONFIG_SGI_SN
static int is_sn2;
#else
#define is_sn2          0
#endif

/*
 * One of these structures is allocated when an mspec region is mmaped. The
 * structure is pointed to by the vma->vm_private_data field in the vma struct.
 * This structure is used to record the addresses of the mspec pages.
 */
struct vma_data {
        atomic_t refcnt;        /* Number of vmas sharing the data. */
        spinlock_t lock;        /* Serialize access to the vma. */
        int count;              /* Number of pages allocated. */
        int type;               /* Type of pages allocated. */
        unsigned long maddr[0]; /* Array of MSPEC addresses. */
};

/* used on shub2 to clear FOP cache in the HUB */
static unsigned long scratch_page[MAX_NUMNODES];
#define SH2_AMO_CACHE_ENTRIES   4

static inline int
mspec_zero_block(unsigned long addr, int len)
{
        int status;

        if (is_sn2) {
                if (is_shub2()) {
                        int nid;
                        void *p;
                        int i;

                        nid = nasid_to_cnodeid(get_node_number(__pa(addr)));
                        p = (void *)TO_AMO(scratch_page[nid]);

                        for (i = 0; i < SH2_AMO_CACHE_ENTRIES; i++) {
                                FETCHOP_LOAD_OP(p, FETCHOP_LOAD);
                                p += FETCHOP_VAR_SIZE;
                        }
                }

                status = bte_copy(0, addr & ~__IA64_UNCACHED_OFFSET, len,
                                  BTE_WACQUIRE | BTE_ZERO_FILL, NULL);
        } else {
                memset((char *) addr, 0, len);
                status = 0;
        }
        return status;
}

/*
 * mspec_open
 *
 * Called when a device mapping is created by a means other than mmap
 * (via fork, etc.).  Increments the reference count on the underlying
 * mspec data so it is not freed prematurely.
 */
static void
mspec_open(struct vm_area_struct *vma)
{
        struct vma_data *vdata;

        vdata = vma->vm_private_data;
        atomic_inc(&vdata->refcnt);
}

/*
 * mspec_close
 *
 * Called when unmapping a device mapping.  Frees all mspec pages
 * belonging to the vma.
 */
static void
mspec_close(struct vm_area_struct *vma)
{
        struct vma_data *vdata;
        int i, pages, result, vdata_size;

        vdata = vma->vm_private_data;
        if (!atomic_dec_and_test(&vdata->refcnt))
                return;

        pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
        for (i = 0; i < pages; i++) {
                if (vdata->maddr[i] == 0)
                        continue;
                /*
                 * Clear the page before sticking it back
                 * into the pool.
                 */
                result = mspec_zero_block(vdata->maddr[i], PAGE_SIZE);
                if (!result)
                        uncached_free_page(vdata->maddr[i]);
                else
                        printk(KERN_WARNING "mspec_close(): "
                               "failed to zero page %i\n",
                               result);
        }

        if (vdata_size <= PAGE_SIZE)
                kfree(vdata);
        else
                vfree(vdata);
}

/*
 * mspec_nopfn
 *
 * Creates an mspec page and maps it to user space.
 */
static unsigned long
mspec_nopfn(struct vm_area_struct *vma, unsigned long address)
{
        unsigned long paddr, maddr;
        unsigned long pfn;
        int index;
        struct vma_data *vdata = vma->vm_private_data;

        index = (address - vma->vm_start) >> PAGE_SHIFT;
        maddr = (volatile unsigned long) vdata->maddr[index];
        if (maddr == 0) {
                maddr = uncached_alloc_page(numa_node_id());
                if (maddr == 0)
                        return NOPFN_OOM;

                spin_lock(&vdata->lock);
                if (vdata->maddr[index] == 0) {
                        vdata->count++;
                        vdata->maddr[index] = maddr;
                } else {
                        uncached_free_page(maddr);
                        maddr = vdata->maddr[index];
                }
                spin_unlock(&vdata->lock);
        }

        if (vdata->type == MSPEC_FETCHOP)
                paddr = TO_AMO(maddr);
        else
                paddr = maddr & ~__IA64_UNCACHED_OFFSET;

        pfn = paddr >> PAGE_SHIFT;

        return pfn;
}

static struct vm_operations_struct mspec_vm_ops = {
        .open = mspec_open,
        .close = mspec_close,
        .nopfn = mspec_nopfn
};

/*
 * mspec_mmap
 *
 * Called when mmaping the device.  Initializes the vma with a fault handler
 * and private data structure necessary to allocate, track, and free the
 * underlying pages.
 */
static int
mspec_mmap(struct file *file, struct vm_area_struct *vma, int type)
{
        struct vma_data *vdata;
        int pages, vdata_size;

        if (vma->vm_pgoff != 0)
                return -EINVAL;

        if ((vma->vm_flags & VM_SHARED) == 0)
                return -EINVAL;

        if ((vma->vm_flags & VM_WRITE) == 0)
                return -EPERM;

        pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
        if (vdata_size <= PAGE_SIZE)
                vdata = kmalloc(vdata_size, GFP_KERNEL);
        else
                vdata = vmalloc(vdata_size);
        if (!vdata)
                return -ENOMEM;
        memset(vdata, 0, vdata_size);

        vdata->type = type;
        spin_lock_init(&vdata->lock);
        /* ATOMIC_INIT() is only meant for static initializers; use
         * atomic_set() for a runtime assignment. */
        atomic_set(&vdata->refcnt, 1);
        vma->vm_private_data = vdata;

        vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP);
        if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED)
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_ops = &mspec_vm_ops;

        return 0;
}

static int
fetchop_mmap(struct file *file, struct vm_area_struct *vma)
{
        return mspec_mmap(file, vma, MSPEC_FETCHOP);
}

static int
cached_mmap(struct file *file, struct vm_area_struct *vma)
{
        return mspec_mmap(file, vma, MSPEC_CACHED);
}

static int
uncached_mmap(struct file *file, struct vm_area_struct *vma)
{
        return mspec_mmap(file, vma, MSPEC_UNCACHED);
}

static const struct file_operations fetchop_fops = {
        .owner = THIS_MODULE,
        .mmap = fetchop_mmap
};

static struct miscdevice fetchop_miscdev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "sgi_fetchop",
        .fops = &fetchop_fops
};

static const struct file_operations cached_fops = {
        .owner = THIS_MODULE,
        .mmap = cached_mmap
};

static struct miscdevice cached_miscdev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "mspec_cached",
        .fops = &cached_fops
};

static const struct file_operations uncached_fops = {
        .owner = THIS_MODULE,
        .mmap = uncached_mmap
};

static struct miscdevice uncached_miscdev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "mspec_uncached",
        .fops = &uncached_fops
};

/*
 * mspec_init
 *
 * Called at boot time to initialize the mspec facility.
 */
static int __init
mspec_init(void)
{
        int ret;
        int nid;

        /*
         * The fetchop device only works on SN2 hardware; the uncached and
         * cached memory drivers should both be valid on all ia64 hardware.
         */
#ifdef CONFIG_SGI_SN
        if (ia64_platform_is("sn2")) {
                is_sn2 = 1;
                if (is_shub2()) {
                        ret = -ENOMEM;
                        for_each_online_node(nid) {
                                int actual_nid;
                                int nasid;
                                unsigned long phys;

                                scratch_page[nid] = uncached_alloc_page(nid);
                                if (scratch_page[nid] == 0)
                                        goto free_scratch_pages;
                                phys = __pa(scratch_page[nid]);
                                nasid = get_node_number(phys);
                                actual_nid = nasid_to_cnodeid(nasid);
                                if (actual_nid != nid)
                                        goto free_scratch_pages;
                        }
                }

                ret = misc_register(&fetchop_miscdev);
                if (ret) {
                        printk(KERN_ERR
                               "%s: failed to register device %i\n",
                               FETCHOP_ID, ret);
                        goto free_scratch_pages;
                }
        }
#endif
        ret = misc_register(&cached_miscdev);
        if (ret) {
                printk(KERN_ERR "%s: failed to register device %i\n",
                       CACHED_ID, ret);
                if (is_sn2)
                        misc_deregister(&fetchop_miscdev);
                goto free_scratch_pages;
        }
        ret = misc_register(&uncached_miscdev);
        if (ret) {
                printk(KERN_ERR "%s: failed to register device %i\n",
                       UNCACHED_ID, ret);
                misc_deregister(&cached_miscdev);
                if (is_sn2)
                        misc_deregister(&fetchop_miscdev);
                goto free_scratch_pages;
        }

        printk(KERN_INFO "%s %s initialized devices: %s %s %s\n",
               MSPEC_BASENAME, REVISION, is_sn2 ? FETCHOP_ID : "",
               CACHED_ID, UNCACHED_ID);

        return 0;

 free_scratch_pages:
        for_each_node(nid) {
                if (scratch_page[nid] != 0)
                        uncached_free_page(scratch_page[nid]);
        }
        return ret;
}

static void __exit
mspec_exit(void)
{
        int nid;

        misc_deregister(&uncached_miscdev);
        misc_deregister(&cached_miscdev);
        if (is_sn2) {
                misc_deregister(&fetchop_miscdev);

                for_each_node(nid) {
                        if (scratch_page[nid] != 0)
                                uncached_free_page(scratch_page[nid]);
                }
        }
}

module_init(mspec_init);
module_exit(mspec_exit);

MODULE_AUTHOR("Silicon Graphics, Inc. <linux-altix@sgi.com>");
MODULE_DESCRIPTION("Driver for SGI SN special memory operations");
MODULE_LICENSE("GPL");