/* drivers/char/mspec.c */
  1. /*
  2. * Copyright (C) 2001-2006 Silicon Graphics, Inc. All rights
  3. * reserved.
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms of version 2 of the GNU General Public License
  7. * as published by the Free Software Foundation.
  8. */
  9. /*
  10. * SN Platform Special Memory (mspec) Support
  11. *
  12. * This driver exports the SN special memory (mspec) facility to user
  13. * processes.
  14. * There are three types of memory made available through this driver:
  15. * fetchops, uncached and cached.
  16. *
  17. * Fetchops are atomic memory operations that are implemented in the
  18. * memory controller on SGI SN hardware.
  19. *
  20. * Uncached are used for memory write combining feature of the ia64
  21. * cpu.
  22. *
  23. * Cached are used for areas of memory that are used as cached addresses
  24. * on our partition and used as uncached addresses from other partitions.
  25. * Due to a design constraint of the SN2 Shub, you cannot have processors
  26. * on the same FSB perform both a cached and uncached reference to the
  27. * same cache line. These special memory cached regions prevent the
  28. * kernel from ever dropping in a TLB entry and therefore prevent the
  29. * processor from ever speculating a cache line from this page.
  30. */
  31. #include <linux/types.h>
  32. #include <linux/kernel.h>
  33. #include <linux/module.h>
  34. #include <linux/init.h>
  35. #include <linux/errno.h>
  36. #include <linux/miscdevice.h>
  37. #include <linux/spinlock.h>
  38. #include <linux/mm.h>
  39. #include <linux/fs.h>
  40. #include <linux/vmalloc.h>
  41. #include <linux/string.h>
  42. #include <linux/slab.h>
  43. #include <linux/numa.h>
  44. #include <asm/page.h>
  45. #include <asm/pgtable.h>
  46. #include <linux/atomic.h>
  47. #include <asm/tlbflush.h>
  48. #include <asm/uncached.h>
  49. #include <asm/sn/addrs.h>
  50. #include <asm/sn/arch.h>
  51. #include <asm/sn/mspec.h>
  52. #include <asm/sn/sn_cpuid.h>
  53. #include <asm/sn/io.h>
  54. #include <asm/sn/bte.h>
  55. #include <asm/sn/shubio.h>
/* Identifier strings used in registration/log messages below. */
#define FETCHOP_ID "SGI Fetchop,"
#define CACHED_ID "Cached,"
#define UNCACHED_ID "Uncached"
#define REVISION "4.0"
#define MSPEC_BASENAME "mspec"
/*
 * Page types allocated by the device.
 */
enum mspec_page_type {
	MSPEC_FETCHOP = 1,	/* Atomic fetch-op (AMO) pages; mapped via TO_AMO(). */
	MSPEC_CACHED,		/* Cached pages (see cross-partition note in header). */
	MSPEC_UNCACHED		/* Uncached write-combining pages. */
};
#ifdef CONFIG_SGI_SN
/* Set once in mspec_init() when running on SN2 hardware; read-only after. */
static int is_sn2;
#else
/* Constant 0 lets the compiler discard the SN2-only paths on other builds. */
#define is_sn2 0
#endif
  74. /*
  75. * One of these structures is allocated when an mspec region is mmaped. The
  76. * structure is pointed to by the vma->vm_private_data field in the vma struct.
  77. * This structure is used to record the addresses of the mspec pages.
  78. * This structure is shared by all vma's that are split off from the
  79. * original vma when split_vma()'s are done.
  80. *
  81. * The refcnt is incremented atomically because mm->mmap_sem does not
  82. * protect in fork case where multiple tasks share the vma_data.
  83. */
  84. struct vma_data {
  85. atomic_t refcnt; /* Number of vmas sharing the data. */
  86. spinlock_t lock; /* Serialize access to this structure. */
  87. int count; /* Number of pages allocated. */
  88. enum mspec_page_type type; /* Type of pages allocated. */
  89. unsigned long vm_start; /* Original (unsplit) base. */
  90. unsigned long vm_end; /* Original (unsplit) end. */
  91. unsigned long maddr[0]; /* Array of MSPEC addresses. */
  92. };
/* used on shub2 to clear FOP cache in the HUB */
static unsigned long scratch_page[MAX_NUMNODES];
/* Number of scratch fetchop variables touched per flush (see mspec_zero_block). */
#define SH2_AMO_CACHE_ENTRIES 4
/*
 * mspec_zero_block
 *
 * Zero len bytes starting at (uncached) address addr.  On SN2 the BTE
 * performs the zero-fill against the underlying cached physical
 * address; elsewhere a plain memset suffices.  Returns 0 on success or
 * the nonzero bte_copy() status on failure.
 */
static inline int
mspec_zero_block(unsigned long addr, int len)
{
	int status;

	if (is_sn2) {
		if (is_shub2()) {
			int nid;
			void *p;
			int i;

			/*
			 * Shub2 keeps AMO variables in a small FOP cache.
			 * Load SH2_AMO_CACHE_ENTRIES scratch fetchop
			 * variables on the page's node so any cached copies
			 * of the page being zeroed are displaced before the
			 * BTE rewrites the memory beneath them.
			 */
			nid = nasid_to_cnodeid(get_node_number(__pa(addr)));
			p = (void *)TO_AMO(scratch_page[nid]);

			for (i = 0; i < SH2_AMO_CACHE_ENTRIES; i++) {
				FETCHOP_LOAD_OP(p, FETCHOP_LOAD);
				p += FETCHOP_VAR_SIZE;
			}
		}
		/* Strip the uncached offset: the BTE wants the cached view. */
		status = bte_copy(0, addr & ~__IA64_UNCACHED_OFFSET, len,
				  BTE_WACQUIRE | BTE_ZERO_FILL, NULL);
	} else {
		memset((char *) addr, 0, len);
		status = 0;
	}
	return status;
}
  120. /*
  121. * mspec_open
  122. *
  123. * Called when a device mapping is created by a means other than mmap
  124. * (via fork, munmap, etc.). Increments the reference count on the
  125. * underlying mspec data so it is not freed prematurely.
  126. */
  127. static void
  128. mspec_open(struct vm_area_struct *vma)
  129. {
  130. struct vma_data *vdata;
  131. vdata = vma->vm_private_data;
  132. atomic_inc(&vdata->refcnt);
  133. }
/*
 * mspec_close
 *
 * Called when unmapping a device mapping.  When the last reference to
 * the shared vma_data drops, zeroes every allocated mspec page, returns
 * it to the uncached pool, and frees the vma_data itself.
 */
static void
mspec_close(struct vm_area_struct *vma)
{
	struct vma_data *vdata;
	int index, last_index;
	unsigned long my_page;

	vdata = vma->vm_private_data;

	/* Other vmas (from fork or split_vma) may still hold references. */
	if (!atomic_dec_and_test(&vdata->refcnt))
		return;

	/*
	 * Walk the original (unsplit) range: this vma may cover only a
	 * fragment of it after split_vma().
	 */
	last_index = (vdata->vm_end - vdata->vm_start) >> PAGE_SHIFT;
	for (index = 0; index < last_index; index++) {
		if (vdata->maddr[index] == 0)
			continue;
		/*
		 * Clear the page before sticking it back
		 * into the pool.
		 */
		my_page = vdata->maddr[index];
		vdata->maddr[index] = 0;
		if (!mspec_zero_block(my_page, PAGE_SIZE))
			uncached_free_page(my_page, 1);
		else
			/* Leak the page rather than recycle dirty memory. */
			printk(KERN_WARNING "mspec_close(): "
			       "failed to zero page %ld\n", my_page);
	}

	kvfree(vdata);
}
/*
 * mspec_fault
 *
 * Creates a mspec page on first touch and maps it to user space.
 * Returns VM_FAULT_NOPAGE (the pte is installed directly via
 * vm_insert_pfn) or VM_FAULT_OOM if no uncached page was available.
 */
static int
mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long paddr, maddr;
	unsigned long pfn;
	pgoff_t index = vmf->pgoff;
	struct vma_data *vdata = vma->vm_private_data;

	/*
	 * NOTE(review): the cast applies to the loaded value, not the
	 * lvalue, so it does not actually force a volatile re-read of
	 * vdata->maddr[index].  This unlocked read is only a fast path;
	 * the locked recheck below is what decides the winner.
	 */
	maddr = (volatile unsigned long) vdata->maddr[index];
	if (maddr == 0) {
		/* Allocate before taking the spinlock. */
		maddr = uncached_alloc_page(numa_node_id(), 1);
		if (maddr == 0)
			return VM_FAULT_OOM;

		spin_lock(&vdata->lock);
		if (vdata->maddr[index] == 0) {
			/* We won the race: record our page. */
			vdata->count++;
			vdata->maddr[index] = maddr;
		} else {
			/* Lost the race: free ours, use the winner's page. */
			uncached_free_page(maddr, 1);
			maddr = vdata->maddr[index];
		}
		spin_unlock(&vdata->lock);
	}

	/* Fetchop pages are accessed through the AMO address space. */
	if (vdata->type == MSPEC_FETCHOP)
		paddr = TO_AMO(maddr);
	else
		paddr = maddr & ~__IA64_UNCACHED_OFFSET;

	pfn = paddr >> PAGE_SHIFT;

	/*
	 * vm_insert_pfn can fail with -EBUSY, but in that case it will
	 * be because another thread has installed the pte first, so it
	 * is no problem.
	 */
	vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);

	return VM_FAULT_NOPAGE;
}
/* VM callbacks installed on every mspec mapping by mspec_mmap(). */
static const struct vm_operations_struct mspec_vm_ops = {
	.open = mspec_open,
	.close = mspec_close,
	.fault = mspec_fault,
};
  212. /*
  213. * mspec_mmap
  214. *
  215. * Called when mmapping the device. Initializes the vma with a fault handler
  216. * and private data structure necessary to allocate, track, and free the
  217. * underlying pages.
  218. */
  219. static int
  220. mspec_mmap(struct file *file, struct vm_area_struct *vma,
  221. enum mspec_page_type type)
  222. {
  223. struct vma_data *vdata;
  224. int pages, vdata_size;
  225. if (vma->vm_pgoff != 0)
  226. return -EINVAL;
  227. if ((vma->vm_flags & VM_SHARED) == 0)
  228. return -EINVAL;
  229. if ((vma->vm_flags & VM_WRITE) == 0)
  230. return -EPERM;
  231. pages = vma_pages(vma);
  232. vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
  233. if (vdata_size <= PAGE_SIZE)
  234. vdata = kzalloc(vdata_size, GFP_KERNEL);
  235. else
  236. vdata = vzalloc(vdata_size);
  237. if (!vdata)
  238. return -ENOMEM;
  239. vdata->vm_start = vma->vm_start;
  240. vdata->vm_end = vma->vm_end;
  241. vdata->type = type;
  242. spin_lock_init(&vdata->lock);
  243. atomic_set(&vdata->refcnt, 1);
  244. vma->vm_private_data = vdata;
  245. vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
  246. if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED)
  247. vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
  248. vma->vm_ops = &mspec_vm_ops;
  249. return 0;
  250. }
/* mmap entry for /dev/sgi_fetchop: map atomic fetch-op pages. */
static int
fetchop_mmap(struct file *file, struct vm_area_struct *vma)
{
	return mspec_mmap(file, vma, MSPEC_FETCHOP);
}
/* mmap entry for /dev/mspec_cached: map cached mspec pages. */
static int
cached_mmap(struct file *file, struct vm_area_struct *vma)
{
	return mspec_mmap(file, vma, MSPEC_CACHED);
}
/* mmap entry for /dev/mspec_uncached: map uncached mspec pages. */
static int
uncached_mmap(struct file *file, struct vm_area_struct *vma)
{
	return mspec_mmap(file, vma, MSPEC_UNCACHED);
}
/*
 * Three misc character devices, one per page type.  Each supports only
 * mmap (plus a no-op llseek); all real work happens in the vm_ops.
 */
static const struct file_operations fetchop_fops = {
	.owner = THIS_MODULE,
	.mmap = fetchop_mmap,
	.llseek = noop_llseek,
};

static struct miscdevice fetchop_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "sgi_fetchop",
	.fops = &fetchop_fops
};

static const struct file_operations cached_fops = {
	.owner = THIS_MODULE,
	.mmap = cached_mmap,
	.llseek = noop_llseek,
};

static struct miscdevice cached_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "mspec_cached",
	.fops = &cached_fops
};

static const struct file_operations uncached_fops = {
	.owner = THIS_MODULE,
	.mmap = uncached_mmap,
	.llseek = noop_llseek,
};

static struct miscdevice uncached_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "mspec_uncached",
	.fops = &uncached_fops
};
/*
 * mspec_init
 *
 * Called at boot time to initialize the mspec facility: registers the
 * device nodes and, on Shub2, reserves a per-node scratch page used by
 * mspec_zero_block() to flush the FOP cache.  Returns 0 on success or
 * a negative errno, releasing any scratch pages on failure.
 */
static int __init
mspec_init(void)
{
	int ret;
	int nid;

	/*
	 * The fetchop device only works on SN2 hardware, uncached and cached
	 * memory drivers should both be valid on all ia64 hardware
	 */
#ifdef CONFIG_SGI_SN
	if (ia64_platform_is("sn2")) {
		is_sn2 = 1;
		if (is_shub2()) {
			ret = -ENOMEM;
			for_each_node_state(nid, N_ONLINE) {
				int actual_nid;
				int nasid;
				unsigned long phys;

				scratch_page[nid] = uncached_alloc_page(nid, 1);
				if (scratch_page[nid] == 0)
					goto free_scratch_pages;
				/*
				 * The scratch page must really reside on the
				 * node it was requested for, or the flush in
				 * mspec_zero_block() would hit the wrong HUB.
				 */
				phys = __pa(scratch_page[nid]);
				nasid = get_node_number(phys);
				actual_nid = nasid_to_cnodeid(nasid);
				if (actual_nid != nid)
					goto free_scratch_pages;
			}
		}

		ret = misc_register(&fetchop_miscdev);
		if (ret) {
			printk(KERN_ERR
			       "%s: failed to register device %i\n",
			       FETCHOP_ID, ret);
			goto free_scratch_pages;
		}
	}
#endif
	ret = misc_register(&cached_miscdev);
	if (ret) {
		printk(KERN_ERR "%s: failed to register device %i\n",
		       CACHED_ID, ret);
		/* Unwind devices registered so far before bailing out. */
		if (is_sn2)
			misc_deregister(&fetchop_miscdev);
		goto free_scratch_pages;
	}
	ret = misc_register(&uncached_miscdev);
	if (ret) {
		printk(KERN_ERR "%s: failed to register device %i\n",
		       UNCACHED_ID, ret);
		misc_deregister(&cached_miscdev);
		if (is_sn2)
			misc_deregister(&fetchop_miscdev);
		goto free_scratch_pages;
	}

	printk(KERN_INFO "%s %s initialized devices: %s %s %s\n",
	       MSPEC_BASENAME, REVISION, is_sn2 ? FETCHOP_ID : "",
	       CACHED_ID, UNCACHED_ID);

	return 0;

 free_scratch_pages:
	/* scratch_page[] is zero-initialized, so unused slots are skipped. */
	for_each_node(nid) {
		if (scratch_page[nid] != 0)
			uncached_free_page(scratch_page[nid], 1);
	}
	return ret;
}
  366. static void __exit
  367. mspec_exit(void)
  368. {
  369. int nid;
  370. misc_deregister(&uncached_miscdev);
  371. misc_deregister(&cached_miscdev);
  372. if (is_sn2) {
  373. misc_deregister(&fetchop_miscdev);
  374. for_each_node(nid) {
  375. if (scratch_page[nid] != 0)
  376. uncached_free_page(scratch_page[nid], 1);
  377. }
  378. }
  379. }
  380. module_init(mspec_init);
  381. module_exit(mspec_exit);
  382. MODULE_AUTHOR("Silicon Graphics, Inc. <linux-altix@sgi.com>");
  383. MODULE_DESCRIPTION("Driver for SGI SN special memory operations");
  384. MODULE_LICENSE("GPL");