tlb.c

/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include "as-layout.h"
#include "mem_user.h"
#include "os.h"
#include "skas.h"

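/*
 * host_vm_change collects pending host mmap/munmap/mprotect operations
 * for one address space (hvc->id) so they can be issued together by
 * do_ops().  ops[] has room for a single entry, so in practice each op
 * is flushed before the next non-coalescable one is queued.
 */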
struct host_vm_change {
	struct host_vm_op {
		enum { NONE, MMAP, MUNMAP, MPROTECT } type;
		union {
			struct {
				unsigned long addr;
				unsigned long len;
				unsigned int prot;
				int fd;
				__u64 offset;
			} mmap;
			struct {
				unsigned long addr;
				unsigned long len;
			} munmap;
			struct {
				unsigned long addr;
				unsigned long len;
				unsigned int prot;
			} mprotect;
		} u;
	} ops[1];
	int index;
	struct mm_id *id;
	void *data;
	int force;
};

#define INIT_HVC(mm, force) \
	((struct host_vm_change) \
	 { .ops		= { { .type = NONE } }, \
	   .id		= &mm->context.id, \
	   .data	= NULL, \
	   .index	= 0, \
	   .force	= force })

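/*
 * Walk the queued ops and hand each one to the matching host call
 * (map/unmap/protect); "finished" tells the host layer whether more
 * operations will follow in this batch.  Stops at the first error.
 */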
static int do_ops(struct host_vm_change *hvc, int end,
		  int finished)
{
	struct host_vm_op *op;
	int i, ret = 0;

	for (i = 0; i < end && !ret; i++) {
		op = &hvc->ops[i];
		switch (op->type) {
		case MMAP:
			ret = map(hvc->id, op->u.mmap.addr, op->u.mmap.len,
				  op->u.mmap.prot, op->u.mmap.fd,
				  op->u.mmap.offset, finished, &hvc->data);
			break;
		case MUNMAP:
			ret = unmap(hvc->id, op->u.munmap.addr,
				    op->u.munmap.len, finished, &hvc->data);
			break;
		case MPROTECT:
			ret = protect(hvc->id, op->u.mprotect.addr,
				      op->u.mprotect.len, op->u.mprotect.prot,
				      finished, &hvc->data);
			break;
		default:
			printk(KERN_ERR "Unknown op type %d in do_ops\n",
			       op->type);
			break;
		}
	}

	return ret;
}

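/*
 * Queue an mmap of the host mapping backing phys at virt.  If it simply
 * extends the previously queued mmap (same fd and prot, contiguous in
 * both the virtual address and the file offset), the two are coalesced;
 * otherwise a full queue is flushed with do_ops() before the new entry
 * is added.
 */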
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
		    unsigned int prot, struct host_vm_change *hvc)
{
	__u64 offset;
	struct host_vm_op *last;
	int fd, ret = 0;

	fd = phys_mapping(phys, &offset);
	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MMAP) &&
		    (last->u.mmap.addr + last->u.mmap.len == virt) &&
		    (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
		    (last->u.mmap.offset + last->u.mmap.len == offset)) {
			last->u.mmap.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type	= MMAP,
				    .u = { .mmap = { .addr	= virt,
						     .len	= len,
						     .prot	= prot,
						     .fd	= fd,
						     .offset	= offset }
					 } });
	return ret;
}

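/*
 * add_munmap() and add_mprotect() follow the same pattern as add_mmap():
 * extend the previously queued op when the new range is adjacent and
 * compatible, flushing the queue first when it is full.
 */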
static int add_munmap(unsigned long addr, unsigned long len,
		      struct host_vm_change *hvc)
{
	struct host_vm_op *last;
	int ret = 0;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MUNMAP) &&
		    (last->u.munmap.addr + last->u.munmap.len == addr)) {
			last->u.munmap.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type	= MUNMAP,
				    .u = { .munmap = { .addr	= addr,
						       .len	= len } } });
	return ret;
}

static int add_mprotect(unsigned long addr, unsigned long len,
			unsigned int prot, struct host_vm_change *hvc)
{
	struct host_vm_op *last;
	int ret = 0;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MPROTECT) &&
		    (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
		    (last->u.mprotect.prot == prot)) {
			last->u.mprotect.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type	= MPROTECT,
				    .u = { .mprotect = { .addr	= addr,
							 .len	= len,
							 .prot	= prot } } });
	return ret;
}

#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))

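/*
 * Page-table walkers: update_pte_range() turns the access bits and the
 * newpage/newprot state of each PTE into queued host ops (mmap, munmap
 * or mprotect), skipping the stub pages; the pmd/pud/pgd walkers queue
 * munmaps for ranges whose upper-level entries are not present.
 */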
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pte_t *pte;
	int r, w, x, prot, ret = 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		if ((addr >= STUB_START) && (addr < STUB_END))
			continue;

		r = pte_read(*pte);
		w = pte_write(*pte);
		x = pte_exec(*pte);
		if (!pte_young(*pte)) {
			r = 0;
			w = 0;
		} else if (!pte_dirty(*pte))
			w = 0;

		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
			(x ? UM_PROT_EXEC : 0));
		if (hvc->force || pte_newpage(*pte)) {
			if (pte_present(*pte))
				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
					       PAGE_SIZE, prot, hvc);
			else
				ret = add_munmap(addr, PAGE_SIZE, hvc);
		} else if (pte_newprot(*pte))
			ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
	return ret;
}

static inline int update_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pmd_t *pmd;
	unsigned long next;
	int ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd)) {
			if (hvc->force || pmd_newpage(*pmd)) {
				ret = add_munmap(addr, next - addr, hvc);
				pmd_mkuptodate(*pmd);
			}
		}
		else ret = update_pte_range(pmd, addr, next, hvc);
	} while (pmd++, addr = next, ((addr < end) && !ret));
	return ret;
}

static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pud_t *pud;
	unsigned long next;
	int ret = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_present(*pud)) {
			if (hvc->force || pud_newpage(*pud)) {
				ret = add_munmap(addr, next - addr, hvc);
				pud_mkuptodate(*pud);
			}
		}
		else ret = update_pmd_range(pud, addr, next, hvc);
	} while (pud++, addr = next, ((addr < end) && !ret));
	return ret;
}

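/*
 * fix_range_common() walks the guest page tables for
 * [start_addr, end_addr) and pushes the accumulated changes out to the
 * host; with force set, mappings are redone even if they are not marked
 * as changed.  A failing host operation is fatal for the current
 * process, since the guest page tables and the host mappings would
 * otherwise diverge.
 */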
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	pgd_t *pgd;
	struct host_vm_change hvc;
	unsigned long addr = start_addr, next;
	int ret = 0;

	hvc = INIT_HVC(mm, force);
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end_addr);
		if (!pgd_present(*pgd)) {
			if (force || pgd_newpage(*pgd)) {
				ret = add_munmap(addr, next - addr, &hvc);
				pgd_mkuptodate(*pgd);
			}
		}
		else ret = update_pud_range(pgd, addr, next, &hvc);
	} while (pgd++, addr = next, ((addr < end_addr) && !ret));

	if (!ret)
		ret = do_ops(&hvc, hvc.index, 1);

	/* This is not an else because ret is modified above */
	if (ret) {
		printk(KERN_ERR "fix_range_common: failed, killing current "
		       "process\n");
		force_sig(SIGKILL, current);
	}
}

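/*
 * Kernel-space counterpart of the walk above: instead of queueing ops
 * for a userspace mm, it acts on init_mm and calls the host directly
 * (os_unmap_memory()/map_memory()/os_protect_memory()), panicking on
 * failure.  Returns nonzero if anything was changed.
 */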
static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err;

	mm = &init_mm;
	for (addr = start; addr < end;) {
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd)) {
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if (last > end)
				last = end;
			if (pgd_newpage(*pgd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(pgd, addr);
		if (!pud_present(*pud)) {
			last = ADD_ROUND(addr, PUD_SIZE);
			if (last > end)
				last = end;
			if (pud_newpage(*pud)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (!pmd_present(*pmd)) {
			last = ADD_ROUND(addr, PMD_SIZE);
			if (last > end)
				last = end;
			if (pmd_newpage(*pmd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if (!pte_present(*pte) || pte_newpage(*pte)) {
			updated = 1;
			err = os_unmap_memory((void *) addr,
					      PAGE_SIZE);
			if (err < 0)
				panic("munmap failed, errno = %d\n",
				      -err);
			if (pte_present(*pte))
				map_memory(addr,
					   pte_val(*pte) & PAGE_MASK,
					   PAGE_SIZE, 1, 1, 1);
		}
		else if (pte_newprot(*pte)) {
			updated = 1;
			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
		}
		addr += PAGE_SIZE;
	}
	return updated;
}

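/*
 * Flush a single user page: walk down to its PTE, compute the host
 * protection from the access bits, and issue the matching host op
 * directly.  A missing page-table level or a failed host call kills the
 * process, as in fix_range_common().
 */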
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	struct mm_struct *mm = vma->vm_mm;
	void *flush = NULL;
	int r, w, x, prot, err = 0;
	struct mm_id *mm_id;

	address &= PAGE_MASK;
	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto kill;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto kill;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		goto kill;

	pte = pte_offset_kernel(pmd, address);

	r = pte_read(*pte);
	w = pte_write(*pte);
	x = pte_exec(*pte);
	if (!pte_young(*pte)) {
		r = 0;
		w = 0;
	} else if (!pte_dirty(*pte)) {
		w = 0;
	}

	mm_id = &mm->context.id;
	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
		(x ? UM_PROT_EXEC : 0));

	if (pte_newpage(*pte)) {
		if (pte_present(*pte)) {
			unsigned long long offset;
			int fd;

			fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
			err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
				  1, &flush);
		}
		else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
	}
	else if (pte_newprot(*pte))
		err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);

	if (err)
		goto kill;

	*pte = pte_mkuptodate(*pte);

	return;

kill:
	printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
	force_sig(SIGKILL, current);
}

pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
	return pgd_offset(mm, address);
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
	return pud_offset(pgd, address);
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
	return pmd_offset(pud, address);
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
	return pte_offset_kernel(pmd, address);
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(task->mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pte_offset_map(pmd, addr);
}

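/*
 * The functions below are the arch TLB-flush entry points; each is a
 * thin wrapper that chooses between the kernel-range walk and the
 * user-range walk above.  force_flush_all() redoes every VMA of the
 * current mm with force set.
 */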
void flush_tlb_all(void)
{
	flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_kernel_range_common(start, end);
}

void flush_tlb_kernel_vm(void)
{
	flush_tlb_kernel_range_common(start_vm, end_vm);
}

void __flush_tlb_one(unsigned long addr)
{
	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}

static void fix_range(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	fix_range_common(mm, start_addr, end_addr, force);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	if (vma->vm_mm == NULL)
		flush_tlb_kernel_range_common(start, end);
	else fix_range(vma->vm_mm, start, end, 0);
}
EXPORT_SYMBOL(flush_tlb_range);

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			unsigned long end)
{
	/*
	 * Don't bother flushing if this address space is about to be
	 * destroyed.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	fix_range(mm, start, end, 0);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 0);
		vma = vma->vm_next;
	}
}

void force_flush_all(void)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 1);
		vma = vma->vm_next;
	}
}