/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include "as-layout.h"
#include "mem_user.h"
#include "os.h"
#include "skas.h"
#include "tlb.h"
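
/*
 * A host_vm_change collects pending host address space operations (mmap,
 * munmap, mprotect) for the address space identified by @id, so that
 * adjacent operations of the same kind can be merged before being issued
 * to the host.  @index is the number of queued ops, @data carries state
 * between batched calls into the os layer, and @force requests
 * unconditional remapping of the range being walked.
 */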
struct host_vm_change {
	struct host_vm_op {
		enum { NONE, MMAP, MUNMAP, MPROTECT } type;
		union {
			struct {
				unsigned long addr;
				unsigned long len;
				unsigned int prot;
				int fd;
				__u64 offset;
			} mmap;
			struct {
				unsigned long addr;
				unsigned long len;
			} munmap;
			struct {
				unsigned long addr;
				unsigned long len;
				unsigned int prot;
			} mprotect;
		} u;
	} ops[1];
	int index;
	struct mm_id *id;
	void *data;
	int force;
};

#define INIT_HVC(mm, force) \
	((struct host_vm_change) \
	 { .ops		= { { .type = NONE } },	\
	   .id		= &mm->context.id, \
	   .data	= NULL, \
	   .index	= 0, \
	   .force	= force })
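
/*
 * Issue the first @end queued operations to the host.  @finished tells the
 * lower layers whether this is the last batch of the current flush, so any
 * per-flush state held in hvc->data can be released.  Returns 0 on success
 * or the first error encountered.
 */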
static int do_ops(struct host_vm_change *hvc, int end,
		  int finished)
{
	struct host_vm_op *op;
	int i, ret = 0;

	for (i = 0; i < end && !ret; i++) {
		op = &hvc->ops[i];
		switch (op->type) {
		case MMAP:
			ret = map(hvc->id, op->u.mmap.addr, op->u.mmap.len,
				  op->u.mmap.prot, op->u.mmap.fd,
				  op->u.mmap.offset, finished, &hvc->data);
			break;
		case MUNMAP:
			ret = unmap(hvc->id, op->u.munmap.addr,
				    op->u.munmap.len, finished, &hvc->data);
			break;
		case MPROTECT:
			ret = protect(hvc->id, op->u.mprotect.addr,
				      op->u.mprotect.len, op->u.mprotect.prot,
				      finished, &hvc->data);
			break;
		default:
			printk(KERN_ERR "Unknown op type %d in do_ops\n",
			       op->type);
			break;
		}
	}

	return ret;
}
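
/*
 * Queue an mmap of the physical memory backing @phys at virtual address
 * @virt.  If the new mapping directly extends the previously queued mmap
 * (same protection, same fd, contiguous in both address and file offset),
 * the two are merged; otherwise a new op is queued, flushing the queue
 * first if it is full.
 */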
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
		    unsigned int prot, struct host_vm_change *hvc)
{
	__u64 offset;
	struct host_vm_op *last;
	int fd, ret = 0;

	fd = phys_mapping(phys, &offset);
	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MMAP) &&
		    (last->u.mmap.addr + last->u.mmap.len == virt) &&
		    (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
		    (last->u.mmap.offset + last->u.mmap.len == offset)) {
			last->u.mmap.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type = MMAP,
				    .u = { .mmap = { .addr   = virt,
						     .len    = len,
						     .prot   = prot,
						     .fd     = fd,
						     .offset = offset } } });
	return ret;
}
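
/*
 * Queue an munmap of [@addr, @addr + @len).  A munmap that directly follows
 * the previously queued munmap is merged with it; otherwise a new op is
 * queued, flushing the queue first if it is full.
 */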
static int add_munmap(unsigned long addr, unsigned long len,
		      struct host_vm_change *hvc)
{
	struct host_vm_op *last;
	int ret = 0;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MUNMAP) &&
		    (last->u.munmap.addr + last->u.munmap.len == addr)) {
			last->u.munmap.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type = MUNMAP,
				    .u = { .munmap = { .addr = addr,
						       .len  = len } } });
	return ret;
}
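
/*
 * Queue an mprotect of [@addr, @addr + @len) to @prot, merging with the
 * previously queued op when it is a contiguous mprotect to the same
 * protection.
 */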
static int add_mprotect(unsigned long addr, unsigned long len,
			unsigned int prot, struct host_vm_change *hvc)
{
	struct host_vm_op *last;
	int ret = 0;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MPROTECT) &&
		    (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
		    (last->u.mprotect.prot == prot)) {
			last->u.mprotect.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type = MPROTECT,
				    .u = { .mprotect = { .addr = addr,
							 .len  = len,
							 .prot = prot } } });
	return ret;
}
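
/*
 * Advance @n to the start of the next @inc-sized block (@inc must be a
 * power of two); used below to skip over whole absent page table ranges.
 */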
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
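
/*
 * Walk the PTEs covering [@addr, @end) under @pmd and queue the host
 * operations needed to make the host mappings match them.  The accessed
 * and dirty bits are folded into the protection: a page that has not been
 * accessed gets no access at all and a clean page is mapped read-only, so
 * the next access or write faults and lets the bits be set.  Each PTE is
 * marked up to date once its update has been queued.
 */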
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pte_t *pte;
	int r, w, x, prot, ret = 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		r = pte_read(*pte);
		w = pte_write(*pte);
		x = pte_exec(*pte);
		if (!pte_young(*pte)) {
			r = 0;
			w = 0;
		} else if (!pte_dirty(*pte)) {
			w = 0;
		}

		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
			(x ? UM_PROT_EXEC : 0));
		if (hvc->force || pte_newpage(*pte)) {
			if (pte_present(*pte))
				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
					       PAGE_SIZE, prot, hvc);
			else
				ret = add_munmap(addr, PAGE_SIZE, hvc);
		} else if (pte_newprot(*pte))
			ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr != end) && !ret));

	return ret;
}

static inline int update_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pmd_t *pmd;
	unsigned long next;
	int ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd)) {
			if (hvc->force || pmd_newpage(*pmd)) {
				ret = add_munmap(addr, next - addr, hvc);
				pmd_mkuptodate(*pmd);
			}
		} else
			ret = update_pte_range(pmd, addr, next, hvc);
	} while (pmd++, addr = next, ((addr != end) && !ret));

	return ret;
}

static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pud_t *pud;
	unsigned long next;
	int ret = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_present(*pud)) {
			if (hvc->force || pud_newpage(*pud)) {
				ret = add_munmap(addr, next - addr, hvc);
				pud_mkuptodate(*pud);
			}
		} else
			ret = update_pmd_range(pud, addr, next, hvc);
	} while (pud++, addr = next, ((addr != end) && !ret));

	return ret;
}
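
/*
 * Synchronise the host mappings for [@start_addr, @end_addr) in @mm with
 * the page tables, batching the resulting host calls through a
 * host_vm_change.  If anything fails, the process is killed, since its
 * address space can no longer be trusted to match its page tables.
 */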
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	pgd_t *pgd;
	struct host_vm_change hvc;
	unsigned long addr = start_addr, next;
	int ret = 0;

	hvc = INIT_HVC(mm, force);
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end_addr);
		if (!pgd_present(*pgd)) {
			if (force || pgd_newpage(*pgd)) {
				ret = add_munmap(addr, next - addr, &hvc);
				pgd_mkuptodate(*pgd);
			}
		} else
			ret = update_pud_range(pgd, addr, next, &hvc);
	} while (pgd++, addr = next, ((addr != end_addr) && !ret));

	if (!ret)
		ret = do_ops(&hvc, hvc.index, 1);

	/* This is not an else because ret is modified above */
	if (ret) {
		printk(KERN_ERR "fix_range_common: failed, killing current "
		       "process\n");
		force_sig(SIGKILL, current);
	}
}
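
/*
 * Bring the host mappings for a kernel address range in line with
 * init_mm's page tables, unmapping, remapping, or reprotecting pages
 * directly through the os_* helpers.  Absent page table levels are skipped
 * a whole block at a time.  Returns nonzero if any host mapping changed.
 */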
int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err;

	mm = &init_mm;
	for (addr = start; addr < end;) {
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd)) {
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if (last > end)
				last = end;
			if (pgd_newpage(*pgd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(pgd, addr);
		if (!pud_present(*pud)) {
			last = ADD_ROUND(addr, PUD_SIZE);
			if (last > end)
				last = end;
			if (pud_newpage(*pud)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (!pmd_present(*pmd)) {
			last = ADD_ROUND(addr, PMD_SIZE);
			if (last > end)
				last = end;
			if (pmd_newpage(*pmd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if (!pte_present(*pte) || pte_newpage(*pte)) {
			updated = 1;
			err = os_unmap_memory((void *) addr,
					      PAGE_SIZE);
			if (err < 0)
				panic("munmap failed, errno = %d\n",
				      -err);
			if (pte_present(*pte))
				map_memory(addr,
					   pte_val(*pte) & PAGE_MASK,
					   PAGE_SIZE, 1, 1, 1);
		} else if (pte_newprot(*pte)) {
			updated = 1;
			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
		}
		addr += PAGE_SIZE;
	}

	return updated;
}
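
/*
 * Flush a single user page: recompute its host protection from the PTE's
 * accessed/dirty state and issue the matching map, unmap, or protect call
 * immediately.  On any failure the process is killed.
 */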
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	struct mm_struct *mm = vma->vm_mm;
	void *flush = NULL;
	int r, w, x, prot, err = 0;
	struct mm_id *mm_id;

	address &= PAGE_MASK;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto kill;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto kill;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		goto kill;

	pte = pte_offset_kernel(pmd, address);

	r = pte_read(*pte);
	w = pte_write(*pte);
	x = pte_exec(*pte);
	if (!pte_young(*pte)) {
		r = 0;
		w = 0;
	} else if (!pte_dirty(*pte)) {
		w = 0;
	}

	mm_id = &mm->context.id;
	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
		(x ? UM_PROT_EXEC : 0));

	if (pte_newpage(*pte)) {
		if (pte_present(*pte)) {
			unsigned long long offset;
			int fd;

			fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
			err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
				  1, &flush);
		} else
			err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
	} else if (pte_newprot(*pte))
		err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);

	if (err)
		goto kill;

	*pte = pte_mkuptodate(*pte);

	return;

kill:
	printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
	force_sig(SIGKILL, current);
}
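
/*
 * Thin wrappers around the generic page table walkers, so other parts of
 * the port can resolve an address down to its PTE one level at a time.
 */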
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
	return pgd_offset(mm, address);
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
	return pud_offset(pgd, address);
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
	return pmd_offset(pud, address);
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
	return pte_offset_kernel(pmd, address);
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(task->mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pte_offset_map(pmd, addr);
}

void flush_tlb_all(void)
{
	flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_kernel_range_common(start, end);
}

void flush_tlb_kernel_vm(void)
{
	flush_tlb_kernel_range_common(start_vm, end_vm);
}

void __flush_tlb_one(unsigned long addr)
{
	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}
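
/*
 * Like fix_range_common(), but when running without /proc/mm support the
 * range is clipped below STUB_START so the stub pages are never touched.
 */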
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	if (!proc_mm && (end_addr > STUB_START))
		end_addr = STUB_START;

	fix_range_common(mm, start_addr, end_addr, force);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	if (vma->vm_mm == NULL)
		flush_tlb_kernel_range_common(start, end);
	else
		fix_range(vma->vm_mm, start, end, 0);
}
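
/*
 * Flush an entire address space: up to task_size with /proc/mm support,
 * otherwise up to the stub area.  Nothing is done if the mm is already
 * being torn down.
 */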
void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long end;

	/*
	 * Don't bother flushing if this address space is about to be
	 * destroyed.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	end = proc_mm ? task_size : STUB_START;
	fix_range(mm, 0, end, 0);
}
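
/*
 * Forcibly resynchronise every VMA of the current process with the host,
 * remapping everything regardless of the new-page/new-prot flags.
 */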
void force_flush_all(void)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 1);
		vma = vma->vm_next;
	}
}