/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "asm/pgtable.h"
#include "asm/tlbflush.h"
#include "as-layout.h"
#include "mem_user.h"
#include "os.h"
#include "skas.h"
#include "tlb.h"

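/*
 * A queue of pending changes to the host address space backing an mm.
 * Adjacent operations of the same kind are merged as they are added,
 * so a run of contiguous pages becomes a single host call instead of
 * one call per page.
 */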
struct host_vm_change {
	struct host_vm_op {
		enum { NONE, MMAP, MUNMAP, MPROTECT } type;
		union {
			struct {
				unsigned long addr;
				unsigned long len;
				unsigned int prot;
				int fd;
				__u64 offset;
			} mmap;
			struct {
				unsigned long addr;
				unsigned long len;
			} munmap;
			struct {
				unsigned long addr;
				unsigned long len;
				unsigned int prot;
			} mprotect;
		} u;
	} ops[1];
	int index;
	struct mm_id *id;
	void *data;
	int force;
};

#define INIT_HVC(mm, force) \
	((struct host_vm_change) \
	 { .ops = { { .type = NONE } }, \
	   .id = &mm->context.id, \
	   .data = NULL, \
	   .index = 0, \
	   .force = force })

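/*
 * Issue the first 'end' queued operations to the host, stopping at the
 * first failure.  'finished' and '&hvc->data' are simply passed through
 * to map(), unmap() and protect().
 */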
static int do_ops(struct host_vm_change *hvc, int end,
		  int finished)
{
	struct host_vm_op *op;
	int i, ret = 0;

	for (i = 0; i < end && !ret; i++) {
		op = &hvc->ops[i];
		switch (op->type) {
		case MMAP:
			ret = map(hvc->id, op->u.mmap.addr, op->u.mmap.len,
				  op->u.mmap.prot, op->u.mmap.fd,
				  op->u.mmap.offset, finished, &hvc->data);
			break;
		case MUNMAP:
			ret = unmap(hvc->id, op->u.munmap.addr,
				    op->u.munmap.len, finished, &hvc->data);
			break;
		case MPROTECT:
			ret = protect(hvc->id, op->u.mprotect.addr,
				      op->u.mprotect.len, op->u.mprotect.prot,
				      finished, &hvc->data);
			break;
		default:
			printk(KERN_ERR "Unknown op type %d in do_ops\n",
			       op->type);
			break;
		}
	}

	return ret;
}

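/*
 * Queue a host mmap for [virt, virt + len).  If it directly extends the
 * previously queued mmap (contiguous in both address and file offset,
 * same protection, same fd), grow that entry instead.  A full queue is
 * flushed with do_ops() before the new entry is added.
 */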
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
		    unsigned int prot, struct host_vm_change *hvc)
{
	__u64 offset;
	struct host_vm_op *last;
	int fd, ret = 0;

	fd = phys_mapping(phys, &offset);
	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MMAP) &&
		    (last->u.mmap.addr + last->u.mmap.len == virt) &&
		    (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
		    (last->u.mmap.offset + last->u.mmap.len == offset)) {
			last->u.mmap.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type = MMAP,
				    .u = { .mmap = { .addr = virt,
						     .len = len,
						     .prot = prot,
						     .fd = fd,
						     .offset = offset } } });
	return ret;
}

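/*
 * Queue a host munmap for [addr, addr + len), merging it into the
 * previously queued munmap when the two ranges are contiguous.
 */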
static int add_munmap(unsigned long addr, unsigned long len,
		      struct host_vm_change *hvc)
{
	struct host_vm_op *last;
	int ret = 0;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MUNMAP) &&
		    (last->u.munmap.addr + last->u.munmap.len == addr)) {
			last->u.munmap.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type = MUNMAP,
				    .u = { .munmap = { .addr = addr,
						       .len = len } } });
	return ret;
}

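/*
 * Queue a host mprotect for [addr, addr + len), merging it into the
 * previously queued mprotect when the ranges are contiguous and the
 * protection matches.
 */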
static int add_mprotect(unsigned long addr, unsigned long len,
			unsigned int prot, struct host_vm_change *hvc)
{
	struct host_vm_op *last;
	int ret = 0;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MPROTECT) &&
		    (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
		    (last->u.mprotect.prot == prot)) {
			last->u.mprotect.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type = MPROTECT,
				    .u = { .mprotect = { .addr = addr,
							 .len = len,
							 .prot = prot } } });
	return ret;
}

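/*
 * Advance n to the start of the next inc-aligned region; inc must be a
 * power of two.
 */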
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))

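/*
 * Walk the ptes in [addr, end) and queue whatever host operations are
 * needed to bring the host mappings in line with the page tables.
 * Pages not marked young lose read and write permission, and pages not
 * marked dirty lose write permission, so that the resulting faults can
 * be used to emulate the accessed and dirty bits.
 */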
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pte_t *pte;
	int r, w, x, prot, ret = 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		r = pte_read(*pte);
		w = pte_write(*pte);
		x = pte_exec(*pte);
		if (!pte_young(*pte)) {
			r = 0;
			w = 0;
		} else if (!pte_dirty(*pte)) {
			w = 0;
		}
		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
			(x ? UM_PROT_EXEC : 0));
		if (hvc->force || pte_newpage(*pte)) {
			if (pte_present(*pte))
				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
					       PAGE_SIZE, prot, hvc);
			else ret = add_munmap(addr, PAGE_SIZE, hvc);
		}
		else if (pte_newprot(*pte))
			ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr != end) && !ret));
	return ret;
}

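/*
 * The pmd and pud walkers share one pattern: an absent entry that is
 * marked new becomes a single munmap of the whole range, anything else
 * recurses one level down.
 */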
static inline int update_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pmd_t *pmd;
	unsigned long next;
	int ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd)) {
			if (hvc->force || pmd_newpage(*pmd)) {
				ret = add_munmap(addr, next - addr, hvc);
				pmd_mkuptodate(*pmd);
			}
		}
		else ret = update_pte_range(pmd, addr, next, hvc);
	} while (pmd++, addr = next, ((addr != end) && !ret));
	return ret;
}

static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pud_t *pud;
	unsigned long next;
	int ret = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_present(*pud)) {
			if (hvc->force || pud_newpage(*pud)) {
				ret = add_munmap(addr, next - addr, hvc);
				pud_mkuptodate(*pud);
			}
		}
		else ret = update_pmd_range(pud, addr, next, hvc);
	} while (pud++, addr = next, ((addr != end) && !ret));
	return ret;
}

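/*
 * Sync the host address space with the page tables over
 * [start_addr, end_addr), then flush whatever is still queued.  On
 * failure the current process is killed, since its host mappings no
 * longer match its page tables.
 */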
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	pgd_t *pgd;
	struct host_vm_change hvc;
	unsigned long addr = start_addr, next;
	int ret = 0;

	hvc = INIT_HVC(mm, force);
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end_addr);
		if (!pgd_present(*pgd)) {
			if (force || pgd_newpage(*pgd)) {
				ret = add_munmap(addr, next - addr, &hvc);
				pgd_mkuptodate(*pgd);
			}
		}
		else ret = update_pud_range(pgd, addr, next, &hvc);
	} while (pgd++, addr = next, ((addr != end_addr) && !ret));

	if (!ret)
		ret = do_ops(&hvc, hvc.index, 1);

	/* This is not an else because ret is modified above */
	if (ret) {
		printk(KERN_ERR "fix_range_common: failed, killing current "
		       "process\n");
		force_sig(SIGKILL, current);
	}
}

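/*
 * Kernel mappings (in init_mm) are fixed up directly with
 * os_unmap_memory()/map_memory()/os_protect_memory() rather than
 * queued, and failures are fatal.  Returns nonzero if anything was
 * changed.
 */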
int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err;

	mm = &init_mm;
	for (addr = start; addr < end;) {
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd)) {
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if (last > end)
				last = end;
			if (pgd_newpage(*pgd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(pgd, addr);
		if (!pud_present(*pud)) {
			last = ADD_ROUND(addr, PUD_SIZE);
			if (last > end)
				last = end;
			if (pud_newpage(*pud)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (!pmd_present(*pmd)) {
			last = ADD_ROUND(addr, PMD_SIZE);
			if (last > end)
				last = end;
			if (pmd_newpage(*pmd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if (!pte_present(*pte) || pte_newpage(*pte)) {
			updated = 1;
			err = os_unmap_memory((void *) addr,
					      PAGE_SIZE);
			if (err < 0)
				panic("munmap failed, errno = %d\n",
				      -err);
			if (pte_present(*pte))
				map_memory(addr,
					   pte_val(*pte) & PAGE_MASK,
					   PAGE_SIZE, 1, 1, 1);
		}
		else if (pte_newprot(*pte)) {
			updated = 1;
			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
		}
		addr += PAGE_SIZE;
	}
	return updated;
}

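/*
 * Flush a single userspace page, applying the same young/dirty
 * permission trimming as update_pte_range() but issuing the host call
 * immediately rather than queueing it.
 */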
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	struct mm_struct *mm = vma->vm_mm;
	void *flush = NULL;
	int r, w, x, prot, err = 0;
	struct mm_id *mm_id;

	address &= PAGE_MASK;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto kill;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto kill;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		goto kill;

	pte = pte_offset_kernel(pmd, address);

	r = pte_read(*pte);
	w = pte_write(*pte);
	x = pte_exec(*pte);
	if (!pte_young(*pte)) {
		r = 0;
		w = 0;
	} else if (!pte_dirty(*pte)) {
		w = 0;
	}

	mm_id = &mm->context.id;
	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
		(x ? UM_PROT_EXEC : 0));
	if (pte_newpage(*pte)) {
		if (pte_present(*pte)) {
			unsigned long long offset;
			int fd;

			fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
			err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
				  1, &flush);
		}
		else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
	}
	else if (pte_newprot(*pte))
		err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);

	if (err)
		goto kill;

	*pte = pte_mkuptodate(*pte);

	return;

kill:
	printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
	force_sig(SIGKILL, current);
}

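/* Page table lookup helpers for callers outside this file. */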
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
	return pgd_offset(mm, address);
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
	return pud_offset(pgd, address);
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
	return pmd_offset(pud, address);
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
	return pte_offset_kernel(pmd, address);
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(task->mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pte_offset_map(pmd, addr);
}

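/* The generic TLB flush entry points, in terms of the machinery above. */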
void flush_tlb_all(void)
{
	flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_kernel_range_common(start, end);
}

void flush_tlb_kernel_vm(void)
{
	flush_tlb_kernel_range_common(start_vm, end_vm);
}

void __flush_tlb_one(unsigned long addr)
{
	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}

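/*
 * Without /proc/mm on the host, the stub pages at STUB_START must stay
 * mapped, so the range is clamped before being synced.
 */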
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	if (!proc_mm && (end_addr > STUB_START))
		end_addr = STUB_START;

	fix_range_common(mm, start_addr, end_addr, force);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	if (vma->vm_mm == NULL)
		flush_tlb_kernel_range_common(start, end);
	else fix_range(vma->vm_mm, start, end, 0);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long end;

	/*
	 * Don't bother flushing if this address space is about to be
	 * destroyed.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	end = proc_mm ? task_size : STUB_START;
	fix_range(mm, 0, end, 0);
}

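/*
 * Re-sync every VMA of the current address space with the host,
 * forcing operations even for entries that are not marked new.
 */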
void force_flush_all(void)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 1);
		vma = vma->vm_next;
	}
}