tlb.c

/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "asm/page.h"
#include "asm/pgalloc.h"
#include "asm/pgtable.h"
#include "asm/tlbflush.h"
#include "as-layout.h"
#include "tlb.h"
#include "mem.h"
#include "mem_user.h"
#include "os.h"
#include "skas.h"
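
/*
 * add_mmap() queues a host mmap of [virt, virt + len) backed by the physical
 * memory at phys.  If the new mapping is contiguous with the previously
 * queued mmap (same fd and protection, adjoining virtual addresses and file
 * offsets), it is merged into that entry instead.  When the ops array is
 * full, the queued operations are flushed through do_ops first.
 */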
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
                    unsigned int prot, struct host_vm_op *ops, int *index,
                    int last_filled, union mm_context *mmu, void **flush,
                    int (*do_ops)(union mm_context *, struct host_vm_op *,
                                  int, int, void **))
{
        __u64 offset;
        struct host_vm_op *last;
        int fd, ret = 0;

        fd = phys_mapping(phys, &offset);
        if(*index != -1){
                last = &ops[*index];
                if((last->type == MMAP) &&
                   (last->u.mmap.addr + last->u.mmap.len == virt) &&
                   (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
                   (last->u.mmap.offset + last->u.mmap.len == offset)){
                        last->u.mmap.len += len;
                        return 0;
                }
        }

        if(*index == last_filled){
                ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
                *index = -1;
        }

        ops[++*index] = ((struct host_vm_op) { .type = MMAP,
                                               .u = { .mmap = {
                                                       .addr = virt,
                                                       .len = len,
                                                       .prot = prot,
                                                       .fd = fd,
                                                       .offset = offset }
                                               } });
        return ret;
}
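
/*
 * add_munmap() queues a host munmap of [addr, addr + len), extending the
 * previously queued munmap when the two ranges are adjacent, and flushing
 * the ops array through do_ops when it is full.
 */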
static int add_munmap(unsigned long addr, unsigned long len,
                      struct host_vm_op *ops, int *index, int last_filled,
                      union mm_context *mmu, void **flush,
                      int (*do_ops)(union mm_context *, struct host_vm_op *,
                                    int, int, void **))
{
        struct host_vm_op *last;
        int ret = 0;

        if(*index != -1){
                last = &ops[*index];
                if((last->type == MUNMAP) &&
                   (last->u.munmap.addr + last->u.munmap.len == addr)){
                        last->u.munmap.len += len;
                        return 0;
                }
        }

        if(*index == last_filled){
                ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
                *index = -1;
        }

        ops[++*index] = ((struct host_vm_op) { .type = MUNMAP,
                                               .u = { .munmap = {
                                                       .addr = addr,
                                                       .len = len } } });
        return ret;
}
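
/*
 * add_mprotect() queues a host mprotect of [addr, addr + len) to prot,
 * merging with an adjacent queued mprotect of the same protection and
 * flushing the ops array through do_ops when it is full.
 */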
static int add_mprotect(unsigned long addr, unsigned long len,
                        unsigned int prot, struct host_vm_op *ops, int *index,
                        int last_filled, union mm_context *mmu, void **flush,
                        int (*do_ops)(union mm_context *, struct host_vm_op *,
                                      int, int, void **))
{
        struct host_vm_op *last;
        int ret = 0;

        if(*index != -1){
                last = &ops[*index];
                if((last->type == MPROTECT) &&
                   (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
                   (last->u.mprotect.prot == prot)){
                        last->u.mprotect.len += len;
                        return 0;
                }
        }

        if(*index == last_filled){
                ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
                *index = -1;
        }

        ops[++*index] = ((struct host_vm_op) { .type = MPROTECT,
                                               .u = { .mprotect = {
                                                       .addr = addr,
                                                       .len = len,
                                                       .prot = prot } } });
        return ret;
}
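
/*
 * ADD_ROUND(n, inc) advances n to the next inc-aligned boundary above it
 * (inc must be a power of two).  For example, ADD_ROUND(0x1234, 0x1000) is
 * 0x2000, and an already aligned 0x2000 advances to 0x3000.
 */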
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
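
/*
 * update_pte_range() walks the PTEs covering [addr, end) under one pmd.
 * For each PTE it derives the host protection from the read/write/exec bits
 * (downgraded for pages that are not young or not dirty), queues an mmap,
 * munmap, or mprotect as needed, and marks the PTE up to date.
 */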
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
                                   unsigned long end, struct host_vm_op *ops,
                                   int last_op, int *op_index, int force,
                                   union mm_context *mmu, void **flush,
                                   int (*do_ops)(union mm_context *,
                                                 struct host_vm_op *, int, int,
                                                 void **))
{
        pte_t *pte;
        int r, w, x, prot, ret = 0;

        pte = pte_offset_kernel(pmd, addr);
        do {
                r = pte_read(*pte);
                w = pte_write(*pte);
                x = pte_exec(*pte);
                if (!pte_young(*pte)) {
                        r = 0;
                        w = 0;
                } else if (!pte_dirty(*pte)) {
                        w = 0;
                }
                prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
                        (x ? UM_PROT_EXEC : 0));
                if(force || pte_newpage(*pte)){
                        if(pte_present(*pte))
                                ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
                                               PAGE_SIZE, prot, ops, op_index,
                                               last_op, mmu, flush, do_ops);
                        else ret = add_munmap(addr, PAGE_SIZE, ops, op_index,
                                              last_op, mmu, flush, do_ops);
                }
                else if(pte_newprot(*pte))
                        ret = add_mprotect(addr, PAGE_SIZE, prot, ops,
                                           op_index, last_op, mmu, flush,
                                           do_ops);
                *pte = pte_mkuptodate(*pte);
        } while (pte++, addr += PAGE_SIZE, ((addr != end) && !ret));

        return ret;
}
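
/*
 * update_pmd_range() and update_pud_range() each walk one level of the page
 * table: ranges whose entry is absent are queued for munmap (when forced or
 * newly cleared), present entries are handed to the next level down.
 */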
static inline int update_pmd_range(pud_t *pud, unsigned long addr,
                                   unsigned long end, struct host_vm_op *ops,
                                   int last_op, int *op_index, int force,
                                   union mm_context *mmu, void **flush,
                                   int (*do_ops)(union mm_context *,
                                                 struct host_vm_op *, int, int,
                                                 void **))
{
        pmd_t *pmd;
        unsigned long next;
        int ret = 0;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if(!pmd_present(*pmd)){
                        if(force || pmd_newpage(*pmd)){
                                ret = add_munmap(addr, next - addr, ops,
                                                 op_index, last_op, mmu,
                                                 flush, do_ops);
                                pmd_mkuptodate(*pmd);
                        }
                }
                else ret = update_pte_range(pmd, addr, next, ops, last_op,
                                            op_index, force, mmu, flush,
                                            do_ops);
        } while (pmd++, addr = next, ((addr != end) && !ret));

        return ret;
}
static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
                                   unsigned long end, struct host_vm_op *ops,
                                   int last_op, int *op_index, int force,
                                   union mm_context *mmu, void **flush,
                                   int (*do_ops)(union mm_context *,
                                                 struct host_vm_op *, int, int,
                                                 void **))
{
        pud_t *pud;
        unsigned long next;
        int ret = 0;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if(!pud_present(*pud)){
                        if(force || pud_newpage(*pud)){
                                ret = add_munmap(addr, next - addr, ops,
                                                 op_index, last_op, mmu,
                                                 flush, do_ops);
                                pud_mkuptodate(*pud);
                        }
                }
                else ret = update_pmd_range(pud, addr, next, ops, last_op,
                                            op_index, force, mmu, flush,
                                            do_ops);
        } while (pud++, addr = next, ((addr != end) && !ret));

        return ret;
}
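
/*
 * fix_range_common() walks the page tables of mm from start_addr to
 * end_addr, batching the resulting host mmap/munmap/mprotect operations in
 * a small on-stack array and issuing them through do_ops.  If anything
 * fails, the current process is killed, since the host address space no
 * longer matches its page tables.
 */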
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                      unsigned long end_addr, int force,
                      int (*do_ops)(union mm_context *, struct host_vm_op *,
                                    int, int, void **))
{
        pgd_t *pgd;
        union mm_context *mmu = &mm->context;
        struct host_vm_op ops[1];
        unsigned long addr = start_addr, next;
        int ret = 0, last_op = ARRAY_SIZE(ops) - 1, op_index = -1;
        void *flush = NULL;

        ops[0].type = NONE;
        pgd = pgd_offset(mm, addr);
        do {
                next = pgd_addr_end(addr, end_addr);
                if(!pgd_present(*pgd)){
                        if (force || pgd_newpage(*pgd)){
                                ret = add_munmap(addr, next - addr, ops,
                                                 &op_index, last_op, mmu,
                                                 &flush, do_ops);
                                pgd_mkuptodate(*pgd);
                        }
                }
                else ret = update_pud_range(pgd, addr, next, ops, last_op,
                                            &op_index, force, mmu, &flush,
                                            do_ops);
        } while (pgd++, addr = next, ((addr != end_addr) && !ret));

        if(!ret)
                ret = (*do_ops)(mmu, ops, op_index, 1, &flush);

        /* This is not an else because ret is modified above */
        if(ret) {
                printk("fix_range_common: failed, killing current process\n");
                force_sig(SIGKILL, current);
        }
}
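
/*
 * flush_tlb_kernel_range_common() brings the host mappings for the kernel
 * part of the address space (init_mm) into line with the page tables for
 * [start, end), unmapping, remapping, or reprotecting pages directly
 * through the os_* helpers.  Returns nonzero if anything was changed.
 */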
int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
        struct mm_struct *mm;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long addr, last;
        int updated = 0, err;

        mm = &init_mm;
        for(addr = start; addr < end;){
                pgd = pgd_offset(mm, addr);
                if(!pgd_present(*pgd)){
                        last = ADD_ROUND(addr, PGDIR_SIZE);
                        if(last > end)
                                last = end;
                        if(pgd_newpage(*pgd)){
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pud = pud_offset(pgd, addr);
                if(!pud_present(*pud)){
                        last = ADD_ROUND(addr, PUD_SIZE);
                        if(last > end)
                                last = end;
                        if(pud_newpage(*pud)){
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pmd = pmd_offset(pud, addr);
                if(!pmd_present(*pmd)){
                        last = ADD_ROUND(addr, PMD_SIZE);
                        if(last > end)
                                last = end;
                        if(pmd_newpage(*pmd)){
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pte = pte_offset_kernel(pmd, addr);
                if(!pte_present(*pte) || pte_newpage(*pte)){
                        updated = 1;
                        err = os_unmap_memory((void *) addr, PAGE_SIZE);
                        if(err < 0)
                                panic("munmap failed, errno = %d\n", -err);
                        if(pte_present(*pte))
                                map_memory(addr, pte_val(*pte) & PAGE_MASK,
                                           PAGE_SIZE, 1, 1, 1);
                }
                else if(pte_newprot(*pte)){
                        updated = 1;
                        os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
                }
                addr += PAGE_SIZE;
        }
        return(updated);
}
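
/*
 * flush_tlb_page() synchronizes the host mapping of a single user page with
 * its PTE, mapping, unmapping, or reprotecting it immediately rather than
 * through the batched ops array.  On any failure the process is killed.
 */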
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        struct mm_struct *mm = vma->vm_mm;
        void *flush = NULL;
        int r, w, x, prot, err = 0;
        struct mm_id *mm_id;

        address &= PAGE_MASK;
        pgd = pgd_offset(mm, address);
        if(!pgd_present(*pgd))
                goto kill;

        pud = pud_offset(pgd, address);
        if(!pud_present(*pud))
                goto kill;

        pmd = pmd_offset(pud, address);
        if(!pmd_present(*pmd))
                goto kill;

        pte = pte_offset_kernel(pmd, address);

        r = pte_read(*pte);
        w = pte_write(*pte);
        x = pte_exec(*pte);
        if (!pte_young(*pte)) {
                r = 0;
                w = 0;
        } else if (!pte_dirty(*pte)) {
                w = 0;
        }

        mm_id = &mm->context.skas.id;
        prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
                (x ? UM_PROT_EXEC : 0));

        if(pte_newpage(*pte)){
                if(pte_present(*pte)){
                        unsigned long long offset;
                        int fd;

                        fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
                        err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
                                  1, &flush);
                }
                else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
        }
        else if(pte_newprot(*pte))
                err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);

        if(err)
                goto kill;

        *pte = pte_mkuptodate(*pte);
        return;

kill:
        printk("Failed to flush page for address 0x%lx\n", address);
        force_sig(SIGKILL, current);
}
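
/* Out-of-line wrappers around the page table lookup macros. */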
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
        return(pgd_offset(mm, address));
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
        return(pud_offset(pgd, address));
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
        return(pmd_offset(pud, address));
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
        return(pte_offset_kernel(pmd, address));
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(task->mm, addr);
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return(pte_offset_map(pmd, addr));
}
void flush_tlb_all(void)
{
        flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        flush_tlb_kernel_range_common(start, end);
}

void flush_tlb_kernel_vm(void)
{
        flush_tlb_kernel_range_common(start_vm, end_vm);
}

void __flush_tlb_one(unsigned long addr)
{
        flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}
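
/*
 * do_ops() replays a batch of queued host_vm_ops against the host address
 * space identified by mmu->skas.id, stopping at the first failure.
 */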
static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
                  int finished, void **flush)
{
        struct host_vm_op *op;
        int i, ret = 0;

        for(i = 0; i <= last && !ret; i++){
                op = &ops[i];
                switch(op->type){
                case MMAP:
                        ret = map(&mmu->skas.id, op->u.mmap.addr,
                                  op->u.mmap.len, op->u.mmap.prot,
                                  op->u.mmap.fd, op->u.mmap.offset, finished,
                                  flush);
                        break;
                case MUNMAP:
                        ret = unmap(&mmu->skas.id, op->u.munmap.addr,
                                    op->u.munmap.len, finished, flush);
                        break;
                case MPROTECT:
                        ret = protect(&mmu->skas.id, op->u.mprotect.addr,
                                      op->u.mprotect.len, op->u.mprotect.prot,
                                      finished, flush);
                        break;
                default:
                        printk("Unknown op type %d in do_ops\n", op->type);
                        break;
                }
        }

        return ret;
}
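
/*
 * fix_range() is the user-address-space entry point into fix_range_common().
 * When the host has no /proc/mm support, the range is clipped at
 * CONFIG_STUB_START so the stub pages above it are left alone.
 */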
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
                      unsigned long end_addr, int force)
{
        if(!proc_mm && (end_addr > CONFIG_STUB_START))
                end_addr = CONFIG_STUB_START;

        fix_range_common(mm, start_addr, end_addr, force, do_ops);
}
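
/* Ranges with no mm (vma->vm_mm == NULL) are kernel ranges. */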
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end)
{
        if(vma->vm_mm == NULL)
                flush_tlb_kernel_range_common(start, end);
        else fix_range(vma->vm_mm, start, end, 0);
}
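
/*
 * flush_tlb_mm() flushes an entire user address space - up to task_size
 * with /proc/mm, otherwise up to the stub pages.
 */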
void flush_tlb_mm(struct mm_struct *mm)
{
        unsigned long end;

        /* Don't bother flushing if this address space is about to be
         * destroyed.
         */
        if(atomic_read(&mm->mm_users) == 0)
                return;

        end = proc_mm ? task_size : CONFIG_STUB_START;
        fix_range(mm, 0, end, 0);
}
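
/*
 * force_flush_all() re-establishes the host mappings for every VMA of the
 * current address space, forcing even up-to-date entries to be remapped.
 */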
void force_flush_all(void)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = mm->mmap;

        while(vma != NULL) {
                fix_range(mm, vma->vm_start, vma->vm_end, 1);
                vma = vma->vm_next;
        }
}