tlb.c

/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "asm/page.h"
#include "asm/pgalloc.h"
#include "asm/pgtable.h"
#include "asm/tlbflush.h"
#include "choose-mode.h"
#include "mode_kern.h"
#include "as-layout.h"
#include "tlb.h"
#include "mem.h"
#include "mem_user.h"
#include "os.h"
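
/*
 * The add_* helpers below queue host VM operations in the caller's ops[]
 * array.  Each helper first tries to merge the new request into the
 * previous entry when the two are contiguous and compatible; when the
 * array fills up, it is flushed through do_ops() before the new entry is
 * queued.
 */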
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
		    unsigned int prot, struct host_vm_op *ops, int *index,
		    int last_filled, union mm_context *mmu, void **flush,
		    int (*do_ops)(union mm_context *, struct host_vm_op *,
				  int, int, void **))
{
	__u64 offset;
	struct host_vm_op *last;
	int fd, ret = 0;

	fd = phys_mapping(phys, &offset);
	if(*index != -1){
		last = &ops[*index];
		if((last->type == MMAP) &&
		   (last->u.mmap.addr + last->u.mmap.len == virt) &&
		   (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
		   (last->u.mmap.offset + last->u.mmap.len == offset)){
			last->u.mmap.len += len;
			return 0;
		}
	}

	if(*index == last_filled){
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type = MMAP,
					       .u = { .mmap = {
						       .addr = virt,
						       .len = len,
						       .prot = prot,
						       .fd = fd,
						       .offset = offset }
					       } });
	return ret;
}
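
/*
 * Queue an MUNMAP operation, merging it into the previous entry when the
 * new region immediately follows the one already queued.
 */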
static int add_munmap(unsigned long addr, unsigned long len,
		      struct host_vm_op *ops, int *index, int last_filled,
		      union mm_context *mmu, void **flush,
		      int (*do_ops)(union mm_context *, struct host_vm_op *,
				    int, int, void **))
{
	struct host_vm_op *last;
	int ret = 0;

	if(*index != -1){
		last = &ops[*index];
		if((last->type == MUNMAP) &&
		   (last->u.munmap.addr + last->u.munmap.len == addr)){
			last->u.munmap.len += len;
			return 0;
		}
	}

	if(*index == last_filled){
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type = MUNMAP,
					       .u = { .munmap = {
						       .addr = addr,
						       .len = len } } });
	return ret;
}
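
/*
 * Queue an MPROTECT operation, merging it into the previous entry when the
 * regions are adjacent and the protection bits match.
 */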
static int add_mprotect(unsigned long addr, unsigned long len,
			unsigned int prot, struct host_vm_op *ops, int *index,
			int last_filled, union mm_context *mmu, void **flush,
			int (*do_ops)(union mm_context *, struct host_vm_op *,
				      int, int, void **))
{
	struct host_vm_op *last;
	int ret = 0;

	if(*index != -1){
		last = &ops[*index];
		if((last->type == MPROTECT) &&
		   (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
		   (last->u.mprotect.prot == prot)){
			last->u.mprotect.len += len;
			return 0;
		}
	}

	if(*index == last_filled){
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type = MPROTECT,
					       .u = { .mprotect = {
						       .addr = addr,
						       .len = len,
						       .prot = prot } } });
	return ret;
}
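
/*
 * ADD_ROUND(n, inc) advances n to the next inc-aligned boundary above it;
 * inc must be a power of two (PGDIR_SIZE, PUD_SIZE and PMD_SIZE below).
 */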
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
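
/*
 * Walk the PTEs covering [addr, end) under one PMD.  The host protection
 * is derived from the r/w/x bits, with all access dropped for a page that
 * is not marked young and write access dropped for one that is not dirty,
 * presumably so the first access or write faults and the accessed/dirty
 * state can be tracked.  Fresh pages are remapped or unmapped,
 * protection-only changes become MPROTECT ops, and every PTE is marked
 * up to date.
 */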
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end, struct host_vm_op *ops,
				   int last_op, int *op_index, int force,
				   union mm_context *mmu, void **flush,
				   int (*do_ops)(union mm_context *,
						 struct host_vm_op *, int, int,
						 void **))
{
	pte_t *pte;
	int r, w, x, prot, ret = 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		r = pte_read(*pte);
		w = pte_write(*pte);
		x = pte_exec(*pte);
		if (!pte_young(*pte)) {
			r = 0;
			w = 0;
		} else if (!pte_dirty(*pte)) {
			w = 0;
		}
		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
			(x ? UM_PROT_EXEC : 0));
		if(force || pte_newpage(*pte)){
			if(pte_present(*pte))
				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
					       PAGE_SIZE, prot, ops, op_index,
					       last_op, mmu, flush, do_ops);
			else ret = add_munmap(addr, PAGE_SIZE, ops, op_index,
					      last_op, mmu, flush, do_ops);
		}
		else if(pte_newprot(*pte))
			ret = add_mprotect(addr, PAGE_SIZE, prot, ops, op_index,
					   last_op, mmu, flush, do_ops);
		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr != end) && !ret));

	return ret;
}
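
/*
 * Walk the PMD entries covering [addr, end) under one PUD: a range whose
 * PMD is absent is unmapped in one go (when forced or freshly cleared),
 * anything else recurses into update_pte_range().
 */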
static inline int update_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end, struct host_vm_op *ops,
				   int last_op, int *op_index, int force,
				   union mm_context *mmu, void **flush,
				   int (*do_ops)(union mm_context *,
						 struct host_vm_op *, int, int,
						 void **))
{
	pmd_t *pmd;
	unsigned long next;
	int ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if(!pmd_present(*pmd)){
			if(force || pmd_newpage(*pmd)){
				ret = add_munmap(addr, next - addr, ops,
						 op_index, last_op, mmu,
						 flush, do_ops);
				pmd_mkuptodate(*pmd);
			}
		}
		else ret = update_pte_range(pmd, addr, next, ops, last_op,
					    op_index, force, mmu, flush,
					    do_ops);
	} while (pmd++, addr = next, ((addr != end) && !ret));

	return ret;
}
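
/*
 * Same walk, one level up: the PUD entries covering [addr, end) under one
 * PGD.
 */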
static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
				   unsigned long end, struct host_vm_op *ops,
				   int last_op, int *op_index, int force,
				   union mm_context *mmu, void **flush,
				   int (*do_ops)(union mm_context *,
						 struct host_vm_op *, int, int,
						 void **))
{
	pud_t *pud;
	unsigned long next;
	int ret = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if(!pud_present(*pud)){
			if(force || pud_newpage(*pud)){
				ret = add_munmap(addr, next - addr, ops,
						 op_index, last_op, mmu,
						 flush, do_ops);
				pud_mkuptodate(*pud);
			}
		}
		else ret = update_pmd_range(pud, addr, next, ops, last_op,
					    op_index, force, mmu, flush,
					    do_ops);
	} while (pud++, addr = next, ((addr != end) && !ret));

	return ret;
}
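
/*
 * Walk the page tables over [start_addr, end_addr), queueing the host VM
 * operations they imply and flushing whatever is still queued once the
 * walk finishes.  A failure leaves the host address space out of sync
 * with the page tables, so the current process is killed.
 */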
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force,
		      int (*do_ops)(union mm_context *, struct host_vm_op *,
				    int, int, void **))
{
	pgd_t *pgd;
	union mm_context *mmu = &mm->context;
	struct host_vm_op ops[1];
	unsigned long addr = start_addr, next;
	int ret = 0, last_op = ARRAY_SIZE(ops) - 1, op_index = -1;
	void *flush = NULL;

	ops[0].type = NONE;
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end_addr);
		if(!pgd_present(*pgd)){
			if (force || pgd_newpage(*pgd)){
				ret = add_munmap(addr, next - addr, ops,
						 &op_index, last_op, mmu,
						 &flush, do_ops);
				pgd_mkuptodate(*pgd);
			}
		}
		else ret = update_pud_range(pgd, addr, next, ops, last_op,
					    &op_index, force, mmu, &flush,
					    do_ops);
	} while (pgd++, addr = next, ((addr != end_addr) && !ret));

	if(!ret)
		ret = (*do_ops)(mmu, ops, op_index, 1, &flush);

	/* This is not an else because ret is modified above */
	if(ret) {
		printk("fix_range_common: failed, killing current process\n");
		force_sig(SIGKILL, current);
	}
}
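
/*
 * Flush a kernel range against the host directly: missing or freshly
 * cleared entries at any level get their whole range munmapped, fresh
 * present pages are remapped, and protection-only changes go through
 * os_protect_memory().  Returns nonzero if anything changed.
 */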
int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err;

	mm = &init_mm;
	for(addr = start; addr < end;){
		pgd = pgd_offset(mm, addr);
		if(!pgd_present(*pgd)){
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if(last > end)
				last = end;
			if(pgd_newpage(*pgd)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(pgd, addr);
		if(!pud_present(*pud)){
			last = ADD_ROUND(addr, PUD_SIZE);
			if(last > end)
				last = end;
			if(pud_newpage(*pud)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if(!pmd_present(*pmd)){
			last = ADD_ROUND(addr, PMD_SIZE);
			if(last > end)
				last = end;
			if(pmd_newpage(*pmd)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if(!pte_present(*pte) || pte_newpage(*pte)){
			updated = 1;
			err = os_unmap_memory((void *) addr,
					      PAGE_SIZE);
			if(err < 0)
				panic("munmap failed, errno = %d\n",
				      -err);
			if(pte_present(*pte))
				map_memory(addr,
					   pte_val(*pte) & PAGE_MASK,
					   PAGE_SIZE, 1, 1, 1);
		}
		else if(pte_newprot(*pte)){
			updated = 1;
			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
		}
		addr += PAGE_SIZE;
	}

	return(updated);
}
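
/*
 * Out-of-line wrappers around the page table accessors, presumably for
 * callers that cannot use the inline versions directly.
 */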
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
	return(pgd_offset(mm, address));
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
	return(pud_offset(pgd, address));
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
	return(pmd_offset(pud, address));
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
	return(pte_offset_kernel(pmd, address));
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(task->mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return(pte_offset_map(pmd, addr));
}
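
/*
 * The flush entry points below dispatch to the tt- or skas-mode
 * implementation via CHOOSE_MODE.
 */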
void flush_tlb_all(void)
{
	flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	CHOOSE_MODE_PROC(flush_tlb_kernel_range_tt,
			 flush_tlb_kernel_range_common, start, end);
}

void flush_tlb_kernel_vm(void)
{
	CHOOSE_MODE(flush_tlb_kernel_vm_tt(),
		    flush_tlb_kernel_range_common(start_vm, end_vm));
}

void __flush_tlb_one(unsigned long addr)
{
	CHOOSE_MODE_PROC(__flush_tlb_one_tt, __flush_tlb_one_skas, addr);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	CHOOSE_MODE_PROC(flush_tlb_range_tt, flush_tlb_range_skas, vma, start,
			 end);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	CHOOSE_MODE_PROC(flush_tlb_mm_tt, flush_tlb_mm_skas, mm);
}

void force_flush_all(void)
{
	CHOOSE_MODE(force_flush_all_tt(), force_flush_all_skas());
}