copypage-xsc3.c

/*
 *  linux/arch/arm/mm/copypage-xsc3.S
 *
 *  Copyright (C) 2004 Intel Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Adapted for 3rd gen XScale core, no more mini-dcache
 * Author: Matt Gilbert (matthew.m.gilbert@intel.com)
 */
#include <linux/init.h>
#include <linux/highmem.h>

/*
 * General note:
 *  We don't really want write-allocate cache behaviour for these functions
 *  since that will just eat through 8K of the cache.
 */
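/*
 * To that end, both routines below invalidate each 32-byte destination
 * line (mcr p15, 0, Rd, c7, c6, 1) immediately before overwriting it in
 * full, so any stale destination data is discarded rather than written
 * back or refilled from memory.
 */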
/*
 * XSC3 optimised copy_user_highpage
 *  r0 = destination
 *  r1 = source
 *
 * The source page may have some clean entries in the cache already, but we
 * can safely ignore them - break_cow() will flush them out of the cache
 * if we eventually end up using our copied page.
 *
 */
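/*
 * The inline assembly below copies the page in 64-byte chunks: each pass
 * of the "2:" loop performs eight ldrd/strd pairs and invalidates the two
 * 32-byte destination lines it is about to overwrite.  lr is loaded with
 * %0 = PAGE_SIZE / 64 - 1; the final pass is entered via "beq 2b" instead
 * of "bgt 1b", which keeps the pld prefetches at "1:" from running past
 * the end of the source page.
 */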
static void __naked
xsc3_mc_copy_user_page(void *kto, const void *kfrom)
{
        asm("\
        stmfd   sp!, {r4, r5, lr}               \n\
        mov     lr, %0                          \n\
                                                \n\
        pld     [r1, #0]                        \n\
        pld     [r1, #32]                       \n\
1:      pld     [r1, #64]                       \n\
        pld     [r1, #96]                       \n\
                                                \n\
2:      ldrd    r2, [r1], #8                    \n\
        mov     ip, r0                          \n\
        ldrd    r4, [r1], #8                    \n\
        mcr     p15, 0, ip, c7, c6, 1           @ invalidate\n\
        strd    r2, [r0], #8                    \n\
        ldrd    r2, [r1], #8                    \n\
        strd    r4, [r0], #8                    \n\
        ldrd    r4, [r1], #8                    \n\
        strd    r2, [r0], #8                    \n\
        strd    r4, [r0], #8                    \n\
        ldrd    r2, [r1], #8                    \n\
        mov     ip, r0                          \n\
        ldrd    r4, [r1], #8                    \n\
        mcr     p15, 0, ip, c7, c6, 1           @ invalidate\n\
        strd    r2, [r0], #8                    \n\
        ldrd    r2, [r1], #8                    \n\
        subs    lr, lr, #1                      \n\
        strd    r4, [r0], #8                    \n\
        ldrd    r4, [r1], #8                    \n\
        strd    r2, [r0], #8                    \n\
        strd    r4, [r0], #8                    \n\
        bgt     1b                              \n\
        beq     2b                              \n\
                                                \n\
        ldmfd   sp!, {r4, r5, pc}"
        :
        : "I" (PAGE_SIZE / 64 - 1));
}

void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr)
{
        void *kto, *kfrom;

        kto = kmap_atomic(to, KM_USER0);
        kfrom = kmap_atomic(from, KM_USER1);
        xsc3_mc_copy_user_page(kto, kfrom);
        kunmap_atomic(kfrom, KM_USER1);
        kunmap_atomic(kto, KM_USER0);
}

/*
 * XScale optimised clear_user_page
 *  r0 = destination
 *  r1 = virtual user address of ultimate destination page
 */
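/*
 * The loop below zeroes one 32-byte cache line per pass: the line is
 * invalidated, then four strd stores of the zeroed r2/r3 pair fill it.
 * r1 counts down from PAGE_SIZE / 32 lines.
 */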
void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
        asm volatile ("\
        mov     r1, %2                          \n\
        mov     r2, #0                          \n\
        mov     r3, #0                          \n\
1:      mcr     p15, 0, %0, c7, c6, 1           @ invalidate line\n\
        strd    r2, [%0], #8                    \n\
        strd    r2, [%0], #8                    \n\
        strd    r2, [%0], #8                    \n\
        strd    r2, [%0], #8                    \n\
        subs    r1, r1, #1                      \n\
        bne     1b"
        : "=r" (ptr)
        : "0" (kaddr), "I" (PAGE_SIZE / 32)
        : "r1", "r2", "r3");
        kunmap_atomic(kaddr, KM_USER0);
}

struct cpu_user_fns xsc3_mc_user_fns __initdata = {
        .cpu_clear_user_highpage = xsc3_mc_clear_user_highpage,
        .cpu_copy_user_highpage  = xsc3_mc_copy_user_highpage,
};
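
/*
 * Reference sketch only: a minimal, cache-agnostic rendering of what the
 * two hooks above implement, assuming <linux/string.h> for memcpy() and
 * memset().  The *_reference names are illustrative and not part of this
 * file; the real routines differ by invalidating each 32-byte destination
 * line and by the hand-scheduled ldrd/strd copy loop.
 */
#include <linux/string.h>

static void xsc3_mc_copy_user_highpage_reference(struct page *to,
        struct page *from, unsigned long vaddr)
{
        void *kto = kmap_atomic(to, KM_USER0);
        void *kfrom = kmap_atomic(from, KM_USER1);

        memcpy(kto, kfrom, PAGE_SIZE);          /* plain copy of one page */

        kunmap_atomic(kfrom, KM_USER1);
        kunmap_atomic(kto, KM_USER0);
}

static void xsc3_mc_clear_user_highpage_reference(struct page *page,
        unsigned long vaddr)
{
        void *kaddr = kmap_atomic(page, KM_USER0);

        memset(kaddr, 0, PAGE_SIZE);            /* plain clear of one page */

        kunmap_atomic(kaddr, KM_USER0);
}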