__invalidate_icache.S

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * A routine for synchronizing the instruction and data caches.
 * Useful for self-modifying code.
 *
 * r0 holds the buffer address
 * r1 holds the size in bytes
 */
#include <arch/chip.h>
#include <feedback.h>

#if defined(__NEWLIB__) || defined(__BME__)
#include <sys/page.h>
#else
#include <asm/page.h>
#endif

#ifdef __tilegx__
/* Share code among Tile family chips but adjust opcodes appropriately. */
#define slt cmpltu
#define bbst blbst
#define bnezt bnzt
#endif

#if defined(__tilegx__) && __SIZEOF_POINTER__ == 4
/* Force 32-bit ops so pointers wrap around appropriately. */
#define ADD_PTR addx
#define ADDI_PTR addxi
#else
#define ADD_PTR add
#define ADDI_PTR addi
#endif
        .section .text.__invalidate_icache, "ax"
        .global __invalidate_icache
        .type __invalidate_icache,@function
        .hidden __invalidate_icache
        .align 8
__invalidate_icache:
        FEEDBACK_ENTER(__invalidate_icache)
        {
         ADD_PTR r1, r0, r1                     /* end of buffer */
         blez r1, .Lexit                        /* skip out if size <= 0 */
        }
        {
         ADDI_PTR r1, r1, -1                    /* point to last byte to flush */
         andi r0, r0, -CHIP_L1I_LINE_SIZE()     /* align to cache-line size */
        }
        {
         andi r1, r1, -CHIP_L1I_LINE_SIZE()     /* last cache line to flush */
         mf                                     /* memory fence: order prior stores before the flush */
        }
#if CHIP_L1I_CACHE_SIZE() > PAGE_SIZE
        {
         moveli r4, CHIP_L1I_CACHE_SIZE() / PAGE_SIZE  /* loop counter */
         move r2, r0                            /* remember starting address */
        }
#endif
        drain                                   /* wait for earlier memory operations to drain */
        {
         slt r3, r0, r1                         /* set up loop invariant */
#if CHIP_L1I_CACHE_SIZE() > PAGE_SIZE
         moveli r6, PAGE_SIZE
#endif
        }
.Lentry:
        {
         icoh r0                                /* flush this line from the icache */
         ADDI_PTR r0, r0, CHIP_L1I_LINE_SIZE()  /* advance buffer */
        }
        {
         slt r3, r0, r1                         /* check if buffer < buffer + size */
         bbst r3, .Lentry                       /* loop if buffer < buffer + size */
        }
#if CHIP_L1I_CACHE_SIZE() > PAGE_SIZE
        {
         ADD_PTR r2, r2, r6                     /* advance start of buffer by one page */
         ADD_PTR r1, r1, r6                     /* advance end of buffer by one page */
        }
        {
         move r0, r2                            /* restart from the new start address */
         addi r4, r4, -1                        /* one fewer page-sized pass remaining */
        }
        {
         slt r3, r0, r1                         /* set up loop invariant */
         bnezt r4, .Lentry                      /* make another pass if any remain */
        }
#endif
        drain                                   /* wait for outstanding operations to drain */
.Lexit:
        jrp lr                                  /* return to caller */

.Lend___invalidate_icache:
        .size __invalidate_icache, \
              .Lend___invalidate_icache - __invalidate_icache
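
From the caller's point of view, the register contract documented in the header comment (r0 = buffer address, r1 = size in bytes) maps directly onto a two-argument C call. The fragment below is a minimal sketch of how a program that generates or patches code at runtime might use this routine; the install_code helper and generated_fn type are illustrative names introduced here, not part of the original file, and the buffer is assumed to already be mapped executable.

#include <stddef.h>
#include <string.h>

/* Declaration matching the register contract above:
 * r0 = buffer address, r1 = size in bytes. */
extern void __invalidate_icache(void *buf, size_t size);

typedef int (*generated_fn)(int);

/* Hypothetical helper: copy freshly generated instructions into an
 * already-executable buffer, then synchronize the instruction and data
 * caches so the new bytes are fetched rather than stale ones. */
static generated_fn install_code(void *exec_buf, const void *code, size_t size)
{
        memcpy(exec_buf, code, size);           /* writes land in the data cache */
        __invalidate_icache(exec_buf, size);    /* sync icache with those writes */
        return (generated_fn)exec_buf;          /* common but not strictly portable cast */
}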