vmlinux.lds.h

#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#ifndef VMLINUX_SYMBOL
#define VMLINUX_SYMBOL(_sym_) _sym_
#endif
/* Align . to an 8-byte boundary, which equals the maximum function alignment. */
#define ALIGN_FUNCTION() . = ALIGN(8)

#define RODATA \
	.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
		*(.rodata) *(.rodata.*) \
		*(__vermagic)	/* Kernel version magic */ \
	} \
	\
	.rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
		*(.rodata1) \
	} \
	\
	/* PCI quirks */ \
	.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
		*(.pci_fixup_early) \
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
		*(.pci_fixup_header) \
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
		*(.pci_fixup_final) \
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
		*(.pci_fixup_enable) \
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
	} \
	\
	/* Kernel symbol table: Normal symbols */ \
	__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab) = .; \
		*(__ksymtab) \
		VMLINUX_SYMBOL(__stop___ksymtab) = .; \
	} \
	\
	/* Kernel symbol table: GPL-only symbols */ \
	__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
		*(__ksymtab_gpl) \
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
	} \
	\
	/* Kernel symbol table: CRCs of normal symbols */ \
	__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab) = .; \
		*(__kcrctab) \
		VMLINUX_SYMBOL(__stop___kcrctab) = .; \
	} \
	\
	/* Kernel symbol table: CRCs of GPL-only symbols */ \
	__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
		*(__kcrctab_gpl) \
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
	} \
	\
	/* Kernel symbol table: strings */ \
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
		*(__ksymtab_strings) \
	} \
	\
	/* Built-in module parameters. */ \
	__param : AT(ADDR(__param) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___param) = .; \
		*(__param) \
		VMLINUX_SYMBOL(__stop___param) = .; \
	}
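Each __start___*/__stop___* pair lets C code treat the corresponding output section as an array: EXPORT_SYMBOL() entries accumulate between the __ksymtab markers, module_param() entries between the __param markers, and so on. As a rough illustration only (not the kernel's actual lookup code; find_builtin_export is a hypothetical helper, and the struct kernel_symbol layout with value/name fields is assumed from the 2.6-era <linux/module.h>), walking the built-in export table looks like this:

#include <linux/module.h>	/* struct kernel_symbol (assumed layout) */
#include <linux/string.h>

/* Boundary symbols emitted by the __ksymtab output section above. */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];

/* Hypothetical helper: look up a built-in exported symbol by name. */
static const struct kernel_symbol *find_builtin_export(const char *name)
{
	const struct kernel_symbol *sym;

	for (sym = __start___ksymtab; sym < __stop___ksymtab; sym++)
		if (strcmp(sym->name, name) == 0)
			return sym;
	return NULL;
}

The module loader and the boot-time parameter parser perform essentially this kind of bounded walk over their respective sections.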
#define SECURITY_INIT \
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .; \
		*(.security_initcall.init) \
		VMLINUX_SYMBOL(__security_initcall_end) = .; \
	}
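security_initcall() registers an initcall_t function pointer in .security_initcall.init, and early boot then invokes everything between the two markers. A simplified stand-in for that walk (the function name here is illustrative, not the kernel's own):

#include <linux/init.h>		/* initcall_t, __init */

/* Boundary symbols emitted by the SECURITY_INIT output section above. */
extern initcall_t __security_initcall_start[], __security_initcall_end[];

/* Illustrative walk: call every registered security initcall in order. */
static void __init run_security_initcalls(void)
{
	initcall_t *call;

	for (call = __security_initcall_start;
	     call < __security_initcall_end; call++)
		(*call)();
}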
/* .sched.text is aligned to the function alignment to ensure we get the same
 * addresses on the second ld pass when generating System.map. */
#define SCHED_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__sched_text_start) = .; \
	*(.sched.text) \
	VMLINUX_SYMBOL(__sched_text_end) = .;
/* .spinlock.text is aligned to the function alignment to ensure we get the same
 * addresses on the second ld pass when generating System.map. */
#define LOCK_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__lock_text_start) = .; \
	*(.spinlock.text) \
	VMLINUX_SYMBOL(__lock_text_end) = .;
#define KPROBES_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__kprobes_text_start) = .; \
	*(.kprobes.text) \
	VMLINUX_SYMBOL(__kprobes_text_end) = .;
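The *_text_start/*_text_end pairs serve as simple address-range checks: the scheduler skips its own functions when computing wait channels, the spinlock range backs similar checks, and kprobes rejects probes placed inside .kprobes.text. A minimal sketch of such a range test (addr_in_sched_text is a made-up name; the kernel's in_sched_functions() performs a comparable check):

/* Range symbols emitted by SCHED_TEXT above. */
extern char __sched_text_start[], __sched_text_end[];

/* Sketch: does this address fall inside the scheduler's text range? */
static int addr_in_sched_text(unsigned long addr)
{
	return addr >= (unsigned long)__sched_text_start &&
	       addr <  (unsigned long)__sched_text_end;
}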