suspend.c

#include <linux/init.h>
#include <linux/slab.h>

#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/memory.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>
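
/*
 * Both routines below are implemented in assembly (arch/arm/kernel/sleep.S).
 * The u32 argument to __cpu_suspend carries the caller's MPIDR, so the
 * saved context pointer can be stashed in a per-CPU slot that the resume
 * path is able to locate with the MMU off.
 */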
extern int __cpu_suspend(unsigned long, int (*)(unsigned long), u32 cpuid);
extern void cpu_resume_mmu(void);

#ifdef CONFIG_MMU
/*
 * Hide the first two arguments to __cpu_suspend - these are an implementation
 * detail which platform code shouldn't have to know about.
 */
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
	struct mm_struct *mm = current->active_mm;
	u32 __mpidr = cpu_logical_map(smp_processor_id());
	int ret;

	if (!idmap_pgd)
		return -EINVAL;

	/*
	 * Provide a temporary page table with an identity mapping for
	 * the MMU-enable code, required for resuming.  On successful
	 * resume (indicated by a zero return code), we need to switch
	 * back to the correct page tables.
	 */
	ret = __cpu_suspend(arg, fn, __mpidr);
	if (ret == 0) {
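		/*
		 * We only get here after a real suspend/resume cycle: the
		 * branch predictor and TLBs may hold stale entries from the
		 * temporary identity mapping, so flush them after switching
		 * back to the task's page tables.
		 */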
		cpu_switch_mm(mm->pgd, mm);
		local_flush_bp_all();
		local_flush_tlb_all();
	}

	return ret;
}
#else
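/* Without an MMU there are no page tables to set up or restore. */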
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
	u32 __mpidr = cpu_logical_map(smp_processor_id());
	return __cpu_suspend(arg, fn, __mpidr);
}
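
/* Dummy definition so that __cpu_suspend_save() below still compiles. */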
#define	idmap_pgd	NULL
#endif

/*
 * This is called by __cpu_suspend() to save the state, and do whatever
 * flushing is required to ensure that when the CPU goes to sleep we have
 * the necessary data available when the caches are not searched.
 */
void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
{
	u32 *ctx = ptr;

	*save_ptr = virt_to_phys(ptr);

	/* This must correspond to the LDM in cpu_resume() assembly */
	*ptr++ = virt_to_phys(idmap_pgd);
	*ptr++ = sp;
	*ptr++ = virt_to_phys(cpu_do_resume);
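
	/*
	 * cpu_do_suspend() is the processor-specific hook that saves the
	 * remaining CPU state (control and coprocessor registers) after
	 * the three words written above.
	 */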
	cpu_do_suspend(ptr);

	flush_cache_louis();

	/*
	 * flush_cache_louis does not guarantee that
	 * save_ptr and ptr are cleaned to main memory,
	 * just up to the Level of Unification Inner Shareable.
	 * Since the context pointer and context itself
	 * are to be retrieved with the MMU off that
	 * data must be cleaned from all cache levels
	 * to main memory using "area" cache primitives.
	 */
	__cpuc_flush_dcache_area(ctx, ptrsz);
	__cpuc_flush_dcache_area(save_ptr, sizeof(*save_ptr));

	outer_clean_range(*save_ptr, *save_ptr + ptrsz);
	outer_clean_range(virt_to_phys(save_ptr),
			  virt_to_phys(save_ptr) + sizeof(*save_ptr));
}
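
/*
 * sleep_save_sp is defined in the resume assembly and holds the physical
 * address of the stash allocated below, so that the MMU-off resume code
 * can locate each CPU's saved context pointer.
 */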
extern struct sleep_save_sp sleep_save_sp;

static int cpu_suspend_alloc_sp(void)
{
	void *ctx_ptr;

	/* ctx_ptr is an array of physical addresses */
	ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(u32), GFP_KERNEL);
	if (WARN_ON(!ctx_ptr))
		return -ENOMEM;

	sleep_save_sp.save_ptr_stash = ctx_ptr;
	sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
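
	/*
	 * Clean the updated pointers out to main memory so that the resume
	 * code, running with the MMU and caches off, reads the right values.
	 */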
	sync_cache_w(&sleep_save_sp);
	return 0;
}
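
/* The stash must be in place before any CPU can attempt to suspend. */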
early_initcall(cpu_suspend_alloc_sp);