pgtable.c

/*
 * This file contains common routines for dealing with free of page tables
 *
 * Derived from arch/powerpc/mm/tlb_64.c:
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
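
/*
 * Page tables freed during a TLB gather are not handed back to the
 * allocator right away: other CPUs may still be walking them, so they
 * are queued in per-CPU batches and released from an RCU callback
 * (or, if no batch page can be allocated, freed immediately after an
 * explicit sync with all CPUs).
 */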
static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
static unsigned long pte_freelist_forced_free;
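
/* One page worth of deferred page-table frees, linked through an RCU head. */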
struct pte_freelist_batch
{
	struct rcu_head	rcu;
	unsigned int	index;
	pgtable_free_t	tables[0];
};
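
/* Number of pgtable_free_t entries that fit in the page after the header. */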
#define PTE_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
	  / sizeof(pgtable_free_t))

static void pte_free_smp_sync(void *arg)
{
	/* Do nothing, just ensure we sync with all CPUs */
}

/* This is only called when we are critically out of memory
 * (and fail to get a page in pte_free_tlb).
 */
static void pgtable_free_now(pgtable_free_t pgf)
{
	pte_freelist_forced_free++;

	smp_call_function(pte_free_smp_sync, NULL, 1);

	pgtable_free(pgf);
}
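
/*
 * RCU callback: runs once a grace period has elapsed, so no CPU can
 * still be using the page tables collected in this batch.
 */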
static void pte_free_rcu_callback(struct rcu_head *head)
{
	struct pte_freelist_batch *batch =
		container_of(head, struct pte_freelist_batch, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		pgtable_free(batch->tables[i]);

	free_page((unsigned long)batch);
}
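
/* Hand a full (or final) batch over to RCU for deferred freeing. */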
static void pte_free_submit(struct pte_freelist_batch *batch)
{
	INIT_RCU_HEAD(&batch->rcu);
	call_rcu(&batch->rcu, pte_free_rcu_callback);
}
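
/*
 * Queue a page table for freeing.  If no other CPU can be using the mm,
 * free it straight away; otherwise append it to the per-CPU batch,
 * allocating a new batch page (or falling back to a forced synchronous
 * free) as needed, and submit the batch once it is full.
 */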
void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
		pgtable_free(pgf);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			pgtable_free_now(pgf);
			return;
		}
		(*batchp)->index = 0;
	}
	(*batchp)->tables[(*batchp)->index++] = pgf;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}
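
/* Flush any partially filled batch at the end of a TLB gather. */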
void pte_free_finish(void)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (*batchp == NULL)
		return;

	pte_free_submit(*batchp);
	*batchp = NULL;
}