mmu_context_hash64.c

/*
 * MMU context allocation for 64-bit kernels.
 *
 * Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>

#include "icswx.h"

static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida);

/*
 * The proto-VSID space has 2^35 - 1 segments available for user mappings.
 * Each segment contains 2^28 bytes.  Each context maps 2^44 bytes,
 * so we can support 2^19 - 1 contexts (19 == 35 + 28 - 44).
 */
#define MAX_CONTEXT	((1UL << 19) - 1)
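/*
 * Worked arithmetic for the limit above (explanatory note, not part of the
 * original source): 2^35 segments at 2^28 bytes per segment give 2^63 bytes
 * of proto-VSID space; each context maps 2^44 bytes, so the number of
 * contexts is 2^(35 + 28 - 44) = 2^19, hence MAX_CONTEXT = 2^19 - 1.
 */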
int __init_new_context(void)
{
	int index;
	int err;

again:
	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = ida_get_new_above(&mmu_context_ida, 1, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > MAX_CONTEXT) {
		spin_lock(&mmu_context_lock);
		ida_remove(&mmu_context_ida, index);
		spin_unlock(&mmu_context_lock);
		return -ENOMEM;
	}

	return index;
}
EXPORT_SYMBOL_GPL(__init_new_context);
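/*
 * Note on the allocator above (added commentary, not from the original
 * file): this older IDA interface needs the ida_pre_get() and
 * ida_get_new_above() pair, retrying while the latter returns -EAGAIN,
 * and any id above MAX_CONTEXT is handed straight back.  A hypothetical
 * caller would pair the two exports roughly like this sketch:
 *
 *	int id = __init_new_context();	/* >= 1 on success, -errno on failure */
 *	if (id < 0)
 *		return id;
 *	...
 *	__destroy_context(id);		/* release the id when finished */
 */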
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	index = __init_new_context();
	if (index < 0)
		return index;

	/* The old code would re-promote on fork, we don't do that
	 * when using slices as it could cause problems promoting slices
	 * that have been forced down to 4K
	 */
	if (slice_mm_new_context(mm))
		slice_set_user_psize(mm, mmu_virtual_psize);
	subpage_prot_init_new_context(mm);
	mm->context.id = index;

#ifdef CONFIG_PPC_ICSWX
	mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
	if (!mm->context.cop_lockp) {
		__destroy_context(index);
		subpage_prot_free(mm);
		mm->context.id = MMU_NO_CONTEXT;
		return -ENOMEM;
	}
	spin_lock_init(mm->context.cop_lockp);
#endif /* CONFIG_PPC_ICSWX */

	return 0;
}
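/*
 * Explanatory note (not part of the original file): init_new_context() is
 * the arch hook the generic mm code calls when a fresh mm_struct is set up,
 * typically during fork()/exec(); the matching destroy_context() below runs
 * when the mm is finally dropped.
 */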
void __destroy_context(int context_id)
{
	spin_lock(&mmu_context_lock);
	ida_remove(&mmu_context_ida, context_id);
	spin_unlock(&mmu_context_lock);
}
EXPORT_SYMBOL_GPL(__destroy_context);

void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_ICSWX
	drop_cop(mm->context.acop, mm);
	kfree(mm->context.cop_lockp);
	mm->context.cop_lockp = NULL;
#endif /* CONFIG_PPC_ICSWX */

	__destroy_context(mm->context.id);
	subpage_prot_free(mm);
	mm->context.id = MMU_NO_CONTEXT;
}
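/*
 * Teardown note (an observation, not original commentary): destroy_context()
 * mirrors the init path above: the ICSWX state and its lock are released
 * first, then the context id goes back to the IDA and mm->context.id is reset
 * to MMU_NO_CONTEXT so the mm no longer refers to a freed id.
 */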