mmu_context_hash64.c

/*
 * MMU context allocation for 64-bit kernels.
 *
 * Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/gfp.h>

#include <asm/mmu_context.h>

static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida);

/*
 * The proto-VSID space has 2^35 - 1 segments available for user mappings.
 * Each segment contains 2^28 bytes. Each context maps 2^44 bytes,
 * so we can support 2^19-1 contexts (19 == 35 + 28 - 44).
 */
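/*
 * Put differently: each context spans 2^44 / 2^28 = 2^16 segments, so the
 * 2^35 proto-VSID segments cover 2^35 / 2^16 = 2^19 context ids; id 0 is
 * reserved as NO_CONTEXT, leaving 2^19 - 1 usable contexts.
 */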
#define NO_CONTEXT	0
#define MAX_CONTEXT	((1UL << 19) - 1)
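
/*
 * Allocate a new context id from the IDA. Ids start at 1, so id 0 stays
 * free to mean NO_CONTEXT; an id above MAX_CONTEXT is released again and
 * -ENOMEM returned.
 */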
int __init_new_context(void)
{
	int index;
	int err;

again:
	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = ida_get_new_above(&mmu_context_ida, 1, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > MAX_CONTEXT) {
		spin_lock(&mmu_context_lock);
		ida_remove(&mmu_context_ida, index);
		spin_unlock(&mmu_context_lock);
		return -ENOMEM;
	}

	return index;
}
EXPORT_SYMBOL_GPL(__init_new_context);
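
/*
 * Set up the MMU context for a new mm: allocate a context id and
 * initialise the slice and subpage-protection state.
 */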
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	index = __init_new_context();
	if (index < 0)
		return index;

	/* The old code would re-promote on fork; we don't do that when using
	 * slices, as it could cause problems promoting slices that have been
	 * forced down to 4K.
	 */
	if (slice_mm_new_context(mm))
		slice_set_user_psize(mm, mmu_virtual_psize);
	subpage_prot_init_new_context(mm);
	mm->context.id = index;

	return 0;
}
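
/*
 * Release a context id previously handed out by __init_new_context().
 */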
void __destroy_context(int context_id)
{
	spin_lock(&mmu_context_lock);
	ida_remove(&mmu_context_ida, context_id);
	spin_unlock(&mmu_context_lock);
}
EXPORT_SYMBOL_GPL(__destroy_context);
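
/*
 * Tear down the context of a dying mm: free its id, drop its subpage
 * protection tables, and mark the mm as having no context.
 */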
void destroy_context(struct mm_struct *mm)
{
	__destroy_context(mm->context.id);
	subpage_prot_free(mm);
	mm->context.id = NO_CONTEXT;
}