/* mmu_context_hash64.c */
/*
 * MMU context allocation for 64-bit kernels.
 *
 * Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/module.h>

#include <asm/mmu_context.h>
/* Serializes all modifications of mmu_context_idr below. */
static DEFINE_SPINLOCK(mmu_context_lock);
/* Allocator for context ids; ids are handed out starting from 1. */
static DEFINE_IDR(mmu_context_idr);

/*
 * The proto-VSID space has 2^35 - 1 segments available for user mappings.
 * Each segment contains 2^28 bytes. Each context maps 2^44 bytes,
 * so we can support 2^19-1 contexts (19 == 35 + 28 - 44).
 */
#define NO_CONTEXT	0			/* sentinel: mm has no context id */
#define MAX_CONTEXT	((1UL << 19) - 1)	/* largest usable context id */
  31. int __init_new_context(void)
  32. {
  33. int index;
  34. int err;
  35. again:
  36. if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
  37. return -ENOMEM;
  38. spin_lock(&mmu_context_lock);
  39. err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
  40. spin_unlock(&mmu_context_lock);
  41. if (err == -EAGAIN)
  42. goto again;
  43. else if (err)
  44. return err;
  45. if (index > MAX_CONTEXT) {
  46. spin_lock(&mmu_context_lock);
  47. idr_remove(&mmu_context_idr, index);
  48. spin_unlock(&mmu_context_lock);
  49. return -ENOMEM;
  50. }
  51. return index;
  52. }
  53. EXPORT_SYMBOL_GPL(__init_new_context);
  54. int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
  55. {
  56. int index;
  57. index = __init_new_context();
  58. if (index < 0)
  59. return index;
  60. /* The old code would re-promote on fork, we don't do that
  61. * when using slices as it could cause problem promoting slices
  62. * that have been forced down to 4K
  63. */
  64. if (slice_mm_new_context(mm))
  65. slice_set_user_psize(mm, mmu_virtual_psize);
  66. subpage_prot_init_new_context(mm);
  67. mm->context.id = index;
  68. return 0;
  69. }
/*
 * Release a context id previously handed out by __init_new_context().
 * mmu_context_lock serializes the removal against concurrent
 * allocations in __init_new_context().
 */
void __destroy_context(int context_id)
{
	spin_lock(&mmu_context_lock);
	idr_remove(&mmu_context_idr, context_id);
	spin_unlock(&mmu_context_lock);
}
EXPORT_SYMBOL_GPL(__destroy_context);
/*
 * Tear down the MMU context of a dying mm: return its id to the
 * allocator, free the subpage-protection state, and mark the mm as
 * having no context.
 */
void destroy_context(struct mm_struct *mm)
{
	__destroy_context(mm->context.id);
	subpage_prot_free(mm);
	/* Reset so a stale id is never reused through this mm. */
	mm->context.id = NO_CONTEXT;
}