blockgroup_lock.h

#ifndef _LINUX_BLOCKGROUP_LOCK_H
#define _LINUX_BLOCKGROUP_LOCK_H
/*
 * Per-blockgroup locking for ext2 and ext3.
 *
 * Simple hashed spinlocking.
 */

#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/cache.h>

#ifdef CONFIG_SMP

/*
 * We want a power-of-two.  Is there a better way than this?
 */

#if NR_CPUS >= 32
#define NR_BG_LOCKS	128
#elif NR_CPUS >= 16
#define NR_BG_LOCKS	64
#elif NR_CPUS >= 8
#define NR_BG_LOCKS	32
#elif NR_CPUS >= 4
#define NR_BG_LOCKS	16
#elif NR_CPUS >= 2
#define NR_BG_LOCKS	8
#else
#define NR_BG_LOCKS	4
#endif

#else	/* CONFIG_SMP */
#define NR_BG_LOCKS	1
#endif	/* CONFIG_SMP */

struct bgl_lock {
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

struct blockgroup_lock {
	struct bgl_lock locks[NR_BG_LOCKS];
};

static inline void bgl_lock_init(struct blockgroup_lock *bgl)
{
	int i;

	for (i = 0; i < NR_BG_LOCKS; i++)
		spin_lock_init(&bgl->locks[i].lock);
}

/*
 * The accessor is a macro so we can embed a blockgroup_lock into different
 * superblock types
 */
#define sb_bgl_lock(sb, block_group) \
	(&(sb)->s_blockgroup_lock.locks[(block_group) & (NR_BG_LOCKS-1)].lock)

#endif
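
For context, here is a minimal sketch of how a filesystem might embed and use this header. The `demo_sb_info` type and the two functions are hypothetical, not part of the header; the pattern is modeled on how ext2/ext3 keep a blockgroup_lock in their per-mount superblock-info struct. Note that sb_bgl_lock() expects its first argument to have a field literally named s_blockgroup_lock.

	/* Hypothetical per-mount info embedding the hashed lock array. */
	#include <linux/blockgroup_lock.h>
	#include <linux/spinlock.h>

	struct demo_sb_info {
		struct blockgroup_lock s_blockgroup_lock;	/* name required by sb_bgl_lock() */
		/* ... other per-mount state ... */
	};

	static void demo_fill_super(struct demo_sb_info *sbi)
	{
		/* Initialise every hashed spinlock once, at mount time. */
		bgl_lock_init(&sbi->s_blockgroup_lock);
	}

	static void demo_touch_group(struct demo_sb_info *sbi, unsigned int block_group)
	{
		/* block_group is hashed onto one of NR_BG_LOCKS spinlocks via
		 * (block_group & (NR_BG_LOCKS-1)), so updates to different
		 * block groups mostly take different locks on SMP. */
		spin_lock(sb_bgl_lock(sbi, block_group));
		/* ... modify this group's bitmaps / free-block counters ... */
		spin_unlock(sb_bgl_lock(sbi, block_group));
	}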