#ifndef _RAID1_H
#define _RAID1_H

typedef struct mirror_info mirror_info_t;

struct mirror_info {
	mdk_rdev_t	*rdev;
	sector_t	head_position;
};

/*
 * memory pools need a pointer to the mddev, so they can force an unplug
 * when memory is tight, and a count of the number of drives that the
 * pool was allocated for, so they know how much to allocate and free.
 * mddev->raid_disks cannot be used, as it can change while a pool is active.
 * These two values are stored in a kmalloc()ed struct.
 */

struct pool_info {
	mddev_t	*mddev;
	int	raid_disks;
};
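
/*
 * Illustrative sketch (not part of this header): a pool_info is passed as the
 * pool_data argument when the r1bio mempool is created, so the allocation
 * callback can see both the mddev and the drive count:
 *
 *	struct pool_info *pi = kmalloc(sizeof(*pi), GFP_KERNEL);
 *	pi->mddev = mddev;
 *	pi->raid_disks = mddev->raid_disks;
 *	pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
 *			      r1bio_pool_free, pi);
 *
 * NR_RAID1_BIOS and the alloc/free callbacks are raid1.c internals and are
 * named here only for illustration.
 */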
typedef struct r1bio_s r1bio_t;

struct r1_private_data_s {
	mddev_t			*mddev;
	mirror_info_t		*mirrors;
	int			raid_disks;
	int			last_used;
	sector_t		next_seq_sect;
	spinlock_t		device_lock;

	struct list_head	retry_list;
	/* queue pending writes and submit them on unplug */
	struct bio_list		pending_bio_list;

	/* for use when syncing mirrors: */
	spinlock_t		resync_lock;
	int			nr_pending;
	int			nr_waiting;
	int			nr_queued;
	int			barrier;
	sector_t		next_resync;
	int			fullsync;	/* set to 1 if a full sync is needed,
						 * (fresh device added).
						 * Cleared when a sync completes.
						 */
	int			recovery_disabled; /* when the same as
						    * mddev->recovery_disabled
						    * we don't allow recovery
						    * to be attempted as we
						    * expect a read error
						    */
	wait_queue_head_t	wait_barrier;

	struct pool_info	*poolinfo;

	struct page		*tmppage;

	mempool_t		*r1bio_pool;
	mempool_t		*r1buf_pool;

	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct mdk_thread_s	*thread;
};

typedef struct r1_private_data_s conf_t;
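
/*
 * A minimal sketch of how a personality routine would get at its conf_t; the
 * pointer is stashed in the generic md device (illustration only):
 *
 *	conf_t *conf = mddev->private;
 *
 *	spin_lock_irq(&conf->device_lock);
 *	...
 *	spin_unlock_irq(&conf->device_lock);
 */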
/*
 * this is our 'private' RAID1 bio.
 *
 * it contains information about what kind of IO operations were started
 * for this RAID1 operation, and about their status:
 */

struct r1bio_s {
	atomic_t		remaining;	/* 'have we finished' count,
						 * used from IRQ handlers
						 */
	atomic_t		behind_remaining; /* number of write-behind ios
						   * remaining in this BehindIO
						   * request
						   */
	sector_t		sector;
	int			sectors;
	unsigned long		state;
	mddev_t			*mddev;
	/*
	 * original bio going to /dev/mdx
	 */
	struct bio		*master_bio;
	/*
	 * if the IO is in READ direction, then this is where we read
	 */
	int			read_disk;

	struct list_head	retry_list;
	/* Next two are only valid when R1BIO_BehindIO is set */
	struct page		**behind_pages;
	int			behind_page_count;
	/*
	 * if the IO is in WRITE direction, then multiple bios are used.
	 * We choose the number when they are allocated.
	 */
	struct bio		*bios[0];
	/* DO NOT PUT ANY NEW FIELDS HERE - the bios array is contiguously allocated */
};
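
/*
 * bios[] is a zero-length array, so the per-device bio pointers are allocated
 * together with the struct.  A minimal sketch of the size calculation,
 * assuming 'raid_disks' mirrors:
 *
 *	r1bio_t *r1_bio = kzalloc(offsetof(r1bio_t, bios[raid_disks]),
 *				  GFP_NOIO);
 *
 * This is also why nothing may follow bios[]: a later field would overlap the
 * trailing bio pointers.
 */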
/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to overwrite to
 * correct the read error.  To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer.
 */
#define IO_BLOCKED ((struct bio *)1)
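
/*
 * Sketch of how IO_BLOCKED is meant to be consulted when choosing a mirror to
 * read from (illustration only; the actual selection policy lives in raid1.c):
 *
 *	if (r1_bio->bios[disk] == IO_BLOCKED)
 *		continue;
 *
 * i.e. skip a device that has already returned a read error for this r1bio.
 */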
/* bits for r1bio.state */
#define R1BIO_Uptodate	0
#define R1BIO_IsSync	1
#define R1BIO_Degraded	2
#define R1BIO_BehindIO	3
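
/*
 * These are bit numbers within r1bio_s.state and are manipulated with the
 * normal atomic bitops, e.g. (sketch):
 *
 *	set_bit(R1BIO_Uptodate, &r1_bio->state);
 *	if (test_bit(R1BIO_BehindIO, &r1_bio->state))
 *		...
 */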
/* Set ReadError on bios that experience a read error so that
 * raid1d knows what to do with them.
 */
#define R1BIO_ReadError	4
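
/*
 * A rough sketch of the intended use (the retry machinery itself is in
 * raid1.c; reschedule_retry() is named only for illustration):
 *
 *	set_bit(R1BIO_ReadError, &r1_bio->state);
 *	reschedule_retry(r1_bio);
 *
 * raid1d then picks the r1bio off conf->retry_list and retries the read on
 * another mirror.
 */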
/* For write-behind requests, we call bi_end_io when the last non-write-behind
 * device completes, provided any write was successful.  Otherwise we call it
 * when any write-behind write succeeds; if none do, we call it with failure
 * when the last write completes (and all writes failed).
 * This flag records that bi_end_io has been called.
 */
#define R1BIO_Returned	6
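
/*
 * A sketch of how this flag guards against completing the master bio twice
 * (illustration only):
 *
 *	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state))
 *		bio_endio(r1_bio->master_bio, err);
 *
 * whichever completion path gets there first reports the result; later
 * completions see the bit already set and do nothing.
 */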
extern int md_raid1_congested(mddev_t *mddev, int bits);
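
/*
 * md_raid1_congested() is exported so that code outside raid1.c can ask
 * whether any member device of the RAID1 is congested, e.g. from a
 * congested_fn (sketch):
 *
 *	if (md_raid1_congested(mddev, bits))
 *		return 1;
 */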
#endif