/*
 * raid1.h - data structures for the md RAID1 personality
 */
  1. #ifndef _RAID1_H
  2. #define _RAID1_H
typedef struct mirror_info mirror_info_t;

/* Per-member-device state for a RAID1 array. */
struct mirror_info {
	struct md_rdev	*rdev;		/* the underlying md member device */
	sector_t	head_position;	/* last sector serviced on this device;
					 * presumably used by read_balance() to
					 * prefer the nearest head - confirm in
					 * raid1.c */
};
/*
 * memory pools need a pointer to the mddev, so they can force an unplug
 * when memory is tight, and a count of the number of drives that the
 * pool was allocated for, so they know how much to allocate and free.
 * mddev->raid_disks cannot be used, as it can change while a pool is active
 * These two datums are stored in a kmalloced struct.
 */
struct pool_info {
	struct mddev	*mddev;		/* array the pools belong to */
	int		raid_disks;	/* number of drives when the pool
					 * was allocated (may differ from
					 * mddev->raid_disks, see above) */
};
typedef struct r1bio_s r1bio_t;

/*
 * Per-array private data for the RAID1 personality; aliased as
 * 'conf_t' below and reached via mddev->private.
 */
struct r1_private_data_s {
	struct mddev		*mddev;
	mirror_info_t		*mirrors;	/* one entry per member device */
	int			raid_disks;

	/* When choosing the best device for a read (read_balance())
	 * we try to keep sequential reads on the same device
	 * using 'last_used' and 'next_seq_sect'
	 */
	int			last_used;	/* disk index of the last read */
	sector_t		next_seq_sect;	/* sector expected next if the
						 * read stream is sequential */

	/* During resync, read_balancing is only allowed on the part
	 * of the array that has been resynced.  'next_resync' tells us
	 * where that is.
	 */
	sector_t		next_resync;

	spinlock_t		device_lock;	/* NOTE(review): presumably guards
						 * retry_list/pending_bio_list -
						 * confirm coverage in raid1.c */

	/* list of 'r1bio_t' that need to be processed by raid1d, whether
	 * to retry a read, writeout a resync or recovery block, or
	 * anything else.
	 */
	struct list_head	retry_list;

	/* queue pending writes to be submitted on unplug */
	struct bio_list		pending_bio_list;

	/* for use when syncing mirrors:
	 * We don't allow both normal IO and resync/recovery IO at
	 * the same time - resync/recovery can only happen when there
	 * is no other IO.  So when either is active, the other has to wait.
	 * See more details description in raid1.c near raise_barrier().
	 */
	wait_queue_head_t	wait_barrier;
	spinlock_t		resync_lock;	/* protects the barrier counters
						 * below (see raise_barrier()) */
	int			nr_pending;
	int			nr_waiting;
	int			nr_queued;
	int			barrier;

	/* Set to 1 if a full sync is needed, (fresh device added).
	 * Cleared when a sync completes.
	 */
	int			fullsync;

	/* When the same as mddev->recovery_disabled we don't allow
	 * recovery to be attempted as we expect a read error.
	 */
	int			recovery_disabled;

	/* poolinfo contains information about the content of the
	 * mempools - it changes when the array grows or shrinks
	 */
	struct pool_info	*poolinfo;
	mempool_t		*r1bio_pool;	/* r1bio_t allocations */
	mempool_t		*r1buf_pool;	/* resync/recovery buffers */

	/* temporary buffer for synchronous IO when attempting to repair
	 * a read error.
	 */
	struct page		*tmppage;

	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct md_thread	*thread;
};

typedef struct r1_private_data_s conf_t;
  79. /*
  80. * this is our 'private' RAID1 bio.
  81. *
  82. * it contains information about what kind of IO operations were started
  83. * for this RAID1 operation, and about their status:
  84. */
  85. struct r1bio_s {
  86. atomic_t remaining; /* 'have we finished' count,
  87. * used from IRQ handlers
  88. */
  89. atomic_t behind_remaining; /* number of write-behind ios remaining
  90. * in this BehindIO request
  91. */
  92. sector_t sector;
  93. int sectors;
  94. unsigned long state;
  95. struct mddev *mddev;
  96. /*
  97. * original bio going to /dev/mdx
  98. */
  99. struct bio *master_bio;
  100. /*
  101. * if the IO is in READ direction, then this is where we read
  102. */
  103. int read_disk;
  104. struct list_head retry_list;
  105. /* Next two are only valid when R1BIO_BehindIO is set */
  106. struct bio_vec *behind_bvecs;
  107. int behind_page_count;
  108. /*
  109. * if the IO is in WRITE direction, then multiple bios are used.
  110. * We choose the number when they are allocated.
  111. */
  112. struct bio *bios[0];
  113. /* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced*/
  114. };
  115. /* when we get a read error on a read-only array, we redirect to another
  116. * device without failing the first device, or trying to over-write to
  117. * correct the read error. To keep track of bad blocks on a per-bio
  118. * level, we store IO_BLOCKED in the appropriate 'bios' pointer
  119. */
  120. #define IO_BLOCKED ((struct bio *)1)
  121. /* When we successfully write to a known bad-block, we need to remove the
  122. * bad-block marking which must be done from process context. So we record
  123. * the success by setting bios[n] to IO_MADE_GOOD
  124. */
  125. #define IO_MADE_GOOD ((struct bio *)2)
  126. #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
/* bits for r1bio.state - these are bit NUMBERS (not masks), so keep the
 * values stable; NOTE(review): presumably used with set_bit()/test_bit()
 * on r1bio->state - confirm in raid1.c
 */
#define	R1BIO_Uptodate	0
#define	R1BIO_IsSync	1
#define	R1BIO_Degraded	2
#define	R1BIO_BehindIO	3
/* Set ReadError on bios that experience a read error so that
 * raid1d knows what to do with them.
 */
#define R1BIO_ReadError 4
/* For write-behind requests, we call bi_end_io when
 * the last non-write-behind device completes, providing
 * any write was successful.  Otherwise we call when
 * any write-behind write succeeds, otherwise we call
 * with failure when last write completes (and all failed).
 * Record that bi_end_io was called with this flag...
 * (bit 5 is intentionally left unused)
 */
#define	R1BIO_Returned 6
/* If a write for this request means we can clear some
 * known-bad-block records, we set this flag
 */
#define	R1BIO_MadeGood 7
#define	R1BIO_WriteError 8
/* Report whether the RAID1 array is congested for the given bits;
 * NOTE(review): presumably a backing-dev congestion query implemented
 * in raid1.c - confirm semantics of 'bits' against the caller.
 */
extern int md_raid1_congested(struct mddev *mddev, int bits);
  150. #endif