/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2004, 2005 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/file.h>
#include <linux/kthread.h>
#include <linux/configfs.h>
#include <linux/random.h>
#include <linux/crc32.h>
#include <linux/time.h>

#include "heartbeat.h"
#include "tcp.h"
#include "nodemanager.h"
#include "quorum.h"

#include "masklog.h"

/*
 * The first heartbeat pass had one global thread that would serialize all hb
 * callback calls.  This global serializing sem should only be removed once
 * we've made sure that all callees can deal with being called concurrently
 * from multiple hb region threads.
 */
static DECLARE_RWSEM(o2hb_callback_sem);

/*
 * multiple hb threads are watching multiple regions.  A node is live
 * whenever any of the threads sees activity from the node in its region.
 */
static DEFINE_SPINLOCK(o2hb_live_lock);
static struct list_head o2hb_live_slots[O2NM_MAX_NODES];
static unsigned long o2hb_live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
static LIST_HEAD(o2hb_node_events);
static DECLARE_WAIT_QUEUE_HEAD(o2hb_steady_queue);

static LIST_HEAD(o2hb_all_regions);

static struct o2hb_callback {
	struct list_head list;
} o2hb_callbacks[O2HB_NUM_CB];

static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type);

#define O2HB_DEFAULT_BLOCK_BITS 9

unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD;

/* Only sets a new threshold if there are no active regions.
 *
 * No locking or otherwise interesting code is required for reading
 * o2hb_dead_threshold as it can't change once regions are active and
 * it's not interesting to anyone until then anyway. */
static void o2hb_dead_threshold_set(unsigned int threshold)
{
	if (threshold > O2HB_MIN_DEAD_THRESHOLD) {
		spin_lock(&o2hb_live_lock);
		if (list_empty(&o2hb_all_regions))
			o2hb_dead_threshold = threshold;
		spin_unlock(&o2hb_live_lock);
	}
}

struct o2hb_node_event {
	struct list_head hn_item;
	enum o2hb_callback_type hn_event_type;
	struct o2nm_node *hn_node;
	int hn_node_num;
};

struct o2hb_disk_slot {
	struct o2hb_disk_heartbeat_block *ds_raw_block;
	u8 ds_node_num;
	u64 ds_last_time;
	u64 ds_last_generation;
	u16 ds_equal_samples;
	u16 ds_changed_samples;
	struct list_head ds_live_item;
};

/* each thread owns a region.. when we're asked to tear down the region
 * we ask the thread to stop, who cleans up the region */
struct o2hb_region {
	struct config_item hr_item;

	struct list_head hr_all_item;
	unsigned hr_unclean_stop:1;

	/* protected by the hr_callback_sem */
	struct task_struct *hr_task;

	unsigned int hr_blocks;
	unsigned long long hr_start_block;

	unsigned int hr_block_bits;
	unsigned int hr_block_bytes;

	unsigned int hr_slots_per_page;
	unsigned int hr_num_pages;

	struct page **hr_slot_data;
	struct block_device *hr_bdev;
	struct o2hb_disk_slot *hr_slots;

	/* let the person setting up hb wait for it to return until it
	 * has reached a 'steady' state.  This will be fixed when we have
	 * a more complete api that doesn't lead to this sort of fragility. */
	atomic_t hr_steady_iterations;

	char hr_dev_name[BDEVNAME_SIZE];

	unsigned int hr_timeout_ms;

	/* randomized as the region goes up and down so that a node
	 * recognizes a node going up and down in one iteration */
	u64 hr_generation;

	struct delayed_work hr_write_timeout_work;
	unsigned long hr_last_timeout_start;

	/* Used during o2hb_check_slot to hold a copy of the block
	 * being checked because we temporarily have to zero out the
	 * crc field. */
	struct o2hb_disk_heartbeat_block *hr_tmp_block;
};

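/* Tracks a batch of in-flight heartbeat bios: wc_num_reqs counts outstanding
 * requests and wc_io_complete fires once the last one is decremented away.
 * wc_error records any I/O error reported by the completion handler. */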
struct o2hb_bio_wait_ctxt {
	atomic_t wc_num_reqs;
	struct completion wc_io_complete;
	int wc_error;
};

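/* Runs if O2HB_MAX_WRITE_TIMEOUT_MS elapses after arming without a
 * completed heartbeat iteration re-arming the timer; we log the stall and
 * notify the quorum code via o2quo_disk_timeout(). */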
static void o2hb_write_timeout(struct work_struct *work)
{
	struct o2hb_region *reg =
		container_of(work, struct o2hb_region,
			     hr_write_timeout_work.work);

	mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u "
	     "milliseconds\n", reg->hr_dev_name,
	     jiffies_to_msecs(jiffies - reg->hr_last_timeout_start));
	o2quo_disk_timeout();
}

static void o2hb_arm_write_timeout(struct o2hb_region *reg)
{
	mlog(0, "Queue write timeout for %u ms\n", O2HB_MAX_WRITE_TIMEOUT_MS);

	cancel_delayed_work(&reg->hr_write_timeout_work);
	reg->hr_last_timeout_start = jiffies;
	schedule_delayed_work(&reg->hr_write_timeout_work,
			      msecs_to_jiffies(O2HB_MAX_WRITE_TIMEOUT_MS));
}

static void o2hb_disarm_write_timeout(struct o2hb_region *reg)
{
	cancel_delayed_work(&reg->hr_write_timeout_work);
	flush_scheduled_work();
}

static inline void o2hb_bio_wait_init(struct o2hb_bio_wait_ctxt *wc,
				      unsigned int num_ios)
{
	atomic_set(&wc->wc_num_reqs, num_ios);
	init_completion(&wc->wc_io_complete);
	wc->wc_error = 0;
}

/* Used in error paths too */
static inline void o2hb_bio_wait_dec(struct o2hb_bio_wait_ctxt *wc,
				     unsigned int num)
{
	/* sadly atomic_sub_and_test() isn't available on all platforms.  The
	 * good news is that the fast path only completes one at a time */
	while (num--) {
		if (atomic_dec_and_test(&wc->wc_num_reqs)) {
			BUG_ON(num > 0);
			complete(&wc->wc_io_complete);
		}
	}
}

static void o2hb_wait_on_io(struct o2hb_region *reg,
			    struct o2hb_bio_wait_ctxt *wc)
{
	struct address_space *mapping = reg->hr_bdev->bd_inode->i_mapping;

	blk_run_address_space(mapping);

	wait_for_completion(&wc->wc_io_complete);
}

static int o2hb_bio_end_io(struct bio *bio,
			   unsigned int bytes_done,
			   int error)
{
	struct o2hb_bio_wait_ctxt *wc = bio->bi_private;

	if (error) {
		mlog(ML_ERROR, "IO Error %d\n", error);
		wc->wc_error = error;
	}

	if (bio->bi_size)
		return 1;

	o2hb_bio_wait_dec(wc, 1);
	return 0;
}

/* Setup a Bio to cover I/O against num_slots slots starting at
 * start_slot. */
static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
				      struct o2hb_bio_wait_ctxt *wc,
				      unsigned int start_slot,
				      unsigned int num_slots)
{
	int i, nr_vecs, len, first_page, last_page;
	unsigned int vec_len, vec_start;
	unsigned int bits = reg->hr_block_bits;
	unsigned int spp = reg->hr_slots_per_page;
	struct bio *bio;
	struct page *page;

	nr_vecs = (num_slots + spp - 1) / spp;

	/* Testing has shown this allocation to take long enough under
	 * GFP_KERNEL that the local node can get fenced.  It would be
	 * nicest if we could pre-allocate these bios and avoid this
	 * altogether. */
	bio = bio_alloc(GFP_ATOMIC, nr_vecs);
	if (!bio) {
		mlog(ML_ERROR, "Could not alloc slots BIO!\n");
		bio = ERR_PTR(-ENOMEM);
		goto bail;
	}

	/* Must put everything in 512 byte sectors for the bio... */
	bio->bi_sector = (reg->hr_start_block + start_slot) << (bits - 9);
	bio->bi_bdev = reg->hr_bdev;
	bio->bi_private = wc;
	bio->bi_end_io = o2hb_bio_end_io;

	first_page = start_slot / spp;
	last_page = first_page + nr_vecs;
	vec_start = (start_slot << bits) % PAGE_CACHE_SIZE;
	for (i = first_page; i < last_page; i++) {
		page = reg->hr_slot_data[i];

		vec_len = PAGE_CACHE_SIZE;
		/* last page might be short */
		if (((i + 1) * spp) > (start_slot + num_slots))
			vec_len = ((num_slots + start_slot) % spp) << bits;
		vec_len -= vec_start;

		mlog(ML_HB_BIO, "page %d, vec_len = %u, vec_start = %u\n",
		     i, vec_len, vec_start);

		len = bio_add_page(bio, page, vec_len, vec_start);
		if (len != vec_len) {
			bio_put(bio);
			bio = ERR_PTR(-EIO);

			mlog(ML_ERROR, "Error adding page to bio i = %d, "
			     "vec_len = %u, len = %d, vec_start = %u\n",
			     i, vec_len, len, vec_start);
			goto bail;
		}

		vec_start = 0;
	}

bail:
	return bio;
}

/*
 * Compute the maximum number of sectors the bdev can handle in one bio,
 * as a power of two.
 *
 * Stolen from oracleasm, thanks Joel!
 */
static int compute_max_sectors(struct block_device *bdev)
{
	int max_pages, max_sectors, pow_two_sectors;

	struct request_queue *q;

	q = bdev_get_queue(bdev);
	max_pages = q->max_sectors >> (PAGE_SHIFT - 9);
	if (max_pages > BIO_MAX_PAGES)
		max_pages = BIO_MAX_PAGES;
	if (max_pages > q->max_phys_segments)
		max_pages = q->max_phys_segments;
	if (max_pages > q->max_hw_segments)
		max_pages = q->max_hw_segments;
	max_pages--; /* Handle I/Os that straddle a page */

	if (max_pages) {
		max_sectors = max_pages << (PAGE_SHIFT - 9);
	} else {
		/* The BIO contains at most one page. */
		max_sectors = q->max_sectors;
	}

	/* Why is fls() 1-based???? */
	pow_two_sectors = 1 << (fls(max_sectors) - 1);

	return pow_two_sectors;
}

static inline void o2hb_compute_request_limits(struct o2hb_region *reg,
					       unsigned int num_slots,
					       unsigned int *num_bios,
					       unsigned int *slots_per_bio)
{
	unsigned int max_sectors, io_sectors;

	max_sectors = compute_max_sectors(reg->hr_bdev);

	io_sectors = num_slots << (reg->hr_block_bits - 9);
	*num_bios = (io_sectors + max_sectors - 1) / max_sectors;
	*slots_per_bio = max_sectors >> (reg->hr_block_bits - 9);

	mlog(ML_HB_BIO, "My io size is %u sectors for %u slots. This "
	     "device can handle %u sectors of I/O\n", io_sectors, num_slots,
	     max_sectors);
	mlog(ML_HB_BIO, "Will need %u bios holding %u slots each\n",
	     *num_bios, *slots_per_bio);
}

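/* Read the first max_slots heartbeat blocks of the region in one pass,
 * splitting the I/O into however many bios the device limits require and
 * waiting for all of them to complete before returning. */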
static int o2hb_read_slots(struct o2hb_region *reg,
			   unsigned int max_slots)
{
	unsigned int num_bios, slots_per_bio, start_slot, num_slots;
	int i, status;
	struct o2hb_bio_wait_ctxt wc;
	struct bio **bios;
	struct bio *bio;

	o2hb_compute_request_limits(reg, max_slots, &num_bios, &slots_per_bio);

	bios = kcalloc(num_bios, sizeof(struct bio *), GFP_KERNEL);
	if (!bios) {
		status = -ENOMEM;
		mlog_errno(status);
		return status;
	}

	o2hb_bio_wait_init(&wc, num_bios);

	num_slots = slots_per_bio;
	for (i = 0; i < num_bios; i++) {
		start_slot = i * slots_per_bio;

		/* adjust num_slots at last bio */
		if (max_slots < (start_slot + num_slots))
			num_slots = max_slots - start_slot;

		bio = o2hb_setup_one_bio(reg, &wc, start_slot, num_slots);
		if (IS_ERR(bio)) {
			o2hb_bio_wait_dec(&wc, num_bios - i);

			status = PTR_ERR(bio);
			mlog_errno(status);
			goto bail_and_wait;
		}
		bios[i] = bio;

		submit_bio(READ, bio);
	}

	status = 0;

bail_and_wait:
	o2hb_wait_on_io(reg, &wc);

	if (wc.wc_error && !status)
		status = wc.wc_error;

	if (bios) {
		for (i = 0; i < num_bios; i++)
			if (bios[i])
				bio_put(bios[i]);
		kfree(bios);
	}

	return status;
}

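/* Kick off an asynchronous write of our own slot.  The caller keeps the
 * returned bio and waits on write_wc once it actually needs the write to
 * have reached the disk. */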
static int o2hb_issue_node_write(struct o2hb_region *reg,
				 struct bio **write_bio,
				 struct o2hb_bio_wait_ctxt *write_wc)
{
	int status;
	unsigned int slot;
	struct bio *bio;

	o2hb_bio_wait_init(write_wc, 1);

	slot = o2nm_this_node();

	bio = o2hb_setup_one_bio(reg, write_wc, slot, 1);
	if (IS_ERR(bio)) {
		status = PTR_ERR(bio);
		mlog_errno(status);
		goto bail;
	}

	submit_bio(WRITE, bio);

	*write_bio = bio;
	status = 0;
bail:
	return status;
}

static u32 o2hb_compute_block_crc_le(struct o2hb_region *reg,
				     struct o2hb_disk_heartbeat_block *hb_block)
{
	__le32 old_cksum;
	u32 ret;

	/* We want to compute the block crc with a 0 value in the
	 * hb_cksum field.  Save it off here and replace after the
	 * crc. */
	old_cksum = hb_block->hb_cksum;
	hb_block->hb_cksum = 0;

	ret = crc32_le(0, (unsigned char *) hb_block, reg->hr_block_bytes);

	hb_block->hb_cksum = old_cksum;

	return ret;
}

static void o2hb_dump_slot(struct o2hb_disk_heartbeat_block *hb_block)
{
	mlog(ML_ERROR, "Dump slot information: seq = 0x%llx, node = %u, "
	     "cksum = 0x%x, generation 0x%llx\n",
	     (long long)le64_to_cpu(hb_block->hb_seq),
	     hb_block->hb_node, le32_to_cpu(hb_block->hb_cksum),
	     (long long)le64_to_cpu(hb_block->hb_generation));
}

static int o2hb_verify_crc(struct o2hb_region *reg,
			   struct o2hb_disk_heartbeat_block *hb_block)
{
	u32 read, computed;

	read = le32_to_cpu(hb_block->hb_cksum);
	computed = o2hb_compute_block_crc_le(reg, hb_block);

	return read == computed;
}

/* We want to make sure that nobody is heartbeating on top of us --
 * this will help detect an invalid configuration. */
static int o2hb_check_last_timestamp(struct o2hb_region *reg)
{
	int node_num, ret;
	struct o2hb_disk_slot *slot;
	struct o2hb_disk_heartbeat_block *hb_block;

	node_num = o2nm_this_node();

	ret = 1;
	slot = &reg->hr_slots[node_num];
	/* Don't check on our 1st timestamp */
	if (slot->ds_last_time) {
		hb_block = slot->ds_raw_block;

		if (le64_to_cpu(hb_block->hb_seq) != slot->ds_last_time)
			ret = 0;
	}

	return ret;
}

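/* Fill in our own slot's block for the next write: a fresh sequence value
 * taken from the wall clock, our node number and generation, and the dead
 * threshold in milliseconds so other nodes can cross-check their
 * configuration.  The checksum must be computed last, over the finished
 * block. */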
static inline void o2hb_prepare_block(struct o2hb_region *reg,
				      u64 generation)
{
	int node_num;
	u64 cputime;
	struct o2hb_disk_slot *slot;
	struct o2hb_disk_heartbeat_block *hb_block;

	node_num = o2nm_this_node();
	slot = &reg->hr_slots[node_num];

	hb_block = (struct o2hb_disk_heartbeat_block *)slot->ds_raw_block;
	memset(hb_block, 0, reg->hr_block_bytes);
	/* TODO: time stuff */
	cputime = CURRENT_TIME.tv_sec;
	if (!cputime)
		cputime = 1;

	hb_block->hb_seq = cpu_to_le64(cputime);
	hb_block->hb_node = node_num;
	hb_block->hb_generation = cpu_to_le64(generation);
	hb_block->hb_dead_ms = cpu_to_le32(o2hb_dead_threshold * O2HB_REGION_TIMEOUT_MS);

	/* This step must always happen last! */
	hb_block->hb_cksum = cpu_to_le32(o2hb_compute_block_crc_le(reg,
								   hb_block));

	mlog(ML_HB_BIO, "our node generation = 0x%llx, cksum = 0x%x\n",
	     (long long)cpu_to_le64(generation),
	     le32_to_cpu(hb_block->hb_cksum));
}

static void o2hb_fire_callbacks(struct o2hb_callback *hbcall,
				struct o2nm_node *node,
				int idx)
{
	struct list_head *iter;
	struct o2hb_callback_func *f;

	list_for_each(iter, &hbcall->list) {
		f = list_entry(iter, struct o2hb_callback_func, hc_item);
		mlog(ML_HEARTBEAT, "calling funcs %p\n", f);
		(f->hc_func)(node, idx, f->hc_data);
	}
}

/* Will run the list in order until we process the passed event */
static void o2hb_run_event_list(struct o2hb_node_event *queued_event)
{
	int empty;
	struct o2hb_callback *hbcall;
	struct o2hb_node_event *event;

	spin_lock(&o2hb_live_lock);
	empty = list_empty(&queued_event->hn_item);
	spin_unlock(&o2hb_live_lock);
	if (empty)
		return;

	/* Holding callback sem assures we don't alter the callback
	 * lists when doing this, and serializes ourselves with other
	 * processes wanting callbacks. */
	down_write(&o2hb_callback_sem);

	spin_lock(&o2hb_live_lock);
	while (!list_empty(&o2hb_node_events)
	       && !list_empty(&queued_event->hn_item)) {
		event = list_entry(o2hb_node_events.next,
				   struct o2hb_node_event,
				   hn_item);
		list_del_init(&event->hn_item);
		spin_unlock(&o2hb_live_lock);

		mlog(ML_HEARTBEAT, "Node %s event for %d\n",
		     event->hn_event_type == O2HB_NODE_UP_CB ? "UP" : "DOWN",
		     event->hn_node_num);

		hbcall = hbcall_from_type(event->hn_event_type);

		/* We should *never* have gotten on to the list with a
		 * bad type... This isn't something that we should try
		 * to recover from. */
		BUG_ON(IS_ERR(hbcall));

		o2hb_fire_callbacks(hbcall, event->hn_node, event->hn_node_num);

		spin_lock(&o2hb_live_lock);
	}
	spin_unlock(&o2hb_live_lock);

	up_write(&o2hb_callback_sem);
}

static void o2hb_queue_node_event(struct o2hb_node_event *event,
				  enum o2hb_callback_type type,
				  struct o2nm_node *node,
				  int node_num)
{
	assert_spin_locked(&o2hb_live_lock);

	event->hn_event_type = type;
	event->hn_node = node;
	event->hn_node_num = node_num;

	mlog(ML_HEARTBEAT, "Queue node %s event for node %d\n",
	     type == O2HB_NODE_UP_CB ? "UP" : "DOWN", node_num);

	list_add_tail(&event->hn_item, &o2hb_node_events);
}

static void o2hb_shutdown_slot(struct o2hb_disk_slot *slot)
{
	struct o2hb_node_event event =
		{ .hn_item = LIST_HEAD_INIT(event.hn_item), };
	struct o2nm_node *node;

	node = o2nm_get_node_by_num(slot->ds_node_num);
	if (!node)
		return;

	spin_lock(&o2hb_live_lock);
	if (!list_empty(&slot->ds_live_item)) {
		mlog(ML_HEARTBEAT, "Shutdown, node %d leaves region\n",
		     slot->ds_node_num);

		list_del_init(&slot->ds_live_item);

		if (list_empty(&o2hb_live_slots[slot->ds_node_num])) {
			clear_bit(slot->ds_node_num, o2hb_live_node_bitmap);

			o2hb_queue_node_event(&event, O2HB_NODE_DOWN_CB, node,
					      slot->ds_node_num);
		}
	}
	spin_unlock(&o2hb_live_lock);

	o2hb_run_event_list(&event);

	o2nm_node_put(node);
}

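/* Compare a node's freshly read slot against what we saw last iteration:
 * track changed vs. equal sequence samples, move the node between the live
 * and dead states as the thresholds are crossed, and queue the matching
 * up/down events.  Returns nonzero if the node's liveness changed. */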
static int o2hb_check_slot(struct o2hb_region *reg,
			   struct o2hb_disk_slot *slot)
{
	int changed = 0, gen_changed = 0;
	struct o2hb_node_event event =
		{ .hn_item = LIST_HEAD_INIT(event.hn_item), };
	struct o2nm_node *node;
	struct o2hb_disk_heartbeat_block *hb_block = reg->hr_tmp_block;
	u64 cputime;
	unsigned int dead_ms = o2hb_dead_threshold * O2HB_REGION_TIMEOUT_MS;
	unsigned int slot_dead_ms;

	memcpy(hb_block, slot->ds_raw_block, reg->hr_block_bytes);

	/* Is this correct? Do we assume that the node doesn't exist
	 * if we're not configured for him? */
	node = o2nm_get_node_by_num(slot->ds_node_num);
	if (!node)
		return 0;

	if (!o2hb_verify_crc(reg, hb_block)) {
		/* all paths from here will drop o2hb_live_lock for
		 * us. */
		spin_lock(&o2hb_live_lock);

		/* Don't print an error on the console in this case -
		 * a freshly formatted heartbeat area will not have a
		 * crc set on it. */
		if (list_empty(&slot->ds_live_item))
			goto out;

		/* The node is live but pushed out a bad crc. We
		 * consider it a transient miss but don't populate any
		 * other values as they may be junk. */
		mlog(ML_ERROR, "Node %d has written a bad crc to %s\n",
		     slot->ds_node_num, reg->hr_dev_name);
		o2hb_dump_slot(hb_block);

		slot->ds_equal_samples++;
		goto fire_callbacks;
	}

	/* we don't care if these wrap.. the state transitions below
	 * clear at the right places */
	cputime = le64_to_cpu(hb_block->hb_seq);
	if (slot->ds_last_time != cputime)
		slot->ds_changed_samples++;
	else
		slot->ds_equal_samples++;
	slot->ds_last_time = cputime;

	/* The node changed heartbeat generations. We assume this to
	 * mean it dropped off but came back before we timed out. We
	 * want to consider it down for the time being but don't want
	 * to lose any changed_samples state we might build up to
	 * considering it live again. */
	if (slot->ds_last_generation != le64_to_cpu(hb_block->hb_generation)) {
		gen_changed = 1;
		slot->ds_equal_samples = 0;
		mlog(ML_HEARTBEAT, "Node %d changed generation (0x%llx "
		     "to 0x%llx)\n", slot->ds_node_num,
		     (long long)slot->ds_last_generation,
		     (long long)le64_to_cpu(hb_block->hb_generation));
	}

	slot->ds_last_generation = le64_to_cpu(hb_block->hb_generation);

	mlog(ML_HEARTBEAT, "Slot %d gen 0x%llx cksum 0x%x "
	     "seq %llu last %llu changed %u equal %u\n",
	     slot->ds_node_num, (long long)slot->ds_last_generation,
	     le32_to_cpu(hb_block->hb_cksum),
	     (unsigned long long)le64_to_cpu(hb_block->hb_seq),
	     (unsigned long long)slot->ds_last_time, slot->ds_changed_samples,
	     slot->ds_equal_samples);

	spin_lock(&o2hb_live_lock);

fire_callbacks:
	/* dead nodes only come to life after some number of
	 * changes at any time during their dead time */
	if (list_empty(&slot->ds_live_item) &&
	    slot->ds_changed_samples >= O2HB_LIVE_THRESHOLD) {
		mlog(ML_HEARTBEAT, "Node %d (id 0x%llx) joined my region\n",
		     slot->ds_node_num, (long long)slot->ds_last_generation);

		/* first on the list generates a callback */
		if (list_empty(&o2hb_live_slots[slot->ds_node_num])) {
			set_bit(slot->ds_node_num, o2hb_live_node_bitmap);

			o2hb_queue_node_event(&event, O2HB_NODE_UP_CB, node,
					      slot->ds_node_num);

			changed = 1;
		}

		list_add_tail(&slot->ds_live_item,
			      &o2hb_live_slots[slot->ds_node_num]);

		slot->ds_equal_samples = 0;

		/* We want to be sure that all nodes agree on the
		 * number of milliseconds before a node will be
		 * considered dead. The self-fencing timeout is
		 * computed from this value, and a discrepancy might
		 * result in heartbeat calling a node dead when it
		 * hasn't self-fenced yet. */
		slot_dead_ms = le32_to_cpu(hb_block->hb_dead_ms);
		if (slot_dead_ms && slot_dead_ms != dead_ms) {
			/* TODO: Perhaps we can fail the region here. */
			mlog(ML_ERROR, "Node %d on device %s has a dead count "
			     "of %u ms, but our count is %u ms.\n"
			     "Please double check your configuration values "
			     "for 'O2CB_HEARTBEAT_THRESHOLD'\n",
			     slot->ds_node_num, reg->hr_dev_name, slot_dead_ms,
			     dead_ms);
		}
		goto out;
	}

	/* if the list is dead, we're done.. */
	if (list_empty(&slot->ds_live_item))
		goto out;

	/* live nodes only go dead after enough consecutive missed
	 * samples.. reset the missed counter whenever we see
	 * activity */
	if (slot->ds_equal_samples >= o2hb_dead_threshold || gen_changed) {
		mlog(ML_HEARTBEAT, "Node %d left my region\n",
		     slot->ds_node_num);

		/* last off the live_slot generates a callback */
		list_del_init(&slot->ds_live_item);
		if (list_empty(&o2hb_live_slots[slot->ds_node_num])) {
			clear_bit(slot->ds_node_num, o2hb_live_node_bitmap);

			o2hb_queue_node_event(&event, O2HB_NODE_DOWN_CB, node,
					      slot->ds_node_num);

			changed = 1;
		}

		/* We don't clear this because the node is still
		 * actually writing new blocks. */
		if (!gen_changed)
			slot->ds_changed_samples = 0;
		goto out;
	}
	if (slot->ds_changed_samples) {
		slot->ds_changed_samples = 0;
		slot->ds_equal_samples = 0;
	}
out:
	spin_unlock(&o2hb_live_lock);

	o2hb_run_event_list(&event);

	o2nm_node_put(node);
	return changed;
}

/* This could be faster if we just implemented a find_last_bit, but I
 * don't think the circumstances warrant it. */
static int o2hb_highest_node(unsigned long *nodes,
			     int numbits)
{
	int highest, node;

	highest = numbits;
	node = -1;
	while ((node = find_next_bit(nodes, numbits, node + 1)) != -1) {
		if (node >= numbits)
			break;

		highest = node;
	}

	return highest;
}

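/* One full heartbeat iteration: read every configured slot, make sure no
 * other node is writing into ours, issue the asynchronous write of our own
 * block, check each slot for liveness changes, then wait for our write to
 * finish and re-arm the write timeout.  The steady-state waiter is woken
 * once enough iterations pass without changes. */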
static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
{
	int i, ret, highest_node, change = 0;
	unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)];
	struct bio *write_bio;
	struct o2hb_bio_wait_ctxt write_wc;

	ret = o2nm_configured_node_map(configured_nodes,
				       sizeof(configured_nodes));
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	highest_node = o2hb_highest_node(configured_nodes, O2NM_MAX_NODES);
	if (highest_node >= O2NM_MAX_NODES) {
		mlog(ML_NOTICE, "ocfs2_heartbeat: no configured nodes found!\n");
		return -EINVAL;
	}

	/* No sense in reading the slots of nodes that don't exist
	 * yet. Of course, if the node definitions have holes in them
	 * then we're reading an empty slot anyway... Consider this
	 * best-effort. */
	ret = o2hb_read_slots(reg, highest_node + 1);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	/* With an up to date view of the slots, we can check that no
	 * other node has been improperly configured to heartbeat in
	 * our slot. */
	if (!o2hb_check_last_timestamp(reg))
		mlog(ML_ERROR, "Device \"%s\": another node is heartbeating "
		     "in our slot!\n", reg->hr_dev_name);

	/* fill in the proper info for our next heartbeat */
	o2hb_prepare_block(reg, reg->hr_generation);

	/* And fire off the write. Note that we don't wait on this I/O
	 * until later. */
	ret = o2hb_issue_node_write(reg, &write_bio, &write_wc);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	i = -1;
	while ((i = find_next_bit(configured_nodes, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) {
		change |= o2hb_check_slot(reg, &reg->hr_slots[i]);
	}

	/*
	 * We have to be sure we've advertised ourselves on disk
	 * before we can go to steady state.  This ensures that
	 * people we find in our steady state have seen us.
	 */
	o2hb_wait_on_io(reg, &write_wc);
	bio_put(write_bio);
	if (write_wc.wc_error) {
		/* Do not re-arm the write timeout on I/O error - we
		 * can't be sure that the new block ever made it to
		 * disk */
		mlog(ML_ERROR, "Write error %d on device \"%s\"\n",
		     write_wc.wc_error, reg->hr_dev_name);
		return write_wc.wc_error;
	}

	o2hb_arm_write_timeout(reg);

	/* let the person who launched us know when things are steady */
	if (!change && (atomic_read(&reg->hr_steady_iterations) != 0)) {
		if (atomic_dec_and_test(&reg->hr_steady_iterations))
			wake_up(&o2hb_steady_queue);
	}

	return 0;
}

/* Subtract b from a, storing the result in a. a *must* have a larger
 * value than b. */
static void o2hb_tv_subtract(struct timeval *a,
			     struct timeval *b)
{
	/* just return 0 when b is after a */
	if (a->tv_sec < b->tv_sec ||
	    (a->tv_sec == b->tv_sec && a->tv_usec < b->tv_usec)) {
		a->tv_sec = 0;
		a->tv_usec = 0;
		return;
	}

	a->tv_sec -= b->tv_sec;
	a->tv_usec -= b->tv_usec;
	while (a->tv_usec < 0) {
		a->tv_sec--;
		a->tv_usec += 1000000;
	}
}

static unsigned int o2hb_elapsed_msecs(struct timeval *start,
				       struct timeval *end)
{
	struct timeval res = *end;

	o2hb_tv_subtract(&res, start);

	return res.tv_sec * 1000 + res.tv_usec / 1000;
}

/*
 * we ride the region ref that the region dir holds.  before the region
 * dir is removed and drops its ref it will wait to tear down this
 * thread.
 */
static int o2hb_thread(void *data)
{
	int i, ret;
	struct o2hb_region *reg = data;
	struct bio *write_bio;
	struct o2hb_bio_wait_ctxt write_wc;
	struct timeval before_hb, after_hb;
	unsigned int elapsed_msec;

	mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread running\n");

	set_user_nice(current, -20);

	while (!kthread_should_stop() && !reg->hr_unclean_stop) {
		/* We track the time spent inside
		 * o2hb_do_disk_heartbeat so that we avoid more than
		 * hr_timeout_ms between disk writes. On busy systems
		 * this should result in a heartbeat which is less
		 * likely to time itself out. */
		do_gettimeofday(&before_hb);

		i = 0;
		do {
			ret = o2hb_do_disk_heartbeat(reg);
		} while (ret && ++i < 2);

		do_gettimeofday(&after_hb);
		elapsed_msec = o2hb_elapsed_msecs(&before_hb, &after_hb);

		mlog(0, "start = %lu.%lu, end = %lu.%lu, msec = %u\n",
		     before_hb.tv_sec, (unsigned long) before_hb.tv_usec,
		     after_hb.tv_sec, (unsigned long) after_hb.tv_usec,
		     elapsed_msec);

		if (elapsed_msec < reg->hr_timeout_ms) {
			/* the kthread api has blocked signals for us so no
			 * need to record the return value. */
			msleep_interruptible(reg->hr_timeout_ms - elapsed_msec);
		}
	}

	o2hb_disarm_write_timeout(reg);

	/* unclean stop is only used in very bad situation */
	for (i = 0; !reg->hr_unclean_stop && i < reg->hr_blocks; i++)
		o2hb_shutdown_slot(&reg->hr_slots[i]);

	/* Explicit down notification - avoid forcing the other nodes
	 * to timeout on this region when we could just as easily
	 * write a clear generation - thus indicating to them that
	 * this node has left this region.
	 *
	 * XXX: Should we skip this on unclean_stop? */
	o2hb_prepare_block(reg, 0);
	ret = o2hb_issue_node_write(reg, &write_bio, &write_wc);
	if (ret == 0) {
		o2hb_wait_on_io(reg, &write_wc);
		bio_put(write_bio);
	} else {
		mlog_errno(ret);
	}

	mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread exiting\n");

	return 0;
}

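/* Set up the per-type callback lists, the per-node live slot lists and the
 * live node bitmap before any heartbeat regions can be created. */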
void o2hb_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(o2hb_callbacks); i++)
		INIT_LIST_HEAD(&o2hb_callbacks[i].list);

	for (i = 0; i < ARRAY_SIZE(o2hb_live_slots); i++)
		INIT_LIST_HEAD(&o2hb_live_slots[i]);

	INIT_LIST_HEAD(&o2hb_node_events);

	memset(o2hb_live_node_bitmap, 0, sizeof(o2hb_live_node_bitmap));
}

/* if we're already in a callback then we're already serialized by the sem */
static void o2hb_fill_node_map_from_callback(unsigned long *map,
					     unsigned bytes)
{
	BUG_ON(bytes < (BITS_TO_LONGS(O2NM_MAX_NODES) * sizeof(unsigned long)));

	memcpy(map, &o2hb_live_node_bitmap, bytes);
}

/*
 * get a map of all nodes that are heartbeating in any regions
 */
void o2hb_fill_node_map(unsigned long *map, unsigned bytes)
{
	/* callers want to serialize this map and callbacks so that they
	 * can trust that they don't miss nodes coming to the party */
	down_read(&o2hb_callback_sem);
	spin_lock(&o2hb_live_lock);
	o2hb_fill_node_map_from_callback(map, bytes);
	spin_unlock(&o2hb_live_lock);
	up_read(&o2hb_callback_sem);
}
EXPORT_SYMBOL_GPL(o2hb_fill_node_map);

/*
 * heartbeat configfs bits.  The heartbeat set is a default set under
 * the cluster set in nodemanager.c.
 */

static struct o2hb_region *to_o2hb_region(struct config_item *item)
{
	return item ? container_of(item, struct o2hb_region, hr_item) : NULL;
}

/* drop_item only drops its ref after killing the thread, nothing should
 * be using the region anymore.  this has to clean up any state that
 * attributes might have built up. */
static void o2hb_region_release(struct config_item *item)
{
	int i;
	struct page *page;
	struct o2hb_region *reg = to_o2hb_region(item);

	if (reg->hr_tmp_block)
		kfree(reg->hr_tmp_block);

	if (reg->hr_slot_data) {
		for (i = 0; i < reg->hr_num_pages; i++) {
			page = reg->hr_slot_data[i];
			if (page)
				__free_page(page);
		}
		kfree(reg->hr_slot_data);
	}

	if (reg->hr_bdev)
		blkdev_put(reg->hr_bdev);

	if (reg->hr_slots)
		kfree(reg->hr_slots);

	spin_lock(&o2hb_live_lock);
	list_del(&reg->hr_all_item);
	spin_unlock(&o2hb_live_lock);

	kfree(reg);
}

static int o2hb_read_block_input(struct o2hb_region *reg,
				 const char *page,
				 size_t count,
				 unsigned long *ret_bytes,
				 unsigned int *ret_bits)
{
	unsigned long bytes;
	char *p = (char *)page;

	bytes = simple_strtoul(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	/* Heartbeat and fs min / max block sizes are the same. */
	if (bytes > 4096 || bytes < 512)
		return -ERANGE;
	if (hweight16(bytes) != 1)
		return -EINVAL;

	if (ret_bytes)
		*ret_bytes = bytes;
	if (ret_bits)
		*ret_bits = ffs(bytes) - 1;

	return 0;
}

static ssize_t o2hb_region_block_bytes_read(struct o2hb_region *reg,
					    char *page)
{
	return sprintf(page, "%u\n", reg->hr_block_bytes);
}

static ssize_t o2hb_region_block_bytes_write(struct o2hb_region *reg,
					     const char *page,
					     size_t count)
{
	int status;
	unsigned long block_bytes;
	unsigned int block_bits;

	if (reg->hr_bdev)
		return -EINVAL;

	status = o2hb_read_block_input(reg, page, count,
				       &block_bytes, &block_bits);
	if (status)
		return status;

	reg->hr_block_bytes = (unsigned int)block_bytes;
	reg->hr_block_bits = block_bits;

	return count;
}

static ssize_t o2hb_region_start_block_read(struct o2hb_region *reg,
					    char *page)
{
	return sprintf(page, "%llu\n", reg->hr_start_block);
}

static ssize_t o2hb_region_start_block_write(struct o2hb_region *reg,
					     const char *page,
					     size_t count)
{
	unsigned long long tmp;
	char *p = (char *)page;

	if (reg->hr_bdev)
		return -EINVAL;

	tmp = simple_strtoull(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	reg->hr_start_block = tmp;

	return count;
}

static ssize_t o2hb_region_blocks_read(struct o2hb_region *reg,
				       char *page)
{
	return sprintf(page, "%d\n", reg->hr_blocks);
}

static ssize_t o2hb_region_blocks_write(struct o2hb_region *reg,
					const char *page,
					size_t count)
{
	unsigned long tmp;
	char *p = (char *)page;

	if (reg->hr_bdev)
		return -EINVAL;

	tmp = simple_strtoul(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	if (tmp > O2NM_MAX_NODES || tmp == 0)
		return -ERANGE;

	reg->hr_blocks = (unsigned int)tmp;

	return count;
}

static ssize_t o2hb_region_dev_read(struct o2hb_region *reg,
				    char *page)
{
	unsigned int ret = 0;

	if (reg->hr_bdev)
		ret = sprintf(page, "%s\n", reg->hr_dev_name);

	return ret;
}

static void o2hb_init_region_params(struct o2hb_region *reg)
{
	reg->hr_slots_per_page = PAGE_CACHE_SIZE >> reg->hr_block_bits;
	reg->hr_timeout_ms = O2HB_REGION_TIMEOUT_MS;

	mlog(ML_HEARTBEAT, "hr_start_block = %llu, hr_blocks = %u\n",
	     reg->hr_start_block, reg->hr_blocks);
	mlog(ML_HEARTBEAT, "hr_block_bytes = %u, hr_block_bits = %u\n",
	     reg->hr_block_bytes, reg->hr_block_bits);
	mlog(ML_HEARTBEAT, "hr_timeout_ms = %u\n", reg->hr_timeout_ms);
	mlog(ML_HEARTBEAT, "dead threshold = %u\n", o2hb_dead_threshold);
}

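/* Allocate the scratch block used by o2hb_check_slot, the per-node slot
 * array and the pages backing the on-disk blocks, then point each slot's
 * ds_raw_block at its spot within those pages. */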
static int o2hb_map_slot_data(struct o2hb_region *reg)
{
	int i, j;
	unsigned int last_slot;
	unsigned int spp = reg->hr_slots_per_page;
	struct page *page;
	char *raw;
	struct o2hb_disk_slot *slot;

	reg->hr_tmp_block = kmalloc(reg->hr_block_bytes, GFP_KERNEL);
	if (reg->hr_tmp_block == NULL) {
		mlog_errno(-ENOMEM);
		return -ENOMEM;
	}

	reg->hr_slots = kcalloc(reg->hr_blocks,
				sizeof(struct o2hb_disk_slot), GFP_KERNEL);
	if (reg->hr_slots == NULL) {
		mlog_errno(-ENOMEM);
		return -ENOMEM;
	}

	for (i = 0; i < reg->hr_blocks; i++) {
		slot = &reg->hr_slots[i];
		slot->ds_node_num = i;
		INIT_LIST_HEAD(&slot->ds_live_item);
		slot->ds_raw_block = NULL;
	}

	reg->hr_num_pages = (reg->hr_blocks + spp - 1) / spp;
	mlog(ML_HEARTBEAT, "Going to require %u pages to cover %u blocks "
	     "at %u blocks per page\n",
	     reg->hr_num_pages, reg->hr_blocks, spp);

	reg->hr_slot_data = kcalloc(reg->hr_num_pages, sizeof(struct page *),
				    GFP_KERNEL);
	if (!reg->hr_slot_data) {
		mlog_errno(-ENOMEM);
		return -ENOMEM;
	}

	for (i = 0; i < reg->hr_num_pages; i++) {
		page = alloc_page(GFP_KERNEL);
		if (!page) {
			mlog_errno(-ENOMEM);
			return -ENOMEM;
		}

		reg->hr_slot_data[i] = page;

		last_slot = i * spp;
		raw = page_address(page);
		for (j = 0;
		     (j < spp) && ((j + last_slot) < reg->hr_blocks);
		     j++) {
			BUG_ON((j + last_slot) >= reg->hr_blocks);

			slot = &reg->hr_slots[j + last_slot];
			slot->ds_raw_block =
				(struct o2hb_disk_heartbeat_block *) raw;

			raw += reg->hr_block_bytes;
		}
	}

	return 0;
}

/* Read in all the slots available and populate the tracking
 * structures so that we can start with a baseline idea of what's
 * there. */
static int o2hb_populate_slot_data(struct o2hb_region *reg)
{
	int ret, i;
	struct o2hb_disk_slot *slot;
	struct o2hb_disk_heartbeat_block *hb_block;

	mlog_entry_void();

	ret = o2hb_read_slots(reg, reg->hr_blocks);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* We only want to get an idea of the values initially in each
	 * slot, so we do no verification - o2hb_check_slot will
	 * actually determine if each configured slot is valid and
	 * whether any values have changed. */
	for (i = 0; i < reg->hr_blocks; i++) {
		slot = &reg->hr_slots[i];
		hb_block = (struct o2hb_disk_heartbeat_block *) slot->ds_raw_block;

		/* Only fill the values that o2hb_check_slot uses to
		 * determine changing slots */
		slot->ds_last_time = le64_to_cpu(hb_block->hb_seq);
		slot->ds_last_generation = le64_to_cpu(hb_block->hb_generation);
	}

out:
	mlog_exit(ret);
	return ret;
}

/* this is acting as commit; we set up all of hr_bdev and hr_task or nothing */
static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
				     const char *page,
				     size_t count)
{
	long fd;
	int sectsize;
	char *p = (char *)page;
	struct file *filp = NULL;
	struct inode *inode = NULL;
	ssize_t ret = -EINVAL;

	if (reg->hr_bdev)
		goto out;

	/* We can't heartbeat without having had our node number
	 * configured yet. */
	if (o2nm_this_node() == O2NM_MAX_NODES)
		goto out;

	fd = simple_strtol(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		goto out;

	if (fd < 0 || fd >= INT_MAX)
		goto out;

	filp = fget(fd);
	if (filp == NULL)
		goto out;

	if (reg->hr_blocks == 0 || reg->hr_start_block == 0 ||
	    reg->hr_block_bytes == 0)
		goto out;

	inode = igrab(filp->f_mapping->host);
	if (inode == NULL)
		goto out;

	if (!S_ISBLK(inode->i_mode))
		goto out;

	reg->hr_bdev = I_BDEV(filp->f_mapping->host);
	ret = blkdev_get(reg->hr_bdev, FMODE_WRITE | FMODE_READ, 0);
	if (ret) {
		reg->hr_bdev = NULL;
		goto out;
	}
	inode = NULL;

	bdevname(reg->hr_bdev, reg->hr_dev_name);

	sectsize = bdev_hardsect_size(reg->hr_bdev);
	if (sectsize != reg->hr_block_bytes) {
		mlog(ML_ERROR,
		     "blocksize %u incorrect for device, expected %d",
		     reg->hr_block_bytes, sectsize);
		ret = -EINVAL;
		goto out;
	}

	o2hb_init_region_params(reg);

	/* Generation of zero is invalid */
	do {
		get_random_bytes(&reg->hr_generation,
				 sizeof(reg->hr_generation));
	} while (reg->hr_generation == 0);

	ret = o2hb_map_slot_data(reg);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = o2hb_populate_slot_data(reg);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	INIT_DELAYED_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout);

	/*
	 * A node is considered live after it has beat LIVE_THRESHOLD
	 * times.  We're not steady until we've given them a chance
	 * _after_ our first read.
	 */
	atomic_set(&reg->hr_steady_iterations, O2HB_LIVE_THRESHOLD + 1);

	reg->hr_task = kthread_run(o2hb_thread, reg, "o2hb-%s",
				   reg->hr_item.ci_name);
	if (IS_ERR(reg->hr_task)) {
		ret = PTR_ERR(reg->hr_task);
		mlog_errno(ret);
		reg->hr_task = NULL;
		goto out;
	}

	ret = wait_event_interruptible(o2hb_steady_queue,
				atomic_read(&reg->hr_steady_iterations) == 0);
	if (ret) {
		kthread_stop(reg->hr_task);
		reg->hr_task = NULL;
		goto out;
	}

	ret = count;
out:
	if (filp)
		fput(filp);
	if (inode)
		iput(inode);
	if (ret < 0) {
		if (reg->hr_bdev) {
			blkdev_put(reg->hr_bdev);
			reg->hr_bdev = NULL;
		}
	}
	return ret;
}

static ssize_t o2hb_region_pid_read(struct o2hb_region *reg,
				    char *page)
{
	if (!reg->hr_task)
		return 0;

	return sprintf(page, "%u\n", reg->hr_task->pid);
}

struct o2hb_region_attribute {
	struct configfs_attribute attr;
	ssize_t (*show)(struct o2hb_region *, char *);
	ssize_t (*store)(struct o2hb_region *, const char *, size_t);
};

static struct o2hb_region_attribute o2hb_region_attr_block_bytes = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "block_bytes",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2hb_region_block_bytes_read,
	.store	= o2hb_region_block_bytes_write,
};

static struct o2hb_region_attribute o2hb_region_attr_start_block = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "start_block",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2hb_region_start_block_read,
	.store	= o2hb_region_start_block_write,
};

static struct o2hb_region_attribute o2hb_region_attr_blocks = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "blocks",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2hb_region_blocks_read,
	.store	= o2hb_region_blocks_write,
};

static struct o2hb_region_attribute o2hb_region_attr_dev = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "dev",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2hb_region_dev_read,
	.store	= o2hb_region_dev_write,
};

static struct o2hb_region_attribute o2hb_region_attr_pid = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "pid",
		    .ca_mode = S_IRUGO | S_IRUSR },
	.show	= o2hb_region_pid_read,
};

static struct configfs_attribute *o2hb_region_attrs[] = {
	&o2hb_region_attr_block_bytes.attr,
	&o2hb_region_attr_start_block.attr,
	&o2hb_region_attr_blocks.attr,
	&o2hb_region_attr_dev.attr,
	&o2hb_region_attr_pid.attr,
	NULL,
};

static ssize_t o2hb_region_show(struct config_item *item,
				struct configfs_attribute *attr,
				char *page)
{
	struct o2hb_region *reg = to_o2hb_region(item);
	struct o2hb_region_attribute *o2hb_region_attr =
		container_of(attr, struct o2hb_region_attribute, attr);
	ssize_t ret = 0;

	if (o2hb_region_attr->show)
		ret = o2hb_region_attr->show(reg, page);
	return ret;
}

static ssize_t o2hb_region_store(struct config_item *item,
				 struct configfs_attribute *attr,
				 const char *page, size_t count)
{
	struct o2hb_region *reg = to_o2hb_region(item);
	struct o2hb_region_attribute *o2hb_region_attr =
		container_of(attr, struct o2hb_region_attribute, attr);
	ssize_t ret = -EINVAL;

	if (o2hb_region_attr->store)
		ret = o2hb_region_attr->store(reg, page, count);
	return ret;
}

static struct configfs_item_operations o2hb_region_item_ops = {
	.release		= o2hb_region_release,
	.show_attribute		= o2hb_region_show,
	.store_attribute	= o2hb_region_store,
};

static struct config_item_type o2hb_region_type = {
	.ct_item_ops	= &o2hb_region_item_ops,
	.ct_attrs	= o2hb_region_attrs,
	.ct_owner	= THIS_MODULE,
};

/* heartbeat set */

struct o2hb_heartbeat_group {
	struct config_group hs_group;
	/* some stuff? */
};

static struct o2hb_heartbeat_group *to_o2hb_heartbeat_group(struct config_group *group)
{
	return group ?
		container_of(group, struct o2hb_heartbeat_group, hs_group)
		: NULL;
}

static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *group,
							   const char *name)
{
	struct o2hb_region *reg = NULL;
	struct config_item *ret = NULL;

	reg = kzalloc(sizeof(struct o2hb_region), GFP_KERNEL);
	if (reg == NULL)
		goto out; /* ENOMEM */

	config_item_init_type_name(&reg->hr_item, name, &o2hb_region_type);

	ret = &reg->hr_item;

	spin_lock(&o2hb_live_lock);
	list_add_tail(&reg->hr_all_item, &o2hb_all_regions);
	spin_unlock(&o2hb_live_lock);
out:
	if (ret == NULL)
		kfree(reg);

	return ret;
}

static void o2hb_heartbeat_group_drop_item(struct config_group *group,
					   struct config_item *item)
{
	struct o2hb_region *reg = to_o2hb_region(item);

	/* stop the thread when the user removes the region dir */
	if (reg->hr_task) {
		kthread_stop(reg->hr_task);
		reg->hr_task = NULL;
	}

	config_item_put(item);
}

struct o2hb_heartbeat_group_attribute {
	struct configfs_attribute attr;
	ssize_t (*show)(struct o2hb_heartbeat_group *, char *);
	ssize_t (*store)(struct o2hb_heartbeat_group *, const char *, size_t);
};

static ssize_t o2hb_heartbeat_group_show(struct config_item *item,
					 struct configfs_attribute *attr,
					 char *page)
{
	struct o2hb_heartbeat_group *reg = to_o2hb_heartbeat_group(to_config_group(item));
	struct o2hb_heartbeat_group_attribute *o2hb_heartbeat_group_attr =
		container_of(attr, struct o2hb_heartbeat_group_attribute, attr);
	ssize_t ret = 0;

	if (o2hb_heartbeat_group_attr->show)
		ret = o2hb_heartbeat_group_attr->show(reg, page);
	return ret;
}

static ssize_t o2hb_heartbeat_group_store(struct config_item *item,
					  struct configfs_attribute *attr,
					  const char *page, size_t count)
{
	struct o2hb_heartbeat_group *reg = to_o2hb_heartbeat_group(to_config_group(item));
	struct o2hb_heartbeat_group_attribute *o2hb_heartbeat_group_attr =
		container_of(attr, struct o2hb_heartbeat_group_attribute, attr);
	ssize_t ret = -EINVAL;

	if (o2hb_heartbeat_group_attr->store)
		ret = o2hb_heartbeat_group_attr->store(reg, page, count);
	return ret;
}

static ssize_t o2hb_heartbeat_group_threshold_show(struct o2hb_heartbeat_group *group,
						   char *page)
{
	return sprintf(page, "%u\n", o2hb_dead_threshold);
}

static ssize_t o2hb_heartbeat_group_threshold_store(struct o2hb_heartbeat_group *group,
						    const char *page,
						    size_t count)
{
	unsigned long tmp;
	char *p = (char *)page;

	tmp = simple_strtoul(p, &p, 10);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	/* this will validate ranges for us. */
	o2hb_dead_threshold_set((unsigned int) tmp);

	return count;
}

static struct o2hb_heartbeat_group_attribute o2hb_heartbeat_group_attr_threshold = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "dead_threshold",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2hb_heartbeat_group_threshold_show,
	.store	= o2hb_heartbeat_group_threshold_store,
};

static struct configfs_attribute *o2hb_heartbeat_group_attrs[] = {
	&o2hb_heartbeat_group_attr_threshold.attr,
	NULL,
};

static struct configfs_item_operations o2hb_heartbeat_group_item_ops = {
	.show_attribute		= o2hb_heartbeat_group_show,
	.store_attribute	= o2hb_heartbeat_group_store,
};

static struct configfs_group_operations o2hb_heartbeat_group_group_ops = {
	.make_item	= o2hb_heartbeat_group_make_item,
	.drop_item	= o2hb_heartbeat_group_drop_item,
};

static struct config_item_type o2hb_heartbeat_group_type = {
	.ct_group_ops	= &o2hb_heartbeat_group_group_ops,
	.ct_item_ops	= &o2hb_heartbeat_group_item_ops,
	.ct_attrs	= o2hb_heartbeat_group_attrs,
	.ct_owner	= THIS_MODULE,
};

/* this is just here to avoid touching group in heartbeat.h which the
 * entire damn world #includes */
struct config_group *o2hb_alloc_hb_set(void)
{
	struct o2hb_heartbeat_group *hs = NULL;
	struct config_group *ret = NULL;

	hs = kzalloc(sizeof(struct o2hb_heartbeat_group), GFP_KERNEL);
	if (hs == NULL)
		goto out;

	config_group_init_type_name(&hs->hs_group, "heartbeat",
				    &o2hb_heartbeat_group_type);

	ret = &hs->hs_group;
out:
	if (ret == NULL)
		kfree(hs);

	return ret;
}

void o2hb_free_hb_set(struct config_group *group)
{
	struct o2hb_heartbeat_group *hs = to_o2hb_heartbeat_group(group);
	kfree(hs);
}

/* hb callback registration and issuing */

static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type)
{
	if (type == O2HB_NUM_CB)
		return ERR_PTR(-EINVAL);

	return &o2hb_callbacks[type];
}

void o2hb_setup_callback(struct o2hb_callback_func *hc,
			 enum o2hb_callback_type type,
			 o2hb_cb_func *func,
			 void *data,
			 int priority)
{
	INIT_LIST_HEAD(&hc->hc_item);
	hc->hc_func = func;
	hc->hc_data = data;
	hc->hc_priority = priority;
	hc->hc_type = type;
	hc->hc_magic = O2HB_CB_MAGIC;
}
EXPORT_SYMBOL_GPL(o2hb_setup_callback);

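/* Insert the callback into the list for its event type, kept sorted by
 * ascending hc_priority, under the callback rwsem so registration never
 * races with callbacks being fired. */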
int o2hb_register_callback(struct o2hb_callback_func *hc)
{
	struct o2hb_callback_func *tmp;
	struct list_head *iter;
	struct o2hb_callback *hbcall;
	int ret;

	BUG_ON(hc->hc_magic != O2HB_CB_MAGIC);
	BUG_ON(!list_empty(&hc->hc_item));

	hbcall = hbcall_from_type(hc->hc_type);
	if (IS_ERR(hbcall)) {
		ret = PTR_ERR(hbcall);
		goto out;
	}

	down_write(&o2hb_callback_sem);

	list_for_each(iter, &hbcall->list) {
		tmp = list_entry(iter, struct o2hb_callback_func, hc_item);
		if (hc->hc_priority < tmp->hc_priority) {
			list_add_tail(&hc->hc_item, iter);
			break;
		}
	}
	if (list_empty(&hc->hc_item))
		list_add_tail(&hc->hc_item, &hbcall->list);

	up_write(&o2hb_callback_sem);
	ret = 0;
out:
	mlog(ML_HEARTBEAT, "returning %d on behalf of %p for funcs %p\n",
	     ret, __builtin_return_address(0), hc);
	return ret;
}
EXPORT_SYMBOL_GPL(o2hb_register_callback);

int o2hb_unregister_callback(struct o2hb_callback_func *hc)
{
	BUG_ON(hc->hc_magic != O2HB_CB_MAGIC);

	mlog(ML_HEARTBEAT, "on behalf of %p for funcs %p\n",
	     __builtin_return_address(0), hc);

	if (list_empty(&hc->hc_item))
		return 0;

	down_write(&o2hb_callback_sem);

	list_del_init(&hc->hc_item);

	up_write(&o2hb_callback_sem);

	return 0;
}
EXPORT_SYMBOL_GPL(o2hb_unregister_callback);

int o2hb_check_node_heartbeating(u8 node_num)
{
	unsigned long testing_map[BITS_TO_LONGS(O2NM_MAX_NODES)];

	o2hb_fill_node_map(testing_map, sizeof(testing_map));
	if (!test_bit(node_num, testing_map)) {
		mlog(ML_HEARTBEAT,
		     "node (%u) does not have heartbeating enabled.\n",
		     node_num);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(o2hb_check_node_heartbeating);

int o2hb_check_node_heartbeating_from_callback(u8 node_num)
{
	unsigned long testing_map[BITS_TO_LONGS(O2NM_MAX_NODES)];

	o2hb_fill_node_map_from_callback(testing_map, sizeof(testing_map));
	if (!test_bit(node_num, testing_map)) {
		mlog(ML_HEARTBEAT,
		     "node (%u) does not have heartbeating enabled.\n",
		     node_num);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(o2hb_check_node_heartbeating_from_callback);

/* Makes sure our local node is configured with a node number, and is
 * heartbeating. */
int o2hb_check_local_node_heartbeating(void)
{
	u8 node_num;

	/* if this node was set then we have networking */
	node_num = o2nm_this_node();
	if (node_num == O2NM_MAX_NODES) {
		mlog(ML_HEARTBEAT, "this node has not been configured.\n");
		return 0;
	}

	return o2hb_check_node_heartbeating(node_num);
}
EXPORT_SYMBOL_GPL(o2hb_check_local_node_heartbeating);

/*
 * this is just a hack until we get the plumbing which flips file systems
 * read only and drops the hb ref instead of killing the node dead.
 */
void o2hb_stop_all_regions(void)
{
	struct o2hb_region *reg;

	mlog(ML_ERROR, "stopping heartbeat on all active regions.\n");

	spin_lock(&o2hb_live_lock);

	list_for_each_entry(reg, &o2hb_all_regions, hr_all_item)
		reg->hr_unclean_stop = 1;

	spin_unlock(&o2hb_live_lock);
}
EXPORT_SYMBOL_GPL(o2hb_stop_all_regions);