/*
 * Copyright (C) 2005, 2006
 * Avishay Traeger (avishay@gmail.com)
 * Copyright (C) 2008, 2009
 * Boaz Harrosh <bharrosh@panasas.com>
 *
 * This file is part of exofs.
 *
 * exofs is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.  Since it is based on ext2, and the only
 * valid version of GPL for the Linux kernel is version 2, the only valid
 * version of GPL for exofs is version 2.
 *
 * exofs is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with exofs; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/slab.h>
#include <asm/div64.h>

#include <scsi/osd_ore.h>

#define ORE_ERR(fmt, a...) printk(KERN_ERR "ore: " fmt, ##a)

#ifdef CONFIG_EXOFS_DEBUG
#define ORE_DBGMSG(fmt, a...) \
	printk(KERN_NOTICE "ore @%s:%d: " fmt, __func__, __LINE__, ##a)
#else
#define ORE_DBGMSG(fmt, a...) \
	do { if (0) printk(fmt, ##a); } while (0)
#endif

/* u64 has problems with printk this will cast it to unsigned long long */
#define _LLU(x) (unsigned long long)(x)

#define ORE_DBGMSG2(M...) do {} while (0)
/* #define ORE_DBGMSG2 ORE_DBGMSG */

MODULE_AUTHOR("Boaz Harrosh <bharrosh@panasas.com>");
MODULE_DESCRIPTION("Objects Raid Engine ore.ko");
MODULE_LICENSE("GPL");
static void ore_calc_stripe_info(struct ore_layout *layout, u64 file_offset,
				 struct ore_striping_info *si);

static u8 *_ios_cred(struct ore_io_state *ios, unsigned index)
{
	return ios->oc->comps[index & ios->oc->single_comp].cred;
}

static struct osd_obj_id *_ios_obj(struct ore_io_state *ios, unsigned index)
{
	return &ios->oc->comps[index & ios->oc->single_comp].obj;
}

static struct osd_dev *_ios_od(struct ore_io_state *ios, unsigned index)
{
	return ore_comp_dev(ios->oc, index);
}

static int _get_io_state(struct ore_layout *layout,
			 struct ore_components *oc, unsigned numdevs,
			 struct ore_io_state **pios)
{
	struct ore_io_state *ios;

	/* TODO: Maybe use a kmem_cache per sbi of size
	 * exofs_io_state_size(layout->s_numdevs)
	 */
	ios = kzalloc(ore_io_state_size(numdevs), GFP_KERNEL);
	if (unlikely(!ios)) {
		ORE_DBGMSG("Failed kzalloc bytes=%d\n",
			   ore_io_state_size(numdevs));
		*pios = NULL;
		return -ENOMEM;
	}

	ios->layout = layout;
	ios->oc = oc;
	*pios = ios;
	return 0;
}
/* Allocate an io_state for only a single group of devices
 *
 * If a user needs to call ore_read/write() this version must be used, because
 * it allocates the extra state needed for striping and raid.
 * The ore might decide to IO less than @length bytes due to alignments
 * and constraints as follows:
 * - The IO cannot cross a group boundary.
 * - In raid5/6 the end of the IO must align at the end of a stripe, e.g.
 *   (@offset + @length) % strip_size == 0, or the complete range is within
 *   a single stripe.
 * - Memory conditions only permit a shorter IO. (A user can use @length=~0
 *   and check the returned ios->length for max_io_size.)
 *
 * The caller must check the returned ios->length (and/or ios->nr_pages) and
 * re-issue the pages that fall outside of ios->length.
 */
int ore_get_rw_state(struct ore_layout *layout, struct ore_components *oc,
		     bool is_reading, u64 offset, u64 length,
		     struct ore_io_state **pios)
{
	struct ore_io_state *ios;
	unsigned numdevs = layout->group_width * layout->mirrors_p1;
	int ret;

	ret = _get_io_state(layout, oc, numdevs, pios);
	if (unlikely(ret))
		return ret;

	ios = *pios;
	ios->reading = is_reading;
	ios->offset = offset;

	if (length) {
		ore_calc_stripe_info(layout, offset, &ios->si);
		ios->length = (length <= ios->si.group_length) ? length :
							ios->si.group_length;
		ios->nr_pages = (ios->length + PAGE_SIZE - 1) / PAGE_SIZE;
	}

	return 0;
}
EXPORT_SYMBOL(ore_get_rw_state);
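/*
 * Usage sketch (illustrative only, not part of the original driver): a
 * hypothetical caller with its own "layout", "oc", page array and
 * offset/length issues a write and re-issues whatever falls beyond the
 * possibly-shortened ios->length, as required by the comment above.
 *
 *	struct ore_io_state *ios;
 *	int ret;
 *
 *	while (length) {
 *		u64 done;
 *
 *		ret = ore_get_rw_state(layout, oc, false, offset, length, &ios);
 *		if (unlikely(ret))
 *			return ret;
 *
 *		ios->pages = pages;	(nr_pages was set by ore_get_rw_state)
 *
 *		ret = ore_write(ios);	(synchronous, since ios->done == NULL)
 *		done = ios->length;
 *		pages += ios->pages_consumed;
 *		ore_put_io_state(ios);
 *		if (unlikely(ret))
 *			return ret;
 *
 *		offset += done;		(only ios->length bytes were issued)
 *		length -= done;
 *	}
 */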
/* Allocate an io_state for all the devices in the comps array
 *
 * This version of io_state allocation is used mostly by create/remove
 * and trunc where we currently need all the devices. The only wasteful
 * bit is the read/write_attributes with no IO. Those sites should
 * be converted to use ore_get_rw_state() with length=0
 */
int ore_get_io_state(struct ore_layout *layout, struct ore_components *oc,
		     struct ore_io_state **pios)
{
	return _get_io_state(layout, oc, oc->numdevs, pios);
}
EXPORT_SYMBOL(ore_get_io_state);
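/*
 * Illustrative sketch (not part of the original code): the create/remove
 * callers mentioned above typically pair ore_get_io_state() with
 * ore_create()/ore_remove() and then release the state. The object ids are
 * assumed to have been filled into oc->comps by the caller beforehand.
 *
 *	struct ore_io_state *ios;
 *	int ret;
 *
 *	ret = ore_get_io_state(layout, oc, &ios);
 *	if (unlikely(ret))
 *		return ret;
 *
 *	ret = ore_create(ios);		(synchronous when ios->done == NULL)
 *	ore_put_io_state(ios);
 *	return ret;
 */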
void ore_put_io_state(struct ore_io_state *ios)
{
	if (ios) {
		unsigned i;

		for (i = 0; i < ios->numdevs; i++) {
			struct ore_per_dev_state *per_dev = &ios->per_dev[i];

			if (per_dev->or)
				osd_end_request(per_dev->or);
			if (per_dev->bio)
				bio_put(per_dev->bio);
		}

		kfree(ios);
	}
}
EXPORT_SYMBOL(ore_put_io_state);

static void _sync_done(struct ore_io_state *ios, void *p)
{
	struct completion *waiting = p;

	complete(waiting);
}

static void _last_io(struct kref *kref)
{
	struct ore_io_state *ios = container_of(
					kref, struct ore_io_state, kref);

	ios->done(ios, ios->private);
}

static void _done_io(struct osd_request *or, void *p)
{
	struct ore_io_state *ios = p;

	kref_put(&ios->kref, _last_io);
}

static int ore_io_execute(struct ore_io_state *ios)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	bool sync = (ios->done == NULL);
	int i, ret;

	if (sync) {
		ios->done = _sync_done;
		ios->private = &wait;
	}

	for (i = 0; i < ios->numdevs; i++) {
		struct osd_request *or = ios->per_dev[i].or;
		if (unlikely(!or))
			continue;

		ret = osd_finalize_request(or, 0, _ios_cred(ios, i), NULL);
		if (unlikely(ret)) {
			ORE_DBGMSG("Failed to osd_finalize_request() => %d\n",
				   ret);
			return ret;
		}
	}

	kref_init(&ios->kref);

	for (i = 0; i < ios->numdevs; i++) {
		struct osd_request *or = ios->per_dev[i].or;
		if (unlikely(!or))
			continue;

		kref_get(&ios->kref);
		osd_execute_request_async(or, _done_io, ios);
	}

	kref_put(&ios->kref, _last_io);
	ret = 0;

	if (sync) {
		wait_for_completion(&wait);
		ret = ore_check_io(ios, NULL);
	}
	return ret;
}

static void _clear_bio(struct bio *bio)
{
	struct bio_vec *bv;
	unsigned i;

	__bio_for_each_segment(bv, bio, i, 0) {
		unsigned this_count = bv->bv_len;

		if (likely(PAGE_SIZE == this_count))
			clear_highpage(bv->bv_page);
		else
			zero_user(bv->bv_page, bv->bv_offset, this_count);
	}
}
int ore_check_io(struct ore_io_state *ios, u64 *resid)
{
	enum osd_err_priority acumulated_osd_err = 0;
	int acumulated_lin_err = 0;
	int i;

	for (i = 0; i < ios->numdevs; i++) {
		struct osd_sense_info osi;
		struct osd_request *or = ios->per_dev[i].or;
		int ret;

		if (unlikely(!or))
			continue;

		ret = osd_req_decode_sense(or, &osi);
		if (likely(!ret))
			continue;

		if (OSD_ERR_PRI_CLEAR_PAGES == osi.osd_err_pri) {
			/* start read offset passed end of file */
			_clear_bio(ios->per_dev[i].bio);
			ORE_DBGMSG("start read offset passed end of file "
				   "offset=0x%llx, length=0x%llx\n",
				   _LLU(ios->per_dev[i].offset),
				   _LLU(ios->per_dev[i].length));

			continue; /* we recovered */
		}

		if (osi.osd_err_pri >= acumulated_osd_err) {
			acumulated_osd_err = osi.osd_err_pri;
			acumulated_lin_err = ret;
		}
	}

	/* TODO: raid specific residual calculations */
	if (resid) {
		if (likely(!acumulated_lin_err))
			*resid = 0;
		else
			*resid = ios->length;
	}

	return acumulated_lin_err;
}
EXPORT_SYMBOL(ore_check_io);
/*
 * L - logical offset into the file
 *
 * U - The number of bytes in a stripe within a group
 *
 *	U = stripe_unit * group_width
 *
 * T - The number of bytes striped within a group of component objects
 *     (before advancing to the next group)
 *
 *	T = stripe_unit * group_width * group_depth
 *
 * S - The number of bytes striped across all component objects
 *     before the pattern repeats
 *
 *	S = stripe_unit * group_width * group_depth * group_count
 *
 * M - The "major" (i.e., across all components) stripe number
 *
 *	M = L / S
 *
 * G - Counts the groups from the beginning of the major stripe
 *
 *	G = (L - (M * S)) / T	[or (L % S) / T]
 *
 * H - The byte offset within the group
 *
 *	H = (L - (M * S)) % T	[or (L % S) % T]
 *
 * N - The "minor" (i.e., across the group) stripe number
 *
 *	N = H / U
 *
 * C - The component index corresponding to L
 *
 *	C = (H - (N * U)) / stripe_unit + G * group_width
 *	[or (L % U) / stripe_unit + G * group_width]
 *
 * O - The component offset corresponding to L
 *
 *	O = L % stripe_unit + N * stripe_unit + M * group_depth * stripe_unit
 */
static void ore_calc_stripe_info(struct ore_layout *layout, u64 file_offset,
				 struct ore_striping_info *si)
{
	u32	stripe_unit = layout->stripe_unit;
	u32	group_width = layout->group_width;
	u64	group_depth = layout->group_depth;

	u32	U = stripe_unit * group_width;
	u64	T = U * group_depth;
	u64	S = T * layout->group_count;
	u64	M = div64_u64(file_offset, S);

	/*
	G = (L - (M * S)) / T
	H = (L - (M * S)) % T
	*/
	u64	LmodS = file_offset - M * S;
	u32	G = div64_u64(LmodS, T);
	u64	H = LmodS - G * T;

	u32	N = div_u64(H, U);

	/* "H - (N * U)" is just "H % U" so it's bound to u32 */
	si->dev = (u32)(H - (N * U)) / stripe_unit + G * group_width;
	si->dev *= layout->mirrors_p1;

	div_u64_rem(file_offset, stripe_unit, &si->unit_off);

	si->obj_offset = si->unit_off + (N * stripe_unit) +
				  (M * group_depth * stripe_unit);

	si->group_length = T - H;
	si->M = M;
}
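/*
 * Worked example of the formulas above (values chosen for illustration only):
 * with stripe_unit = 0x10000, group_width = 4, group_depth = 2,
 * group_count = 2 and mirrors_p1 = 1,
 *
 *	U = 0x40000,  T = 0x80000,  S = 0x100000
 *
 * a file_offset of L = 0x93000 gives
 *
 *	M = 0x93000 / 0x100000                = 0
 *	G = (0x93000 - 0) / 0x80000           = 1
 *	H = 0x93000 - 1 * 0x80000             = 0x13000
 *	N = 0x13000 / 0x40000                 = 0
 *	dev = (0x13000 - 0) / 0x10000 + 1 * 4 = 5	(times mirrors_p1 = 1)
 *	unit_off = 0x93000 % 0x10000          = 0x3000
 *	obj_offset = 0x3000 + 0 + 0           = 0x3000
 *	group_length = 0x80000 - 0x13000      = 0x6d000
 */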
static int _add_stripe_unit(struct ore_io_state *ios, unsigned *cur_pg,
			    unsigned pgbase, struct ore_per_dev_state *per_dev,
			    int cur_len)
{
	unsigned pg = *cur_pg;
	struct request_queue *q =
			osd_request_queue(_ios_od(ios, per_dev->dev));

	per_dev->length += cur_len;

	if (per_dev->bio == NULL) {
		unsigned pages_in_stripe = ios->layout->group_width *
					(ios->layout->stripe_unit / PAGE_SIZE);
		unsigned bio_size = (ios->nr_pages + pages_in_stripe) /
						ios->layout->group_width;

		per_dev->bio = bio_kmalloc(GFP_KERNEL, bio_size);
		if (unlikely(!per_dev->bio)) {
			ORE_DBGMSG("Failed to allocate BIO size=%u\n",
				   bio_size);
			return -ENOMEM;
		}
	}

	while (cur_len > 0) {
		unsigned pglen = min_t(unsigned, PAGE_SIZE - pgbase, cur_len);
		unsigned added_len;

		BUG_ON(ios->nr_pages <= pg);
		cur_len -= pglen;

		added_len = bio_add_pc_page(q, per_dev->bio, ios->pages[pg],
					    pglen, pgbase);
		if (unlikely(pglen != added_len))
			return -ENOMEM;
		pgbase = 0;
		++pg;
	}
	BUG_ON(cur_len);

	*cur_pg = pg;
	return 0;
}

static int _prepare_for_striping(struct ore_io_state *ios)
{
	struct ore_striping_info *si = &ios->si;
	unsigned stripe_unit = ios->layout->stripe_unit;
	unsigned mirrors_p1 = ios->layout->mirrors_p1;
	unsigned devs_in_group = ios->layout->group_width * mirrors_p1;
	unsigned dev = si->dev;
	unsigned first_dev = dev - (dev % devs_in_group);
	unsigned cur_pg = ios->pages_consumed;
	u64 length = ios->length;
	int ret = 0;

	if (!ios->pages) {
		ios->numdevs = ios->layout->mirrors_p1;
		return 0;
	}

	BUG_ON(length > si->group_length);

	while (length) {
		unsigned comp = dev - first_dev;
		struct ore_per_dev_state *per_dev = &ios->per_dev[comp];
		unsigned cur_len, page_off = 0;

		if (!per_dev->length) {
			per_dev->dev = dev;
			if (dev < si->dev) {
				per_dev->offset = si->obj_offset + stripe_unit -
								   si->unit_off;
				cur_len = stripe_unit;
			} else if (dev == si->dev) {
				per_dev->offset = si->obj_offset;
				cur_len = stripe_unit - si->unit_off;
				page_off = si->unit_off & ~PAGE_MASK;
				BUG_ON(page_off && (page_off != ios->pgbase));
			} else { /* dev > si->dev */
				per_dev->offset = si->obj_offset - si->unit_off;
				cur_len = stripe_unit;
			}
		} else {
			cur_len = stripe_unit;
		}
		if (cur_len >= length)
			cur_len = length;

		ret = _add_stripe_unit(ios, &cur_pg, page_off, per_dev,
				       cur_len);
		if (unlikely(ret))
			goto out;

		dev += mirrors_p1;
		dev = (dev % devs_in_group) + first_dev;

		length -= cur_len;
	}
out:
	ios->numdevs = devs_in_group;
	ios->pages_consumed = cur_pg;
	return ret;
}
int ore_create(struct ore_io_state *ios)
{
	int i, ret;

	for (i = 0; i < ios->oc->numdevs; i++) {
		struct osd_request *or;

		or = osd_start_request(_ios_od(ios, i), GFP_KERNEL);
		if (unlikely(!or)) {
			ORE_ERR("%s: osd_start_request failed\n", __func__);
			ret = -ENOMEM;
			goto out;
		}
		ios->per_dev[i].or = or;
		ios->numdevs++;

		osd_req_create_object(or, _ios_obj(ios, i));
	}
	ret = ore_io_execute(ios);

out:
	return ret;
}
EXPORT_SYMBOL(ore_create);

int ore_remove(struct ore_io_state *ios)
{
	int i, ret;

	for (i = 0; i < ios->oc->numdevs; i++) {
		struct osd_request *or;

		or = osd_start_request(_ios_od(ios, i), GFP_KERNEL);
		if (unlikely(!or)) {
			ORE_ERR("%s: osd_start_request failed\n", __func__);
			ret = -ENOMEM;
			goto out;
		}
		ios->per_dev[i].or = or;
		ios->numdevs++;

		osd_req_remove_object(or, _ios_obj(ios, i));
	}
	ret = ore_io_execute(ios);

out:
	return ret;
}
EXPORT_SYMBOL(ore_remove);
static int _write_mirror(struct ore_io_state *ios, int cur_comp)
{
	struct ore_per_dev_state *master_dev = &ios->per_dev[cur_comp];
	unsigned dev = ios->per_dev[cur_comp].dev;
	unsigned last_comp = cur_comp + ios->layout->mirrors_p1;
	int ret = 0;

	if (ios->pages && !master_dev->length)
		return 0; /* Just an empty slot */

	for (; cur_comp < last_comp; ++cur_comp, ++dev) {
		struct ore_per_dev_state *per_dev = &ios->per_dev[cur_comp];
		struct osd_request *or;

		or = osd_start_request(_ios_od(ios, dev), GFP_KERNEL);
		if (unlikely(!or)) {
			ORE_ERR("%s: osd_start_request failed\n", __func__);
			ret = -ENOMEM;
			goto out;
		}
		per_dev->or = or;

		if (ios->pages) {
			struct bio *bio;

			if (per_dev != master_dev) {
				bio = bio_kmalloc(GFP_KERNEL,
						  master_dev->bio->bi_max_vecs);
				if (unlikely(!bio)) {
					ORE_DBGMSG(
					      "Failed to allocate BIO size=%u\n",
					      master_dev->bio->bi_max_vecs);
					ret = -ENOMEM;
					goto out;
				}

				__bio_clone(bio, master_dev->bio);
				bio->bi_bdev = NULL;
				bio->bi_next = NULL;
				per_dev->offset = master_dev->offset;
				per_dev->length = master_dev->length;
				per_dev->bio = bio;
				per_dev->dev = dev;
			} else {
				bio = master_dev->bio;
				/* FIXME: bio_set_dir() */
				bio->bi_rw |= REQ_WRITE;
			}

			osd_req_write(or, _ios_obj(ios, dev), per_dev->offset,
				      bio, per_dev->length);
			ORE_DBGMSG("write(0x%llx) offset=0x%llx "
				   "length=0x%llx dev=%d\n",
				   _LLU(_ios_obj(ios, dev)->id),
				   _LLU(per_dev->offset),
				   _LLU(per_dev->length), dev);
		} else if (ios->kern_buff) {
			per_dev->offset = ios->si.obj_offset;
			per_dev->dev = ios->si.dev + dev;

			/* no cross device without page array */
			BUG_ON((ios->layout->group_width > 1) &&
			       (ios->si.unit_off + ios->length >
				ios->layout->stripe_unit));

			ret = osd_req_write_kern(or, _ios_obj(ios, per_dev->dev),
						 per_dev->offset,
						 ios->kern_buff, ios->length);
			if (unlikely(ret))
				goto out;
			ORE_DBGMSG2("write_kern(0x%llx) offset=0x%llx "
				    "length=0x%llx dev=%d\n",
				    _LLU(_ios_obj(ios, dev)->id),
				    _LLU(per_dev->offset),
				    _LLU(ios->length), per_dev->dev);
		} else {
			osd_req_set_attributes(or, _ios_obj(ios, dev));
			ORE_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n",
				    _LLU(_ios_obj(ios, dev)->id),
				    ios->out_attr_len, dev);
		}

		if (ios->out_attr)
			osd_req_add_set_attr_list(or, ios->out_attr,
						  ios->out_attr_len);

		if (ios->in_attr)
			osd_req_add_get_attr_list(or, ios->in_attr,
						  ios->in_attr_len);
	}

out:
	return ret;
}
int ore_write(struct ore_io_state *ios)
{
	int i;
	int ret;

	ret = _prepare_for_striping(ios);
	if (unlikely(ret))
		return ret;

	for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
		ret = _write_mirror(ios, i);
		if (unlikely(ret))
			return ret;
	}

	ret = ore_io_execute(ios);
	return ret;
}
EXPORT_SYMBOL(ore_write);
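/*
 * Asynchronous usage sketch (illustrative only): a caller may set ios->done
 * and ios->private before ore_read()/ore_write(). ore_io_execute() then
 * returns without waiting, and the callback runs once the last per-device
 * request completes (via _last_io() above). The callback is then responsible
 * for the final ore_check_io() and ore_put_io_state(). The names
 * example_done/my_request below are hypothetical.
 *
 *	static void example_done(struct ore_io_state *ios, void *private)
 *	{
 *		u64 resid;
 *		int ret = ore_check_io(ios, &resid);
 *
 *		... complete the caller's own request with ret/resid ...
 *		ore_put_io_state(ios);
 *	}
 *
 *	ios->done = example_done;
 *	ios->private = my_request;	(any caller cookie)
 *	ret = ore_write(ios);		(returns once the requests are queued)
 */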
static int _read_mirror(struct ore_io_state *ios, unsigned cur_comp)
{
	struct osd_request *or;
	struct ore_per_dev_state *per_dev = &ios->per_dev[cur_comp];
	struct osd_obj_id *obj = _ios_obj(ios, cur_comp);
	unsigned first_dev = (unsigned)obj->id;

	if (ios->pages && !per_dev->length)
		return 0; /* Just an empty slot */

	first_dev = per_dev->dev + first_dev % ios->layout->mirrors_p1;
	or = osd_start_request(_ios_od(ios, first_dev), GFP_KERNEL);
	if (unlikely(!or)) {
		ORE_ERR("%s: osd_start_request failed\n", __func__);
		return -ENOMEM;
	}
	per_dev->or = or;

	if (ios->pages) {
		osd_req_read(or, obj, per_dev->offset,
			     per_dev->bio, per_dev->length);
		ORE_DBGMSG("read(0x%llx) offset=0x%llx length=0x%llx"
			   " dev=%d\n", _LLU(obj->id),
			   _LLU(per_dev->offset), _LLU(per_dev->length),
			   first_dev);
	} else {
		BUG_ON(ios->kern_buff);

		osd_req_get_attributes(or, obj);
		ORE_DBGMSG2("obj(0x%llx) get_attributes=%d dev=%d\n",
			    _LLU(obj->id),
			    ios->in_attr_len, first_dev);
	}
	if (ios->out_attr)
		osd_req_add_set_attr_list(or, ios->out_attr, ios->out_attr_len);

	if (ios->in_attr)
		osd_req_add_get_attr_list(or, ios->in_attr, ios->in_attr_len);

	return 0;
}

int ore_read(struct ore_io_state *ios)
{
	int i;
	int ret;

	ret = _prepare_for_striping(ios);
	if (unlikely(ret))
		return ret;

	for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
		ret = _read_mirror(ios, i);
		if (unlikely(ret))
			return ret;
	}

	ret = ore_io_execute(ios);
	return ret;
}
EXPORT_SYMBOL(ore_read);
int extract_attr_from_ios(struct ore_io_state *ios, struct osd_attr *attr)
{
	struct osd_attr cur_attr = {.attr_page = 0}; /* start with zeros */
	void *iter = NULL;
	int nelem;

	do {
		nelem = 1;
		osd_req_decode_get_attr_list(ios->per_dev[0].or,
					     &cur_attr, &nelem, &iter);
		if ((cur_attr.attr_page == attr->attr_page) &&
		    (cur_attr.attr_id == attr->attr_id)) {
			attr->len = cur_attr.len;
			attr->val_ptr = cur_attr.val_ptr;
			return 0;
		}
	} while (iter);

	return -EIO;
}
EXPORT_SYMBOL(extract_attr_from_ios);
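/*
 * Illustrative sketch (not part of the original code): reading a single
 * attribute, e.g. the logical length defined at the bottom of this file,
 * through the attributes-only ore_read() path (ios->pages == NULL) and then
 * pulling it out of the decoded list with extract_attr_from_ios().
 *
 *	struct osd_attr attr = g_attr_logical_length;
 *	struct ore_io_state *ios;
 *	int ret;
 *
 *	ret = ore_get_io_state(layout, oc, &ios);
 *	if (unlikely(ret))
 *		return ret;
 *
 *	ios->in_attr = &attr;
 *	ios->in_attr_len = 1;
 *
 *	ret = ore_read(ios);		(no pages, so only get_attributes)
 *	if (!ret)
 *		ret = extract_attr_from_ios(ios, &attr);
 *	(attr.val_ptr points into the request and is only valid until
 *	 ore_put_io_state() ends the osd requests)
 *	ore_put_io_state(ios);
 */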
static int _truncate_mirrors(struct ore_io_state *ios, unsigned cur_comp,
			     struct osd_attr *attr)
{
	int last_comp = cur_comp + ios->layout->mirrors_p1;

	for (; cur_comp < last_comp; ++cur_comp) {
		struct ore_per_dev_state *per_dev = &ios->per_dev[cur_comp];
		struct osd_request *or;

		or = osd_start_request(_ios_od(ios, cur_comp), GFP_KERNEL);
		if (unlikely(!or)) {
			ORE_ERR("%s: osd_start_request failed\n", __func__);
			return -ENOMEM;
		}
		per_dev->or = or;

		osd_req_set_attributes(or, _ios_obj(ios, cur_comp));
		osd_req_add_set_attr_list(or, attr, 1);
	}

	return 0;
}

struct _trunc_info {
	struct ore_striping_info si;
	u64 prev_group_obj_off;
	u64 next_group_obj_off;

	unsigned first_group_dev;
	unsigned nex_group_dev;
};

static void _calc_trunk_info(struct ore_layout *layout, u64 file_offset,
			     struct _trunc_info *ti)
{
	unsigned stripe_unit = layout->stripe_unit;

	ore_calc_stripe_info(layout, file_offset, &ti->si);

	ti->prev_group_obj_off = ti->si.M * stripe_unit;
	ti->next_group_obj_off = ti->si.M ? (ti->si.M - 1) * stripe_unit : 0;

	ti->first_group_dev = ti->si.dev - (ti->si.dev % layout->group_width);
	ti->nex_group_dev = ti->first_group_dev + layout->group_width;
}

int ore_truncate(struct ore_layout *layout, struct ore_components *oc,
		 u64 size)
{
	struct ore_io_state *ios;
	struct exofs_trunc_attr {
		struct osd_attr attr;
		__be64 newsize;
	} *size_attrs;
	struct _trunc_info ti;
	int i, ret;

	ret = ore_get_io_state(layout, oc, &ios);
	if (unlikely(ret))
		return ret;

	_calc_trunk_info(ios->layout, size, &ti);

	size_attrs = kcalloc(ios->oc->numdevs, sizeof(*size_attrs),
			     GFP_KERNEL);
	if (unlikely(!size_attrs)) {
		ret = -ENOMEM;
		goto out;
	}

	ios->numdevs = ios->oc->numdevs;

	for (i = 0; i < ios->numdevs; ++i) {
		struct exofs_trunc_attr *size_attr = &size_attrs[i];
		u64 obj_size;

		if (i < ti.first_group_dev)
			obj_size = ti.prev_group_obj_off;
		else if (i >= ti.nex_group_dev)
			obj_size = ti.next_group_obj_off;
		else if (i < ti.si.dev) /* dev within this group */
			obj_size = ti.si.obj_offset +
				      ios->layout->stripe_unit - ti.si.unit_off;
		else if (i == ti.si.dev)
			obj_size = ti.si.obj_offset;
		else /* i > ti.si.dev */
			obj_size = ti.si.obj_offset - ti.si.unit_off;

		size_attr->newsize = cpu_to_be64(obj_size);
		size_attr->attr = g_attr_logical_length;
		size_attr->attr.val_ptr = &size_attr->newsize;

		ORE_DBGMSG("trunc(0x%llx) obj_offset=0x%llx dev=%d\n",
			   _LLU(oc->comps->obj.id), _LLU(obj_size), i);
		ret = _truncate_mirrors(ios, i * ios->layout->mirrors_p1,
					&size_attr->attr);
		if (unlikely(ret))
			goto out;
	}
	ret = ore_io_execute(ios);

out:
	kfree(size_attrs);
	ore_put_io_state(ios);
	return ret;
}
EXPORT_SYMBOL(ore_truncate);

const struct osd_attr g_attr_logical_length = ATTR_DEF(
	OSD_APAGE_OBJECT_INFORMATION, OSD_ATTR_OI_LOGICAL_LENGTH, 8);
EXPORT_SYMBOL(g_attr_logical_length);