ios.c
/*
 * Copyright (C) 2005, 2006
 * Avishay Traeger (avishay@gmail.com)
 * Copyright (C) 2008, 2009
 * Boaz Harrosh <bharrosh@panasas.com>
 *
 * This file is part of exofs.
 *
 * exofs is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.  Since it is based on ext2, and the only
 * valid version of GPL for the Linux kernel is version 2, the only valid
 * version of GPL for exofs is version 2.
 *
 * exofs is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with exofs; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <scsi/scsi_device.h>
#include <asm/div64.h>

#include "exofs.h"

#define EXOFS_DBGMSG2(M...) do {} while (0)
/* #define EXOFS_DBGMSG2 EXOFS_DBGMSG */
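
/* Build a no-security, all-operations capability for @obj (exofs does
 * not use OSD security; see osd_sec_init_nosec_doall_caps). */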
void exofs_make_credential(u8 cred_a[OSD_CAP_LEN], const struct osd_obj_id *obj)
{
        osd_sec_init_nosec_doall_caps(cred_a, obj, false, true);
}
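
/* Synchronous read of @length bytes at @offset from @obj into the
 * kernel buffer @p, built and executed as a single osd_request. */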
int exofs_read_kern(struct osd_dev *od, u8 *cred, struct osd_obj_id *obj,
                    u64 offset, void *p, unsigned length)
{
        struct osd_request *or = osd_start_request(od, GFP_KERNEL);
/*      struct osd_sense_info osi = {.key = 0};*/
        int ret;

        if (unlikely(!or)) {
                EXOFS_DBGMSG("%s: osd_start_request failed.\n", __func__);
                return -ENOMEM;
        }
        ret = osd_req_read_kern(or, obj, offset, p, length);
        if (unlikely(ret)) {
                EXOFS_DBGMSG("%s: osd_req_read_kern failed.\n", __func__);
                goto out;
        }

        ret = osd_finalize_request(or, 0, cred, NULL);
        if (unlikely(ret)) {
                EXOFS_DBGMSG("Failed to osd_finalize_request() => %d\n", ret);
                goto out;
        }

        ret = osd_execute_request(or);
        if (unlikely(ret))
                EXOFS_DBGMSG("osd_execute_request() => %d\n", ret);
        /* osd_req_decode_sense(or, ret); */

out:
        osd_end_request(or);
        return ret;
}
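
/* Allocate an io_state with one per-device slot for every device in
 * @layout and bind it to the layout; freed by exofs_put_io_state().
 * Typical use in this file (sketch):
 *      exofs_get_io_state(&sbi->layout, &ios);
 *      ios->obj.id = ...; ios->cred = ...;
 *      ... prepare per-device requests ...
 *      exofs_io_execute(ios);
 *      exofs_put_io_state(ios);
 */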
int exofs_get_io_state(struct exofs_layout *layout,
                       struct exofs_io_state **pios)
{
        struct exofs_io_state *ios;

        /*TODO: Maybe use kmem_cache per sbi of size
         * exofs_io_state_size(layout->s_numdevs)
         */
        ios = kzalloc(exofs_io_state_size(layout->s_numdevs), GFP_KERNEL);
        if (unlikely(!ios)) {
                EXOFS_DBGMSG("Failed kzalloc bytes=%d\n",
                             exofs_io_state_size(layout->s_numdevs));
                *pios = NULL;
                return -ENOMEM;
        }

        ios->layout = layout;
        ios->obj.partition = layout->s_pid;
        *pios = ios;
        return 0;
}
void exofs_put_io_state(struct exofs_io_state *ios)
{
        if (ios) {
                unsigned i;

                for (i = 0; i < ios->numdevs; i++) {
                        struct exofs_per_dev_state *per_dev = &ios->per_dev[i];

                        if (per_dev->or)
                                osd_end_request(per_dev->or);
                        if (per_dev->bio)
                                bio_put(per_dev->bio);
                }

                kfree(ios);
        }
}
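
/* Map component @layout_index of object @obj_no to an index into
 * layout->s_ods[]. The object id shifts the device window so that
 * objects are spread over all devices (moving-window layout). */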
unsigned exofs_layout_od_id(struct exofs_layout *layout,
                            osd_id obj_no, unsigned layout_index)
{
/*      switch (layout->lay_func) {
        case LAYOUT_MOVING_WINDOW:
        {*/
                unsigned dev_mod = obj_no;

                return (layout_index + dev_mod * layout->mirrors_p1) %
                                                        layout->s_numdevs;
/*      }
        case LAYOUT_FUNC_IMPLICIT:
                return layout->devs[layout_index];
        }*/
}

static inline struct osd_dev *exofs_ios_od(struct exofs_io_state *ios,
                                           unsigned layout_index)
{
        return ios->layout->s_ods[
                exofs_layout_od_id(ios->layout, ios->obj.id, layout_index)];
}
static void _sync_done(struct exofs_io_state *ios, void *p)
{
        struct completion *waiting = p;

        complete(waiting);
}

static void _last_io(struct kref *kref)
{
        struct exofs_io_state *ios = container_of(
                                        kref, struct exofs_io_state, kref);

        ios->done(ios, ios->private);
}

static void _done_io(struct osd_request *or, void *p)
{
        struct exofs_io_state *ios = p;

        kref_put(&ios->kref, _last_io);
}
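
/* Finalize and submit every prepared per-device request. ios->kref
 * counts in-flight requests plus one submission-loop reference, so
 * ios->done fires exactly once, after the last completion. With no
 * done callback set, the call is synchronous: wait for completion,
 * then decode the per-device errors. */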
static int exofs_io_execute(struct exofs_io_state *ios)
{
        DECLARE_COMPLETION_ONSTACK(wait);
        bool sync = (ios->done == NULL);
        int i, ret;

        if (sync) {
                ios->done = _sync_done;
                ios->private = &wait;
        }

        for (i = 0; i < ios->numdevs; i++) {
                struct osd_request *or = ios->per_dev[i].or;
                if (unlikely(!or))
                        continue;

                ret = osd_finalize_request(or, 0, ios->cred, NULL);
                if (unlikely(ret)) {
                        EXOFS_DBGMSG("Failed to osd_finalize_request() => %d\n",
                                     ret);
                        return ret;
                }
        }

        kref_init(&ios->kref);

        for (i = 0; i < ios->numdevs; i++) {
                struct osd_request *or = ios->per_dev[i].or;
                if (unlikely(!or))
                        continue;

                kref_get(&ios->kref);
                osd_execute_request_async(or, _done_io, ios);
        }

        kref_put(&ios->kref, _last_io);
        ret = 0;

        if (sync) {
                wait_for_completion(&wait);
                ret = exofs_check_io(ios, NULL);
        }
        return ret;
}
static void _clear_bio(struct bio *bio)
{
        struct bio_vec *bv;
        unsigned i;

        __bio_for_each_segment(bv, bio, i, 0) {
                unsigned this_count = bv->bv_len;

                if (likely(PAGE_SIZE == this_count))
                        clear_highpage(bv->bv_page);
                else
                        zero_user(bv->bv_page, bv->bv_offset, this_count);
        }
}
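
/* Accumulate completion status from all devices. A read that started
 * past the end of the object is recovered by zeroing its pages; for
 * anything else the highest-priority error is kept and returned. */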
int exofs_check_io(struct exofs_io_state *ios, u64 *resid)
{
        enum osd_err_priority accumulated_osd_err = 0;
        int accumulated_lin_err = 0;
        int i;

        for (i = 0; i < ios->numdevs; i++) {
                struct osd_sense_info osi;
                struct osd_request *or = ios->per_dev[i].or;
                int ret;

                if (unlikely(!or))
                        continue;

                ret = osd_req_decode_sense(or, &osi);
                if (likely(!ret))
                        continue;

                if (OSD_ERR_PRI_CLEAR_PAGES == osi.osd_err_pri) {
                        /* start read offset was past the end of file */
                        _clear_bio(ios->per_dev[i].bio);
                        EXOFS_DBGMSG("start read offset past end of file "
                                     "offset=0x%llx, length=0x%llx\n",
                                     _LLU(ios->per_dev[i].offset),
                                     _LLU(ios->per_dev[i].length));

                        continue; /* we recovered */
                }

                if (osi.osd_err_pri >= accumulated_osd_err) {
                        accumulated_osd_err = osi.osd_err_pri;
                        accumulated_lin_err = ret;
                }
        }

        /* TODO: raid specific residual calculations */
        if (resid) {
                if (likely(!accumulated_lin_err))
                        *resid = 0;
                else
                        *resid = ios->length;
        }

        return accumulated_lin_err;
}
/* REMOVEME: After review
   Some quoting from the standard

   L = logical offset into the file
   W = number of data components in a stripe
   S = W * stripe_unit (S is the stripe length)
   N = L / S (N is the stripe number)
   C = (L - (N * S)) / stripe_unit (C is the component)
   O = (N * stripe_unit) + (L % stripe_unit) (O is the object's offset)
*/
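
/* Editor's illustration of the formulas above: with stripe_unit = 64K
 * and W = 4, S = 256K. For L = 300K: N = 300K/256K = 1 (second stripe),
 * C = (300K - 256K)/64K = 0 (first component), and
 * O = 1*64K + 300K%64K = 64K + 44K = 108K. In _offset_dev_unit_off()
 * terms: stripe_mod = 44K, unit_off = 44K, obj_offset = 108K, and
 * dev = 0 * mirrors_p1. */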
static void _offset_dev_unit_off(struct exofs_io_state *ios, u64 file_offset,
                        u64 *obj_offset, unsigned *dev, unsigned *unit_off)
{
        unsigned stripe_unit = ios->layout->stripe_unit;
        unsigned stripe_length = stripe_unit * ios->layout->group_width;

        u64 stripe_no = file_offset;
        unsigned stripe_mod = do_div(stripe_no, stripe_length);

        *unit_off = stripe_mod % stripe_unit;
        *obj_offset = stripe_no * stripe_unit + *unit_off;
        *dev = stripe_mod / stripe_unit * ios->layout->mirrors_p1;
}
static int _add_stripe_unit(struct exofs_io_state *ios, unsigned *cur_pg,
                unsigned pgbase, struct exofs_per_dev_state *per_dev,
                int cur_len)
{
        unsigned pg = *cur_pg;
        struct request_queue *q =
                        osd_request_queue(exofs_ios_od(ios, per_dev->dev));

        per_dev->length += cur_len;

        if (per_dev->bio == NULL) {
                unsigned pages_in_stripe = ios->layout->group_width *
                                        (ios->layout->stripe_unit / PAGE_SIZE);
                unsigned bio_size = (ios->nr_pages + pages_in_stripe) /
                                                ios->layout->group_width;

                per_dev->bio = bio_kmalloc(GFP_KERNEL, bio_size);
                if (unlikely(!per_dev->bio)) {
                        EXOFS_DBGMSG("Failed to allocate BIO size=%u\n",
                                     bio_size);
                        return -ENOMEM;
                }
        }

        while (cur_len > 0) {
                unsigned pglen = min_t(unsigned, PAGE_SIZE - pgbase, cur_len);
                unsigned added_len;

                BUG_ON(ios->nr_pages <= pg);
                cur_len -= pglen;

                added_len = bio_add_pc_page(q, per_dev->bio, ios->pages[pg],
                                            pglen, pgbase);
                if (unlikely(pglen != added_len))
                        return -ENOMEM;
                pgbase = 0;
                ++pg;
        }
        BUG_ON(cur_len);

        *cur_pg = pg;
        return 0;
}
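
/* Carve ios->pages into per-device bios one stripe unit at a time,
 * advancing through the components round-robin (step mirrors_p1,
 * modulo s_numdevs). Without a page array only per_dev[0] is set up,
 * and a kern_buff I/O must not cross a stripe-unit boundary. */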
static int _prepare_for_striping(struct exofs_io_state *ios)
{
        u64 length = ios->length;
        u64 offset = ios->offset;
        unsigned stripe_unit = ios->layout->stripe_unit;
        unsigned comp = 0;
        unsigned stripes = 0;
        unsigned cur_pg = 0;
        int ret = 0;

        if (!ios->pages) {
                if (ios->kern_buff) {
                        struct exofs_per_dev_state *per_dev = &ios->per_dev[0];
                        unsigned unit_off;

                        _offset_dev_unit_off(ios, offset, &per_dev->offset,
                                             &per_dev->dev, &unit_off);
                        /* no cross device without page array */
                        BUG_ON((ios->layout->group_width > 1) &&
                               (unit_off + length > stripe_unit));
                }
                ios->numdevs = ios->layout->mirrors_p1;
                return 0;
        }

        while (length) {
                struct exofs_per_dev_state *per_dev = &ios->per_dev[comp];
                unsigned cur_len, page_off;

                if (!per_dev->length) {
                        unsigned unit_off;

                        _offset_dev_unit_off(ios, offset, &per_dev->offset,
                                             &per_dev->dev, &unit_off);
                        stripes++;
                        cur_len = min_t(u64, stripe_unit - unit_off, length);
                        offset += cur_len;
                        page_off = unit_off & ~PAGE_MASK;
                        BUG_ON(page_off != ios->pgbase);
                } else {
                        cur_len = min_t(u64, stripe_unit, length);
                        page_off = 0;
                }

                ret = _add_stripe_unit(ios, &cur_pg, page_off, per_dev,
                                       cur_len);
                if (unlikely(ret))
                        goto out;

                comp += ios->layout->mirrors_p1;
                comp %= ios->layout->s_numdevs;

                length -= cur_len;
        }
out:
        ios->numdevs = stripes * ios->layout->mirrors_p1;
        return ret;
}
int exofs_sbi_create(struct exofs_io_state *ios)
{
        int i, ret;

        for (i = 0; i < ios->layout->s_numdevs; i++) {
                struct osd_request *or;

                or = osd_start_request(exofs_ios_od(ios, i), GFP_KERNEL);
                if (unlikely(!or)) {
                        EXOFS_ERR("%s: osd_start_request failed\n", __func__);
                        ret = -ENOMEM;
                        goto out;
                }
                ios->per_dev[i].or = or;
                ios->numdevs++;

                osd_req_create_object(or, &ios->obj);
        }
        ret = exofs_io_execute(ios);

out:
        return ret;
}
int exofs_sbi_remove(struct exofs_io_state *ios)
{
        int i, ret;

        for (i = 0; i < ios->layout->s_numdevs; i++) {
                struct osd_request *or;

                or = osd_start_request(exofs_ios_od(ios, i), GFP_KERNEL);
                if (unlikely(!or)) {
                        EXOFS_ERR("%s: osd_start_request failed\n", __func__);
                        ret = -ENOMEM;
                        goto out;
                }
                ios->per_dev[i].or = or;
                ios->numdevs++;

                osd_req_remove_object(or, &ios->obj);
        }
        ret = exofs_io_execute(ios);

out:
        return ret;
}
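
/* Prepare write requests for one component and all of its mirrors.
 * The master bio is built once and cloned for each mirror, so the
 * same pages are written mirrors_p1 times. */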
static int _sbi_write_mirror(struct exofs_io_state *ios, int cur_comp)
{
        struct exofs_per_dev_state *master_dev = &ios->per_dev[cur_comp];
        unsigned dev = ios->per_dev[cur_comp].dev;
        unsigned last_comp = cur_comp + ios->layout->mirrors_p1;
        int ret = 0;

        for (; cur_comp < last_comp; ++cur_comp, ++dev) {
                struct exofs_per_dev_state *per_dev = &ios->per_dev[cur_comp];
                struct osd_request *or;

                or = osd_start_request(exofs_ios_od(ios, dev), GFP_KERNEL);
                if (unlikely(!or)) {
                        EXOFS_ERR("%s: osd_start_request failed\n", __func__);
                        ret = -ENOMEM;
                        goto out;
                }
                per_dev->or = or;
                per_dev->offset = master_dev->offset;

                if (ios->pages) {
                        struct bio *bio;

                        if (per_dev != master_dev) {
                                bio = bio_kmalloc(GFP_KERNEL,
                                                  master_dev->bio->bi_max_vecs);
                                if (unlikely(!bio)) {
                                        EXOFS_DBGMSG(
                                              "Failed to allocate BIO size=%u\n",
                                              master_dev->bio->bi_max_vecs);
                                        ret = -ENOMEM;
                                        goto out;
                                }

                                __bio_clone(bio, master_dev->bio);
                                bio->bi_bdev = NULL;
                                bio->bi_next = NULL;
                                per_dev->length = master_dev->length;
                                per_dev->bio = bio;
                                per_dev->dev = dev;
                        } else {
                                bio = master_dev->bio;
                                /* FIXME: bio_set_dir() */
                                bio->bi_rw |= (1 << BIO_RW);
                        }

                        osd_req_write(or, &ios->obj, per_dev->offset, bio,
                                      per_dev->length);
                        EXOFS_DBGMSG("write(0x%llx) offset=0x%llx "
                                     "length=0x%llx dev=%d\n",
                                     _LLU(ios->obj.id), _LLU(per_dev->offset),
                                     _LLU(per_dev->length), dev);
                } else if (ios->kern_buff) {
                        ret = osd_req_write_kern(or, &ios->obj, per_dev->offset,
                                                 ios->kern_buff, ios->length);
                        if (unlikely(ret))
                                goto out;
                        EXOFS_DBGMSG2("write_kern(0x%llx) offset=0x%llx "
                                      "length=0x%llx dev=%d\n",
                                      _LLU(ios->obj.id), _LLU(per_dev->offset),
                                      _LLU(ios->length), dev);
                } else {
                        osd_req_set_attributes(or, &ios->obj);
                        EXOFS_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n",
                                      _LLU(ios->obj.id), ios->out_attr_len, dev);
                }

                if (ios->out_attr)
                        osd_req_add_set_attr_list(or, ios->out_attr,
                                                  ios->out_attr_len);

                if (ios->in_attr)
                        osd_req_add_get_attr_list(or, ios->in_attr,
                                                  ios->in_attr_len);
        }

out:
        return ret;
}
int exofs_sbi_write(struct exofs_io_state *ios)
{
        int i;
        int ret;

        ret = _prepare_for_striping(ios);
        if (unlikely(ret))
                return ret;

        for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
                ret = _sbi_write_mirror(ios, i);
                if (unlikely(ret))
                        return ret;
        }

        ret = exofs_io_execute(ios);
        return ret;
}
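
/* A read needs only one copy, so pick a mirror by obj.id modulo
 * mirrors_p1; different objects thus spread their reads over the
 * mirror set. */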
static int _sbi_read_mirror(struct exofs_io_state *ios, unsigned cur_comp)
{
        struct osd_request *or;
        struct exofs_per_dev_state *per_dev = &ios->per_dev[cur_comp];
        unsigned first_dev = (unsigned)ios->obj.id;

        first_dev = per_dev->dev + first_dev % ios->layout->mirrors_p1;
        or = osd_start_request(exofs_ios_od(ios, first_dev), GFP_KERNEL);
        if (unlikely(!or)) {
                EXOFS_ERR("%s: osd_start_request failed\n", __func__);
                return -ENOMEM;
        }
        per_dev->or = or;

        if (ios->pages) {
                osd_req_read(or, &ios->obj, per_dev->offset,
                             per_dev->bio, per_dev->length);
                EXOFS_DBGMSG("read(0x%llx) offset=0x%llx length=0x%llx"
                             " dev=%d\n", _LLU(ios->obj.id),
                             _LLU(per_dev->offset), _LLU(per_dev->length),
                             first_dev);
        } else if (ios->kern_buff) {
                int ret = osd_req_read_kern(or, &ios->obj, per_dev->offset,
                                            ios->kern_buff, ios->length);
                EXOFS_DBGMSG2("read_kern(0x%llx) offset=0x%llx "
                              "length=0x%llx dev=%d ret=>%d\n",
                              _LLU(ios->obj.id), _LLU(per_dev->offset),
                              _LLU(ios->length), first_dev, ret);
                if (unlikely(ret))
                        return ret;
        } else {
                osd_req_get_attributes(or, &ios->obj);
                EXOFS_DBGMSG2("obj(0x%llx) get_attributes=%d dev=%d\n",
                              _LLU(ios->obj.id), ios->in_attr_len, first_dev);
        }

        if (ios->out_attr)
                osd_req_add_set_attr_list(or, ios->out_attr, ios->out_attr_len);

        if (ios->in_attr)
                osd_req_add_get_attr_list(or, ios->in_attr, ios->in_attr_len);

        return 0;
}
int exofs_sbi_read(struct exofs_io_state *ios)
{
        int i;
        int ret;

        ret = _prepare_for_striping(ios);
        if (unlikely(ret))
                return ret;

        for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
                ret = _sbi_read_mirror(ios, i);
                if (unlikely(ret))
                        return ret;
        }

        ret = exofs_io_execute(ios);
        return ret;
}
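
/* Walk the decoded get-attr list of the first per-device request
 * looking for @attr's page/id; on a match, point @attr at the value
 * held inside the request buffer. */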
int extract_attr_from_ios(struct exofs_io_state *ios, struct osd_attr *attr)
{
        struct osd_attr cur_attr = {.attr_page = 0}; /* start with zeros */
        void *iter = NULL;
        int nelem;

        do {
                nelem = 1;
                osd_req_decode_get_attr_list(ios->per_dev[0].or,
                                             &cur_attr, &nelem, &iter);
                if ((cur_attr.attr_page == attr->attr_page) &&
                    (cur_attr.attr_id == attr->attr_id)) {
                        attr->len = cur_attr.len;
                        attr->val_ptr = cur_attr.val_ptr;
                        return 0;
                }
        } while (iter);

        return -EIO;
}
static int _truncate_mirrors(struct exofs_io_state *ios, unsigned cur_comp,
                             struct osd_attr *attr)
{
        int last_comp = cur_comp + ios->layout->mirrors_p1;

        for (; cur_comp < last_comp; ++cur_comp) {
                struct exofs_per_dev_state *per_dev = &ios->per_dev[cur_comp];
                struct osd_request *or;

                or = osd_start_request(exofs_ios_od(ios, cur_comp), GFP_KERNEL);
                if (unlikely(!or)) {
                        EXOFS_ERR("%s: osd_start_request failed\n", __func__);
                        return -ENOMEM;
                }
                per_dev->or = or;

                osd_req_set_attributes(or, &ios->obj);
                osd_req_add_set_attr_list(or, attr, 1);
        }

        return 0;
}
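
/* Truncate a striped object set to byte-size @size. Each component is
 * cut at its own boundary: components before the one holding @size end
 * at the end of their unit in the final stripe, the component holding
 * it at the exact offset, and later components at the start of their
 * unit. */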
int exofs_oi_truncate(struct exofs_i_info *oi, u64 size)
{
        struct exofs_sb_info *sbi = oi->vfs_inode.i_sb->s_fs_info;
        struct exofs_io_state *ios;
        struct exofs_trunc_attr {
                struct osd_attr attr;
                __be64 newsize;
        } *size_attrs;
        u64 this_obj_size;
        unsigned dev;
        unsigned unit_off;
        int i, ret;

        ret = exofs_get_io_state(&sbi->layout, &ios);
        if (unlikely(ret))
                return ret;

        size_attrs = kcalloc(ios->layout->group_width, sizeof(*size_attrs),
                             GFP_KERNEL);
        if (unlikely(!size_attrs)) {
                ret = -ENOMEM;
                goto out;
        }

        ios->obj.id = exofs_oi_objno(oi);
        ios->cred = oi->i_cred;

        ios->numdevs = ios->layout->s_numdevs;
        _offset_dev_unit_off(ios, size, &this_obj_size, &dev, &unit_off);

        for (i = 0; i < ios->layout->group_width; ++i) {
                struct exofs_trunc_attr *size_attr = &size_attrs[i];
                u64 obj_size;

                if (i < dev)
                        obj_size = this_obj_size +
                                        ios->layout->stripe_unit - unit_off;
                else if (i == dev)
                        obj_size = this_obj_size;
                else /* i > dev */
                        obj_size = this_obj_size - unit_off;

                size_attr->newsize = cpu_to_be64(obj_size);
                size_attr->attr = g_attr_logical_length;
                size_attr->attr.val_ptr = &size_attr->newsize;

                ret = _truncate_mirrors(ios, i * ios->layout->mirrors_p1,
                                        &size_attr->attr);
                if (unlikely(ret))
                        goto out;
        }
        ret = exofs_io_execute(ios);

out:
        kfree(size_attrs);
        exofs_put_io_state(ios);
        return ret;
}