/*
 * Copyright (C) 2005, 2006
 * Avishay Traeger (avishay@gmail.com)
 * Copyright (C) 2008, 2009
 * Boaz Harrosh <bharrosh@panasas.com>
 *
 * This file is part of exofs.
 *
 * exofs is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.  Since it is based on ext2, and the only
 * valid version of GPL for the Linux kernel is version 2, the only valid
 * version of GPL for exofs is version 2.
 *
 * exofs is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with exofs; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/slab.h>
#include <scsi/scsi_device.h>
#include <asm/div64.h>

#include "exofs.h"

#define EXOFS_DBGMSG2(M...) do {} while (0)
/* #define EXOFS_DBGMSG2 EXOFS_DBGMSG */
void exofs_make_credential(u8 cred_a[OSD_CAP_LEN], const struct osd_obj_id *obj)
{
	osd_sec_init_nosec_doall_caps(cred_a, obj, false, true);
}
int exofs_read_kern(struct osd_dev *od, u8 *cred, struct osd_obj_id *obj,
		    u64 offset, void *p, unsigned length)
{
	struct osd_request *or = osd_start_request(od, GFP_KERNEL);
/*	struct osd_sense_info osi = {.key = 0};*/
	int ret;

	if (unlikely(!or)) {
		EXOFS_DBGMSG("%s: osd_start_request failed.\n", __func__);
		return -ENOMEM;
	}
	ret = osd_req_read_kern(or, obj, offset, p, length);
	if (unlikely(ret)) {
		EXOFS_DBGMSG("%s: osd_req_read_kern failed.\n", __func__);
		goto out;
	}

	ret = osd_finalize_request(or, 0, cred, NULL);
	if (unlikely(ret)) {
		EXOFS_DBGMSG("Failed to osd_finalize_request() => %d\n", ret);
		goto out;
	}

	ret = osd_execute_request(or);
	if (unlikely(ret))
		EXOFS_DBGMSG("osd_execute_request() => %d\n", ret);
	/* osd_req_decode_sense(or, ret); */

out:
	osd_end_request(or);
	return ret;
}
int exofs_get_io_state(struct exofs_layout *layout,
		       struct exofs_io_state **pios)
{
	struct exofs_io_state *ios;

	/*TODO: Maybe use a kmem_cache per sbi of size
	 * exofs_io_state_size(layout->s_numdevs)
	 */
	ios = kzalloc(exofs_io_state_size(layout->s_numdevs), GFP_KERNEL);
	if (unlikely(!ios)) {
		EXOFS_DBGMSG("Failed kzalloc bytes=%d\n",
			     exofs_io_state_size(layout->s_numdevs));
		*pios = NULL;
		return -ENOMEM;
	}

	ios->layout = layout;
	ios->obj.partition = layout->s_pid;
	*pios = ios;
	return 0;
}
void exofs_put_io_state(struct exofs_io_state *ios)
{
	if (ios) {
		unsigned i;

		for (i = 0; i < ios->numdevs; i++) {
			struct exofs_per_dev_state *per_dev = &ios->per_dev[i];

			if (per_dev->or)
				osd_end_request(per_dev->or);
			if (per_dev->bio)
				bio_put(per_dev->bio);
		}

		kfree(ios);
	}
}
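
/*
 * Typical io_state life cycle (an illustrative sketch only; the exact
 * field setup depends on the caller, compare exofs_oi_truncate() below):
 *
 *	struct exofs_io_state *ios;
 *	int ret = exofs_get_io_state(&sbi->layout, &ios);
 *
 *	if (unlikely(ret))
 *		return ret;
 *	ios->obj.id = exofs_oi_objno(oi);
 *	ios->cred = oi->i_cred;
 *	ios->pages = pages;		(or ios->kern_buff for a kernel buffer)
 *	ios->nr_pages = nr_pages;
 *	ios->offset = offset;
 *	ios->length = length;
 *	ret = exofs_sbi_read(ios);	(or exofs_sbi_write)
 *	if (likely(!ret))
 *		ret = exofs_check_io(ios, NULL);
 *	exofs_put_io_state(ios);
 */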
unsigned exofs_layout_od_id(struct exofs_layout *layout,
			    osd_id obj_no, unsigned layout_index)
{
/*	switch (layout->lay_func) {
	case LAYOUT_MOVING_WINDOW:
	{*/
		unsigned dev_mod = obj_no;

		return (layout_index + dev_mod * layout->mirrors_p1) %
							      layout->s_numdevs;
/*	}
	case LAYOUT_FUNC_IMPLICT:
		return layout->devs[layout_index];
	}*/
}
static inline struct osd_dev *exofs_ios_od(struct exofs_io_state *ios,
					   unsigned layout_index)
{
	return ios->layout->s_ods[
		exofs_layout_od_id(ios->layout, ios->obj.id, layout_index)];
}
static void _sync_done(struct exofs_io_state *ios, void *p)
{
	struct completion *waiting = p;

	complete(waiting);
}

static void _last_io(struct kref *kref)
{
	struct exofs_io_state *ios = container_of(
					kref, struct exofs_io_state, kref);

	ios->done(ios, ios->private);
}

static void _done_io(struct osd_request *or, void *p)
{
	struct exofs_io_state *ios = p;

	kref_put(&ios->kref, _last_io);
}
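
/*
 * Finalize and fire all per-device requests of an io_state.  The kref is
 * taken once per in-flight request plus once by the submitter, so ios->done
 * (via _last_io) runs exactly once, after the last device completes.  When
 * the caller did not set ios->done, the call is made synchronous through an
 * on-stack completion and the combined result is returned.
 */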
static int exofs_io_execute(struct exofs_io_state *ios)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	bool sync = (ios->done == NULL);
	int i, ret;

	if (sync) {
		ios->done = _sync_done;
		ios->private = &wait;
	}

	for (i = 0; i < ios->numdevs; i++) {
		struct osd_request *or = ios->per_dev[i].or;
		if (unlikely(!or))
			continue;

		ret = osd_finalize_request(or, 0, ios->cred, NULL);
		if (unlikely(ret)) {
			EXOFS_DBGMSG("Failed to osd_finalize_request() => %d\n",
				     ret);
			return ret;
		}
	}

	kref_init(&ios->kref);

	for (i = 0; i < ios->numdevs; i++) {
		struct osd_request *or = ios->per_dev[i].or;
		if (unlikely(!or))
			continue;

		kref_get(&ios->kref);
		osd_execute_request_async(or, _done_io, ios);
	}

	kref_put(&ios->kref, _last_io);
	ret = 0;

	if (sync) {
		wait_for_completion(&wait);
		ret = exofs_check_io(ios, NULL);
	}
	return ret;
}
static void _clear_bio(struct bio *bio)
{
	struct bio_vec *bv;
	unsigned i;

	__bio_for_each_segment(bv, bio, i, 0) {
		unsigned this_count = bv->bv_len;

		if (likely(PAGE_SIZE == this_count))
			clear_highpage(bv->bv_page);
		else
			zero_user(bv->bv_page, bv->bv_offset, this_count);
	}
}
int exofs_check_io(struct exofs_io_state *ios, u64 *resid)
{
	enum osd_err_priority accumulated_osd_err = 0;
	int accumulated_lin_err = 0;
	int i;

	for (i = 0; i < ios->numdevs; i++) {
		struct osd_sense_info osi;
		struct osd_request *or = ios->per_dev[i].or;
		int ret;

		if (unlikely(!or))
			continue;

		ret = osd_req_decode_sense(or, &osi);
		if (likely(!ret))
			continue;

		if (OSD_ERR_PRI_CLEAR_PAGES == osi.osd_err_pri) {
			/* start read offset passed end of file */
			_clear_bio(ios->per_dev[i].bio);
			EXOFS_DBGMSG("start read offset passed end of file "
				     "offset=0x%llx, length=0x%llx\n",
				     _LLU(ios->per_dev[i].offset),
				     _LLU(ios->per_dev[i].length));

			continue; /* we recovered */
		}

		if (osi.osd_err_pri >= accumulated_osd_err) {
			accumulated_osd_err = osi.osd_err_pri;
			accumulated_lin_err = ret;
		}
	}

	/* TODO: raid specific residual calculations */
	if (resid) {
		if (likely(!accumulated_lin_err))
			*resid = 0;
		else
			*resid = ios->length;
	}

	return accumulated_lin_err;
}
/*
 * L - logical offset into the file
 *
 * U - The number of bytes in a stripe within a group
 *
 *	U = stripe_unit * group_width
 *
 * T - The number of bytes striped within a group of component objects
 *     (before advancing to the next group)
 *
 *	T = stripe_unit * group_width * group_depth
 *
 * S - The number of bytes striped across all component objects
 *     before the pattern repeats
 *
 *	S = stripe_unit * group_width * group_depth * group_count
 *
 * M - The "major" (i.e., across all components) stripe number
 *
 *	M = L / S
 *
 * G - Counts the groups from the beginning of the major stripe
 *
 *	G = (L - (M * S)) / T	[or (L % S) / T]
 *
 * H - The byte offset within the group
 *
 *	H = (L - (M * S)) % T	[or (L % S) % T]
 *
 * N - The "minor" (i.e., across the group) stripe number
 *
 *	N = H / U
 *
 * C - The component index corresponding to L
 *
 *	C = (H - (N * U)) / stripe_unit + G * group_width
 *	[or (L % U) / stripe_unit + G * group_width]
 *
 * O - The component offset corresponding to L
 *
 *	O = L % stripe_unit + N * stripe_unit + M * group_depth * stripe_unit
 */
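/*
 * Worked example (illustrative parameter values only, not exofs defaults):
 * with stripe_unit = 64KiB, group_width = 4, group_depth = 2 and
 * group_count = 2 we get U = 256KiB, T = 512KiB, S = 1MiB.  For L = 600000:
 *
 *	M = 600000 / 1048576			= 0
 *	G = (600000 % 1048576) / 524288		= 1
 *	H = (600000 % 1048576) % 524288		= 75712
 *	N = 75712 / 262144			= 0
 *	C = 75712 / 65536 + 1 * 4		= 5
 *	O = 600000 % 65536			= 10176
 *
 * i.e. byte 600000 of the file lives in component object 5 at object
 * offset 10176.
 */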
struct _striping_info {
	u64 obj_offset;
	u64 group_length;
	u64 total_group_length;
	u64 Major;
	unsigned dev;
	unsigned unit_off;
};
static void _calc_stripe_info(struct exofs_io_state *ios, u64 file_offset,
			      struct _striping_info *si)
{
	u32	stripe_unit = ios->layout->stripe_unit;
	u32	group_width = ios->layout->group_width;
	u64	group_depth = ios->layout->group_depth;

	u32	U = stripe_unit * group_width;
	u64	T = U * group_depth;
	u64	S = T * ios->layout->group_count;
	u64	M = div64_u64(file_offset, S);

	/*
	G = (L - (M * S)) / T
	H = (L - (M * S)) % T
	*/
	u64	LmodS = file_offset - M * S;
	u32	G = div64_u64(LmodS, T);
	u64	H = LmodS - G * T;

	u32	N = div_u64(H, U);

	/* "H - (N * U)" is just "H % U" so it's bound to u32 */
	si->dev = (u32)(H - (N * U)) / stripe_unit + G * group_width;
	si->dev *= ios->layout->mirrors_p1;

	div_u64_rem(file_offset, stripe_unit, &si->unit_off);

	si->obj_offset = si->unit_off + (N * stripe_unit) +
				  (M * group_depth * stripe_unit);

	si->group_length = T - H;
	si->total_group_length = T;
	si->Major = M;
}
static int _add_stripe_unit(struct exofs_io_state *ios, unsigned *cur_pg,
		unsigned pgbase, struct exofs_per_dev_state *per_dev,
		int cur_len)
{
	unsigned pg = *cur_pg;
	struct request_queue *q =
			osd_request_queue(exofs_ios_od(ios, per_dev->dev));

	per_dev->length += cur_len;

	if (per_dev->bio == NULL) {
		unsigned pages_in_stripe = ios->layout->group_width *
					(ios->layout->stripe_unit / PAGE_SIZE);
		unsigned bio_size = (ios->nr_pages + pages_in_stripe) /
						ios->layout->group_width;

		per_dev->bio = bio_kmalloc(GFP_KERNEL, bio_size);
		if (unlikely(!per_dev->bio)) {
			EXOFS_DBGMSG("Failed to allocate BIO size=%u\n",
				     bio_size);
			return -ENOMEM;
		}
	}

	while (cur_len > 0) {
		unsigned pglen = min_t(unsigned, PAGE_SIZE - pgbase, cur_len);
		unsigned added_len;

		BUG_ON(ios->nr_pages <= pg);
		cur_len -= pglen;

		added_len = bio_add_pc_page(q, per_dev->bio, ios->pages[pg],
					    pglen, pgbase);
		if (unlikely(pglen != added_len))
			return -ENOMEM;
		pgbase = 0;
		++pg;
	}
	BUG_ON(cur_len);

	*cur_pg = pg;
	return 0;
}
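
/*
 * Lay out @length bytes of one group onto the per-device slots starting at
 * @first_comp.  The first time a device is touched its object offset is
 * derived from @si (the component that holds the starting byte gets a
 * partial first unit of stripe_unit - unit_off); on later rounds each
 * device simply receives full stripe_unit chunks.
 */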
static int _prepare_one_group(struct exofs_io_state *ios, u64 length,
			      struct _striping_info *si, unsigned first_comp)
{
	unsigned stripe_unit = ios->layout->stripe_unit;
	unsigned mirrors_p1 = ios->layout->mirrors_p1;
	unsigned devs_in_group = ios->layout->group_width * mirrors_p1;
	unsigned dev = si->dev;
	unsigned first_dev = dev - (dev % devs_in_group);
	unsigned comp = first_comp + (dev - first_dev);
	unsigned max_comp = ios->numdevs ? ios->numdevs - mirrors_p1 : 0;
	unsigned cur_pg = ios->pages_consumed;
	int ret = 0;

	while (length) {
		struct exofs_per_dev_state *per_dev = &ios->per_dev[comp];
		unsigned cur_len, page_off = 0;

		if (!per_dev->length) {
			per_dev->dev = dev;
			if (dev < si->dev) {
				per_dev->offset = si->obj_offset + stripe_unit -
								   si->unit_off;
				cur_len = stripe_unit;
			} else if (dev == si->dev) {
				per_dev->offset = si->obj_offset;
				cur_len = stripe_unit - si->unit_off;
				page_off = si->unit_off & ~PAGE_MASK;
				BUG_ON(page_off && (page_off != ios->pgbase));
			} else { /* dev > si->dev */
				per_dev->offset = si->obj_offset - si->unit_off;
				cur_len = stripe_unit;
			}

			if (max_comp < comp)
				max_comp = comp;

			dev += mirrors_p1;
			dev = (dev % devs_in_group) + first_dev;
		} else {
			cur_len = stripe_unit;
		}
		if (cur_len >= length)
			cur_len = length;

		ret = _add_stripe_unit(ios, &cur_pg, page_off, per_dev,
				       cur_len);
		if (unlikely(ret))
			goto out;

		comp += mirrors_p1;
		comp = (comp % devs_in_group) + first_comp;

		length -= cur_len;
	}
out:
	ios->numdevs = max_comp + mirrors_p1;
	ios->pages_consumed = cur_pg;
	return ret;
}
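
/*
 * Translate ios->offset/ios->length into the per-component requests.  A
 * kern_buff I/O is limited to a single component (see the BUG_ON); a
 * page-array I/O is split group by group via _prepare_one_group(), with
 * the striping info advanced to the start of the next group after each
 * pass.
 */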
static int _prepare_for_striping(struct exofs_io_state *ios)
{
	u64 length = ios->length;
	struct _striping_info si;
	unsigned devs_in_group = ios->layout->group_width *
				 ios->layout->mirrors_p1;
	unsigned first_comp = 0;
	int ret = 0;

	_calc_stripe_info(ios, ios->offset, &si);

	if (!ios->pages) {
		if (ios->kern_buff) {
			struct exofs_per_dev_state *per_dev = &ios->per_dev[0];

			per_dev->offset = si.obj_offset;
			per_dev->dev = si.dev;

			/* no cross device without page array */
			BUG_ON((ios->layout->group_width > 1) &&
			       (si.unit_off + ios->length >
				ios->layout->stripe_unit));
		}
		ios->numdevs = ios->layout->mirrors_p1;
		return 0;
	}

	while (length) {
		if (length < si.group_length)
			si.group_length = length;

		ret = _prepare_one_group(ios, si.group_length, &si, first_comp);
		if (unlikely(ret))
			goto out;

		length -= si.group_length;

		si.group_length = si.total_group_length;
		si.unit_off = 0;
		++si.Major;
		si.obj_offset = si.Major * ios->layout->stripe_unit *
						ios->layout->group_depth;

		si.dev = (si.dev - (si.dev % devs_in_group)) + devs_in_group;
		si.dev %= ios->layout->s_numdevs;

		first_comp += devs_in_group;
		first_comp %= ios->layout->s_numdevs;
	}

out:
	return ret;
}
int exofs_sbi_create(struct exofs_io_state *ios)
{
	int i, ret;

	for (i = 0; i < ios->layout->s_numdevs; i++) {
		struct osd_request *or;

		or = osd_start_request(exofs_ios_od(ios, i), GFP_KERNEL);
		if (unlikely(!or)) {
			EXOFS_ERR("%s: osd_start_request failed\n", __func__);
			ret = -ENOMEM;
			goto out;
		}
		ios->per_dev[i].or = or;
		ios->numdevs++;

		osd_req_create_object(or, &ios->obj);
	}
	ret = exofs_io_execute(ios);

out:
	return ret;
}
int exofs_sbi_remove(struct exofs_io_state *ios)
{
	int i, ret;

	for (i = 0; i < ios->layout->s_numdevs; i++) {
		struct osd_request *or;

		or = osd_start_request(exofs_ios_od(ios, i), GFP_KERNEL);
		if (unlikely(!or)) {
			EXOFS_ERR("%s: osd_start_request failed\n", __func__);
			ret = -ENOMEM;
			goto out;
		}
		ios->per_dev[i].or = or;
		ios->numdevs++;

		osd_req_remove_object(or, &ios->obj);
	}
	ret = exofs_io_execute(ios);

out:
	return ret;
}
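
/*
 * Issue the write (or set-attributes) for one component and for all of its
 * mirrors.  The master component's bio is used directly for the first
 * request and cloned for each additional mirror.
 */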
static int _sbi_write_mirror(struct exofs_io_state *ios, int cur_comp)
{
	struct exofs_per_dev_state *master_dev = &ios->per_dev[cur_comp];
	unsigned dev = ios->per_dev[cur_comp].dev;
	unsigned last_comp = cur_comp + ios->layout->mirrors_p1;
	int ret = 0;

	if (ios->pages && !master_dev->length)
		return 0; /* Just an empty slot */

	for (; cur_comp < last_comp; ++cur_comp, ++dev) {
		struct exofs_per_dev_state *per_dev = &ios->per_dev[cur_comp];
		struct osd_request *or;

		or = osd_start_request(exofs_ios_od(ios, dev), GFP_KERNEL);
		if (unlikely(!or)) {
			EXOFS_ERR("%s: osd_start_request failed\n", __func__);
			ret = -ENOMEM;
			goto out;
		}
		per_dev->or = or;
		per_dev->offset = master_dev->offset;

		if (ios->pages) {
			struct bio *bio;

			if (per_dev != master_dev) {
				bio = bio_kmalloc(GFP_KERNEL,
						  master_dev->bio->bi_max_vecs);
				if (unlikely(!bio)) {
					EXOFS_DBGMSG(
					     "Failed to allocate BIO size=%u\n",
					     master_dev->bio->bi_max_vecs);
					ret = -ENOMEM;
					goto out;
				}

				__bio_clone(bio, master_dev->bio);
				bio->bi_bdev = NULL;
				bio->bi_next = NULL;
				per_dev->length = master_dev->length;
				per_dev->bio = bio;
				per_dev->dev = dev;
			} else {
				bio = master_dev->bio;
				/* FIXME: bio_set_dir() */
				bio->bi_rw |= (1 << BIO_RW);
			}

			osd_req_write(or, &ios->obj, per_dev->offset, bio,
				      per_dev->length);
			EXOFS_DBGMSG("write(0x%llx) offset=0x%llx "
				     "length=0x%llx dev=%d\n",
				     _LLU(ios->obj.id), _LLU(per_dev->offset),
				     _LLU(per_dev->length), dev);
		} else if (ios->kern_buff) {
			ret = osd_req_write_kern(or, &ios->obj, per_dev->offset,
						 ios->kern_buff, ios->length);
			if (unlikely(ret))
				goto out;
			EXOFS_DBGMSG2("write_kern(0x%llx) offset=0x%llx "
				      "length=0x%llx dev=%d\n",
				      _LLU(ios->obj.id), _LLU(per_dev->offset),
				      _LLU(ios->length), dev);
		} else {
			osd_req_set_attributes(or, &ios->obj);
			EXOFS_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n",
				      _LLU(ios->obj.id), ios->out_attr_len, dev);
		}

		if (ios->out_attr)
			osd_req_add_set_attr_list(or, ios->out_attr,
						  ios->out_attr_len);

		if (ios->in_attr)
			osd_req_add_get_attr_list(or, ios->in_attr,
						  ios->in_attr_len);
	}

out:
	return ret;
}
int exofs_sbi_write(struct exofs_io_state *ios)
{
	int i;
	int ret;

	ret = _prepare_for_striping(ios);
	if (unlikely(ret))
		return ret;

	for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
		ret = _sbi_write_mirror(ios, i);
		if (unlikely(ret))
			return ret;
	}

	ret = exofs_io_execute(ios);
	return ret;
}
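
/*
 * Reads are not mirrored; a single device is picked out of the mirror set
 * (spread by object id) and the read or get-attributes is issued there.
 */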
static int _sbi_read_mirror(struct exofs_io_state *ios, unsigned cur_comp)
{
	struct osd_request *or;
	struct exofs_per_dev_state *per_dev = &ios->per_dev[cur_comp];
	unsigned first_dev = (unsigned)ios->obj.id;

	if (ios->pages && !per_dev->length)
		return 0; /* Just an empty slot */

	first_dev = per_dev->dev + first_dev % ios->layout->mirrors_p1;
	or = osd_start_request(exofs_ios_od(ios, first_dev), GFP_KERNEL);
	if (unlikely(!or)) {
		EXOFS_ERR("%s: osd_start_request failed\n", __func__);
		return -ENOMEM;
	}
	per_dev->or = or;

	if (ios->pages) {
		osd_req_read(or, &ios->obj, per_dev->offset,
			     per_dev->bio, per_dev->length);
		EXOFS_DBGMSG("read(0x%llx) offset=0x%llx length=0x%llx"
			     " dev=%d\n", _LLU(ios->obj.id),
			     _LLU(per_dev->offset), _LLU(per_dev->length),
			     first_dev);
	} else if (ios->kern_buff) {
		int ret = osd_req_read_kern(or, &ios->obj, per_dev->offset,
					    ios->kern_buff, ios->length);
		EXOFS_DBGMSG2("read_kern(0x%llx) offset=0x%llx "
			      "length=0x%llx dev=%d ret=>%d\n",
			      _LLU(ios->obj.id), _LLU(per_dev->offset),
			      _LLU(ios->length), first_dev, ret);
		if (unlikely(ret))
			return ret;
	} else {
		osd_req_get_attributes(or, &ios->obj);
		EXOFS_DBGMSG2("obj(0x%llx) get_attributes=%d dev=%d\n",
			      _LLU(ios->obj.id), ios->in_attr_len, first_dev);
	}

	if (ios->out_attr)
		osd_req_add_set_attr_list(or, ios->out_attr, ios->out_attr_len);

	if (ios->in_attr)
		osd_req_add_get_attr_list(or, ios->in_attr, ios->in_attr_len);

	return 0;
}
int exofs_sbi_read(struct exofs_io_state *ios)
{
	int i;
	int ret;

	ret = _prepare_for_striping(ios);
	if (unlikely(ret))
		return ret;

	for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
		ret = _sbi_read_mirror(ios, i);
		if (unlikely(ret))
			return ret;
	}

	ret = exofs_io_execute(ios);
	return ret;
}
int extract_attr_from_ios(struct exofs_io_state *ios, struct osd_attr *attr)
{
	struct osd_attr cur_attr = {.attr_page = 0}; /* start with zeros */
	void *iter = NULL;
	int nelem;

	do {
		nelem = 1;
		osd_req_decode_get_attr_list(ios->per_dev[0].or,
					     &cur_attr, &nelem, &iter);
		if ((cur_attr.attr_page == attr->attr_page) &&
		    (cur_attr.attr_id == attr->attr_id)) {
			attr->len = cur_attr.len;
			attr->val_ptr = cur_attr.val_ptr;
			return 0;
		}
	} while (iter);

	return -EIO;
}
static int _truncate_mirrors(struct exofs_io_state *ios, unsigned cur_comp,
			     struct osd_attr *attr)
{
	int last_comp = cur_comp + ios->layout->mirrors_p1;

	for (; cur_comp < last_comp; ++cur_comp) {
		struct exofs_per_dev_state *per_dev = &ios->per_dev[cur_comp];
		struct osd_request *or;

		or = osd_start_request(exofs_ios_od(ios, cur_comp), GFP_KERNEL);
		if (unlikely(!or)) {
			EXOFS_ERR("%s: osd_start_request failed\n", __func__);
			return -ENOMEM;
		}
		per_dev->or = or;

		osd_req_set_attributes(or, &ios->obj);
		osd_req_add_set_attr_list(or, attr, 1);
	}

	return 0;
}
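
/*
 * Truncate a striped object by setting a per-component logical length:
 * components that come before the one holding byte @size are cut at the
 * end of their current stripe unit, the component holding it is cut at
 * si.obj_offset exactly, and later components are cut at the start of
 * their current unit.  Every mirror of a component gets the same attribute.
 */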
int exofs_oi_truncate(struct exofs_i_info *oi, u64 size)
{
	struct exofs_sb_info *sbi = oi->vfs_inode.i_sb->s_fs_info;
	struct exofs_io_state *ios;
	struct exofs_trunc_attr {
		struct osd_attr attr;
		__be64 newsize;
	} *size_attrs;
	struct _striping_info si;
	int i, ret;

	ret = exofs_get_io_state(&sbi->layout, &ios);
	if (unlikely(ret))
		return ret;

	size_attrs = kcalloc(ios->layout->group_width, sizeof(*size_attrs),
			     GFP_KERNEL);
	if (unlikely(!size_attrs)) {
		ret = -ENOMEM;
		goto out;
	}

	ios->obj.id = exofs_oi_objno(oi);
	ios->cred = oi->i_cred;

	ios->numdevs = ios->layout->s_numdevs;
	_calc_stripe_info(ios, size, &si);

	for (i = 0; i < ios->layout->group_width; ++i) {
		struct exofs_trunc_attr *size_attr = &size_attrs[i];
		u64 obj_size;

		if (i < si.dev)
			obj_size = si.obj_offset +
					ios->layout->stripe_unit - si.unit_off;
		else if (i == si.dev)
			obj_size = si.obj_offset;
		else /* i > si.dev */
			obj_size = si.obj_offset - si.unit_off;

		size_attr->newsize = cpu_to_be64(obj_size);
		size_attr->attr = g_attr_logical_length;
		size_attr->attr.val_ptr = &size_attr->newsize;

		ret = _truncate_mirrors(ios, i * ios->layout->mirrors_p1,
					&size_attr->attr);
		if (unlikely(ret))
			goto out;
	}
	ret = exofs_io_execute(ios);

out:
	kfree(size_attrs);
	exofs_put_io_state(ios);
	return ret;
}