/* objlayout.c */
  1. /*
  2. * pNFS Objects layout driver high level definitions
  3. *
  4. * Copyright (C) 2007 Panasas Inc. [year of first publication]
  5. * All rights reserved.
  6. *
  7. * Benny Halevy <bhalevy@panasas.com>
  8. * Boaz Harrosh <bharrosh@panasas.com>
  9. *
  10. * This program is free software; you can redistribute it and/or modify
  11. * it under the terms of the GNU General Public License version 2
  12. * See the file COPYING included with this distribution for more details.
  13. *
  14. * Redistribution and use in source and binary forms, with or without
  15. * modification, are permitted provided that the following conditions
  16. * are met:
  17. *
  18. * 1. Redistributions of source code must retain the above copyright
  19. * notice, this list of conditions and the following disclaimer.
  20. * 2. Redistributions in binary form must reproduce the above copyright
  21. * notice, this list of conditions and the following disclaimer in the
  22. * documentation and/or other materials provided with the distribution.
  23. * 3. Neither the name of the Panasas company nor the names of its
  24. * contributors may be used to endorse or promote products derived
  25. * from this software without specific prior written permission.
  26. *
  27. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
  28. * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
  29. * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  30. * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  31. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  32. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  33. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  34. * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
  35. * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
  36. * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  37. * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  38. */
  39. #include <scsi/osd_initiator.h>
  40. #include "objlayout.h"
  41. #define NFSDBG_FACILITY NFSDBG_PNFS_LD
  42. /*
  43. * Create a objlayout layout structure for the given inode and return it.
  44. */
  45. struct pnfs_layout_hdr *
  46. objlayout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
  47. {
  48. struct objlayout *objlay;
  49. objlay = kzalloc(sizeof(struct objlayout), gfp_flags);
  50. if (objlay) {
  51. spin_lock_init(&objlay->lock);
  52. INIT_LIST_HEAD(&objlay->err_list);
  53. }
  54. dprintk("%s: Return %p\n", __func__, objlay);
  55. return &objlay->pnfs_layout;
  56. }
  57. /*
  58. * Free an objlayout layout structure
  59. */
  60. void
  61. objlayout_free_layout_hdr(struct pnfs_layout_hdr *lo)
  62. {
  63. struct objlayout *objlay = OBJLAYOUT(lo);
  64. dprintk("%s: objlay %p\n", __func__, objlay);
  65. WARN_ON(!list_empty(&objlay->err_list));
  66. kfree(objlay);
  67. }
  68. /*
  69. * Unmarshall layout and store it in pnfslay.
  70. */
  71. struct pnfs_layout_segment *
  72. objlayout_alloc_lseg(struct pnfs_layout_hdr *pnfslay,
  73. struct nfs4_layoutget_res *lgr,
  74. gfp_t gfp_flags)
  75. {
  76. int status = -ENOMEM;
  77. struct xdr_stream stream;
  78. struct xdr_buf buf = {
  79. .pages = lgr->layoutp->pages,
  80. .page_len = lgr->layoutp->len,
  81. .buflen = lgr->layoutp->len,
  82. .len = lgr->layoutp->len,
  83. };
  84. struct page *scratch;
  85. struct pnfs_layout_segment *lseg;
  86. dprintk("%s: Begin pnfslay %p\n", __func__, pnfslay);
  87. scratch = alloc_page(gfp_flags);
  88. if (!scratch)
  89. goto err_nofree;
  90. xdr_init_decode(&stream, &buf, NULL);
  91. xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
  92. status = objio_alloc_lseg(&lseg, pnfslay, &lgr->range, &stream, gfp_flags);
  93. if (unlikely(status)) {
  94. dprintk("%s: objio_alloc_lseg Return err %d\n", __func__,
  95. status);
  96. goto err;
  97. }
  98. __free_page(scratch);
  99. dprintk("%s: Return %p\n", __func__, lseg);
  100. return lseg;
  101. err:
  102. __free_page(scratch);
  103. err_nofree:
  104. dprintk("%s: Err Return=>%d\n", __func__, status);
  105. return ERR_PTR(status);
  106. }
  107. /*
  108. * Free a layout segement
  109. */
  110. void
  111. objlayout_free_lseg(struct pnfs_layout_segment *lseg)
  112. {
  113. dprintk("%s: freeing layout segment %p\n", __func__, lseg);
  114. if (unlikely(!lseg))
  115. return;
  116. objio_free_lseg(lseg);
  117. }
  118. /*
  119. * I/O Operations
  120. */
  121. static inline u64
  122. end_offset(u64 start, u64 len)
  123. {
  124. u64 end;
  125. end = start + len;
  126. return end >= start ? end : NFS4_MAX_UINT64;
  127. }
  128. /* last octet in a range */
  129. static inline u64
  130. last_byte_offset(u64 start, u64 len)
  131. {
  132. u64 end;
  133. BUG_ON(!len);
  134. end = start + len;
  135. return end > start ? end - 1 : NFS4_MAX_UINT64;
  136. }
  137. void _fix_verify_io_params(struct pnfs_layout_segment *lseg,
  138. struct page ***p_pages, unsigned *p_pgbase,
  139. u64 offset, unsigned long count)
  140. {
  141. u64 lseg_end_offset;
  142. BUG_ON(offset < lseg->pls_range.offset);
  143. lseg_end_offset = end_offset(lseg->pls_range.offset,
  144. lseg->pls_range.length);
  145. BUG_ON(offset >= lseg_end_offset);
  146. WARN_ON(offset + count > lseg_end_offset);
  147. if (*p_pgbase > PAGE_SIZE) {
  148. dprintk("%s: pgbase(0x%x) > PAGE_SIZE\n", __func__, *p_pgbase);
  149. *p_pages += *p_pgbase >> PAGE_SHIFT;
  150. *p_pgbase &= ~PAGE_MASK;
  151. }
  152. }
  153. /*
  154. * I/O done common code
  155. */
  156. static void
  157. objlayout_iodone(struct objlayout_io_res *oir)
  158. {
  159. if (likely(oir->status >= 0)) {
  160. objio_free_result(oir);
  161. } else {
  162. struct objlayout *objlay = oir->objlay;
  163. spin_lock(&objlay->lock);
  164. objlay->delta_space_valid = OBJ_DSU_INVALID;
  165. list_add(&objlay->err_list, &oir->err_list);
  166. spin_unlock(&objlay->lock);
  167. }
  168. }
  169. /*
  170. * objlayout_io_set_result - Set an osd_error code on a specific osd comp.
  171. *
  172. * The @index component IO failed (error returned from target). Register
  173. * the error for later reporting at layout-return.
  174. */
  175. void
  176. objlayout_io_set_result(struct objlayout_io_res *oir, unsigned index,
  177. struct pnfs_osd_objid *pooid, int osd_error,
  178. u64 offset, u64 length, bool is_write)
  179. {
  180. struct pnfs_osd_ioerr *ioerr = &oir->ioerrs[index];
  181. BUG_ON(index >= oir->num_comps);
  182. if (osd_error) {
  183. ioerr->oer_component = *pooid;
  184. ioerr->oer_comp_offset = offset;
  185. ioerr->oer_comp_length = length;
  186. ioerr->oer_iswrite = is_write;
  187. ioerr->oer_errno = osd_error;
  188. dprintk("%s: err[%d]: errno=%d is_write=%d dev(%llx:%llx) "
  189. "par=0x%llx obj=0x%llx offset=0x%llx length=0x%llx\n",
  190. __func__, index, ioerr->oer_errno,
  191. ioerr->oer_iswrite,
  192. _DEVID_LO(&ioerr->oer_component.oid_device_id),
  193. _DEVID_HI(&ioerr->oer_component.oid_device_id),
  194. ioerr->oer_component.oid_partition_id,
  195. ioerr->oer_component.oid_object_id,
  196. ioerr->oer_comp_offset,
  197. ioerr->oer_comp_length);
  198. } else {
  199. /* User need not call if no error is reported */
  200. ioerr->oer_errno = 0;
  201. }
  202. }
  203. /* Function scheduled on rpc workqueue to call ->nfs_readlist_complete().
  204. * This is because the osd completion is called with ints-off from
  205. * the block layer
  206. */
  207. static void _rpc_read_complete(struct work_struct *work)
  208. {
  209. struct rpc_task *task;
  210. struct nfs_read_data *rdata;
  211. dprintk("%s enter\n", __func__);
  212. task = container_of(work, struct rpc_task, u.tk_work);
  213. rdata = container_of(task, struct nfs_read_data, task);
  214. pnfs_ld_read_done(rdata);
  215. }
  216. void
  217. objlayout_read_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
  218. {
  219. struct nfs_read_data *rdata = oir->rpcdata;
  220. oir->status = rdata->task.tk_status = status;
  221. if (status >= 0)
  222. rdata->res.count = status;
  223. else
  224. rdata->pnfs_error = status;
  225. objlayout_iodone(oir);
  226. /* must not use oir after this point */
  227. dprintk("%s: Return status=%zd eof=%d sync=%d\n", __func__,
  228. status, rdata->res.eof, sync);
  229. if (sync)
  230. pnfs_ld_read_done(rdata);
  231. else {
  232. INIT_WORK(&rdata->task.u.tk_work, _rpc_read_complete);
  233. schedule_work(&rdata->task.u.tk_work);
  234. }
  235. }
  236. /*
  237. * Perform sync or async reads.
  238. */
  239. enum pnfs_try_status
  240. objlayout_read_pagelist(struct nfs_read_data *rdata)
  241. {
  242. loff_t offset = rdata->args.offset;
  243. size_t count = rdata->args.count;
  244. int err;
  245. loff_t eof;
  246. eof = i_size_read(rdata->inode);
  247. if (unlikely(offset + count > eof)) {
  248. if (offset >= eof) {
  249. err = 0;
  250. rdata->res.count = 0;
  251. rdata->res.eof = 1;
  252. /*FIXME: do we need to call pnfs_ld_read_done() */
  253. goto out;
  254. }
  255. count = eof - offset;
  256. }
  257. rdata->res.eof = (offset + count) >= eof;
  258. _fix_verify_io_params(rdata->lseg, &rdata->args.pages,
  259. &rdata->args.pgbase,
  260. rdata->args.offset, rdata->args.count);
  261. dprintk("%s: inode(%lx) offset 0x%llx count 0x%Zx eof=%d\n",
  262. __func__, rdata->inode->i_ino, offset, count, rdata->res.eof);
  263. err = objio_read_pagelist(rdata);
  264. out:
  265. if (unlikely(err)) {
  266. rdata->pnfs_error = err;
  267. dprintk("%s: Returned Error %d\n", __func__, err);
  268. return PNFS_NOT_ATTEMPTED;
  269. }
  270. return PNFS_ATTEMPTED;
  271. }
  272. /* Function scheduled on rpc workqueue to call ->nfs_writelist_complete().
  273. * This is because the osd completion is called with ints-off from
  274. * the block layer
  275. */
  276. static void _rpc_write_complete(struct work_struct *work)
  277. {
  278. struct rpc_task *task;
  279. struct nfs_write_data *wdata;
  280. dprintk("%s enter\n", __func__);
  281. task = container_of(work, struct rpc_task, u.tk_work);
  282. wdata = container_of(task, struct nfs_write_data, task);
  283. pnfs_ld_write_done(wdata);
  284. }
  285. void
  286. objlayout_write_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
  287. {
  288. struct nfs_write_data *wdata = oir->rpcdata;
  289. oir->status = wdata->task.tk_status = status;
  290. if (status >= 0) {
  291. wdata->res.count = status;
  292. wdata->verf.committed = oir->committed;
  293. } else {
  294. wdata->pnfs_error = status;
  295. }
  296. objlayout_iodone(oir);
  297. /* must not use oir after this point */
  298. dprintk("%s: Return status %zd committed %d sync=%d\n", __func__,
  299. status, wdata->verf.committed, sync);
  300. if (sync)
  301. pnfs_ld_write_done(wdata);
  302. else {
  303. INIT_WORK(&wdata->task.u.tk_work, _rpc_write_complete);
  304. schedule_work(&wdata->task.u.tk_work);
  305. }
  306. }
  307. /*
  308. * Perform sync or async writes.
  309. */
  310. enum pnfs_try_status
  311. objlayout_write_pagelist(struct nfs_write_data *wdata,
  312. int how)
  313. {
  314. int err;
  315. _fix_verify_io_params(wdata->lseg, &wdata->args.pages,
  316. &wdata->args.pgbase,
  317. wdata->args.offset, wdata->args.count);
  318. err = objio_write_pagelist(wdata, how);
  319. if (unlikely(err)) {
  320. wdata->pnfs_error = err;
  321. dprintk("%s: Returned Error %d\n", __func__, err);
  322. return PNFS_NOT_ATTEMPTED;
  323. }
  324. return PNFS_ATTEMPTED;
  325. }
  326. void
  327. objlayout_encode_layoutcommit(struct pnfs_layout_hdr *pnfslay,
  328. struct xdr_stream *xdr,
  329. const struct nfs4_layoutcommit_args *args)
  330. {
  331. struct objlayout *objlay = OBJLAYOUT(pnfslay);
  332. struct pnfs_osd_layoutupdate lou;
  333. __be32 *start;
  334. dprintk("%s: Begin\n", __func__);
  335. spin_lock(&objlay->lock);
  336. lou.dsu_valid = (objlay->delta_space_valid == OBJ_DSU_VALID);
  337. lou.dsu_delta = objlay->delta_space_used;
  338. objlay->delta_space_used = 0;
  339. objlay->delta_space_valid = OBJ_DSU_INIT;
  340. lou.olu_ioerr_flag = !list_empty(&objlay->err_list);
  341. spin_unlock(&objlay->lock);
  342. start = xdr_reserve_space(xdr, 4);
  343. BUG_ON(pnfs_osd_xdr_encode_layoutupdate(xdr, &lou));
  344. *start = cpu_to_be32((xdr->p - start - 1) * 4);
  345. dprintk("%s: Return delta_space_used %lld err %d\n", __func__,
  346. lou.dsu_delta, lou.olu_ioerr_flag);
  347. }
  348. static int
  349. err_prio(u32 oer_errno)
  350. {
  351. switch (oer_errno) {
  352. case 0:
  353. return 0;
  354. case PNFS_OSD_ERR_RESOURCE:
  355. return OSD_ERR_PRI_RESOURCE;
  356. case PNFS_OSD_ERR_BAD_CRED:
  357. return OSD_ERR_PRI_BAD_CRED;
  358. case PNFS_OSD_ERR_NO_ACCESS:
  359. return OSD_ERR_PRI_NO_ACCESS;
  360. case PNFS_OSD_ERR_UNREACHABLE:
  361. return OSD_ERR_PRI_UNREACHABLE;
  362. case PNFS_OSD_ERR_NOT_FOUND:
  363. return OSD_ERR_PRI_NOT_FOUND;
  364. case PNFS_OSD_ERR_NO_SPACE:
  365. return OSD_ERR_PRI_NO_SPACE;
  366. default:
  367. WARN_ON(1);
  368. /* fallthrough */
  369. case PNFS_OSD_ERR_EIO:
  370. return OSD_ERR_PRI_EIO;
  371. }
  372. }
/*
 * Fold @src_err into @dest_err so a single descriptor covers both errors:
 * the merged range is the union of the two byte ranges, mismatching
 * object/partition ids are blanked to 0, and the errno is chosen by
 * err_prio() with write errors taking precedence over read errors.
 */
static void
merge_ioerr(struct pnfs_osd_ioerr *dest_err,
	    const struct pnfs_osd_ioerr *src_err)
{
	u64 dest_end, src_end;

	/* First error merged in: copy it wholesale. */
	if (!dest_err->oer_errno) {
		*dest_err = *src_err;
		/* accumulated device must be blank */
		memset(&dest_err->oer_component.oid_device_id, 0,
		       sizeof(dest_err->oer_component.oid_device_id));
		return;
	}

	/* Ids that differ between the two errors are reported as 0. */
	if (dest_err->oer_component.oid_partition_id !=
	    src_err->oer_component.oid_partition_id)
		dest_err->oer_component.oid_partition_id = 0;

	if (dest_err->oer_component.oid_object_id !=
	    src_err->oer_component.oid_object_id)
		dest_err->oer_component.oid_object_id = 0;

	/* Widen [offset, offset+length) to cover both ranges. */
	if (dest_err->oer_comp_offset > src_err->oer_comp_offset)
		dest_err->oer_comp_offset = src_err->oer_comp_offset;

	dest_end = end_offset(dest_err->oer_comp_offset,
			      dest_err->oer_comp_length);
	src_end = end_offset(src_err->oer_comp_offset,
			     src_err->oer_comp_length);
	if (dest_end < src_end)
		dest_end = src_end;

	dest_err->oer_comp_length = dest_end - dest_err->oer_comp_offset;

	/* Same direction: keep the higher-priority errno.  A write error
	 * always overrides an accumulated read error. */
	if ((src_err->oer_iswrite == dest_err->oer_iswrite) &&
	    (err_prio(src_err->oer_errno) > err_prio(dest_err->oer_errno))) {
		dest_err->oer_errno = src_err->oer_errno;
	} else if (src_err->oer_iswrite) {
		dest_err->oer_iswrite = true;
		dest_err->oer_errno = src_err->oer_errno;
	}
}
/*
 * The XDR stream ran out of room for individual error descriptors:
 * merge every remaining queued error into one accumulated descriptor and
 * encode it at @p (a previously reserved slot).  Frees each result as it
 * is consumed.  Caller holds objlay->lock (see
 * objlayout_encode_layoutreturn()).
 */
static void
encode_accumulated_error(struct objlayout *objlay, __be32 *p)
{
	struct objlayout_io_res *oir, *tmp;
	struct pnfs_osd_ioerr accumulated_err = {.oer_errno = 0};

	list_for_each_entry_safe(oir, tmp, &objlay->err_list, err_list) {
		unsigned i;

		for (i = 0; i < oir->num_comps; i++) {
			struct pnfs_osd_ioerr *ioerr = &oir->ioerrs[i];

			if (!ioerr->oer_errno)
				continue;

			printk(KERN_ERR "NFS: %s: err[%d]: errno=%d "
				"is_write=%d dev(%llx:%llx) par=0x%llx "
				"obj=0x%llx offset=0x%llx length=0x%llx\n",
				__func__, i, ioerr->oer_errno,
				ioerr->oer_iswrite,
				_DEVID_LO(&ioerr->oer_component.oid_device_id),
				_DEVID_HI(&ioerr->oer_component.oid_device_id),
				ioerr->oer_component.oid_partition_id,
				ioerr->oer_component.oid_object_id,
				ioerr->oer_comp_offset,
				ioerr->oer_comp_length);

			merge_ioerr(&accumulated_err, ioerr);
		}
		list_del(&oir->err_list);
		objio_free_result(oir);
	}

	pnfs_osd_xdr_encode_ioerr(p, &accumulated_err);
}
/*
 * Encode queued I/O errors into the LAYOUTRETURN body.  Each non-zero
 * component error gets its own descriptor; if the XDR stream fills up,
 * the last encoded slot is overwritten with the union of all remaining
 * errors (encode_accumulated_error()).  Consumed results are freed.
 */
void
objlayout_encode_layoutreturn(struct pnfs_layout_hdr *pnfslay,
			      struct xdr_stream *xdr,
			      const struct nfs4_layoutreturn_args *args)
{
	struct objlayout *objlay = OBJLAYOUT(pnfslay);
	struct objlayout_io_res *oir, *tmp;
	__be32 *start;

	dprintk("%s: Begin\n", __func__);
	/* Reserve a 4-byte length word, back-patched at the end. */
	start = xdr_reserve_space(xdr, 4);
	BUG_ON(!start);

	spin_lock(&objlay->lock);

	list_for_each_entry_safe(oir, tmp, &objlay->err_list, err_list) {
		__be32 *last_xdr = NULL, *p;
		unsigned i;
		int res = 0;

		for (i = 0; i < oir->num_comps; i++) {
			struct pnfs_osd_ioerr *ioerr = &oir->ioerrs[i];

			if (!ioerr->oer_errno)
				continue;

			dprintk("%s: err[%d]: errno=%d is_write=%d "
				"dev(%llx:%llx) par=0x%llx obj=0x%llx "
				"offset=0x%llx length=0x%llx\n",
				__func__, i, ioerr->oer_errno,
				ioerr->oer_iswrite,
				_DEVID_LO(&ioerr->oer_component.oid_device_id),
				_DEVID_HI(&ioerr->oer_component.oid_device_id),
				ioerr->oer_component.oid_partition_id,
				ioerr->oer_component.oid_object_id,
				ioerr->oer_comp_offset,
				ioerr->oer_comp_length);

			p = pnfs_osd_xdr_ioerr_reserve_space(xdr);
			if (unlikely(!p)) {
				/* out of stream space */
				res = -E2BIG;
				break; /* accumulated_error */
			}

			/* remember the last slot in case we must overwrite
			 * it with the accumulated error below */
			last_xdr = p;
			pnfs_osd_xdr_encode_ioerr(p, &oir->ioerrs[i]);
		}

		/* TODO: use xdr_write_pages */
		if (unlikely(res)) {
			/* no space for even one error descriptor */
			BUG_ON(!last_xdr);

			/* we've encountered a situation with lots and lots of
			 * errors and no space to encode them all. Use the last
			 * available slot to report the union of all the
			 * remaining errors.
			 */
			encode_accumulated_error(objlay, last_xdr);
			goto loop_done;
		}
		list_del(&oir->err_list);
		objio_free_result(oir);
	}
loop_done:
	spin_unlock(&objlay->lock);

	/* back-patch the reserved length word */
	*start = cpu_to_be32((xdr->p - start - 1) * 4);
	dprintk("%s: Return\n", __func__);
}
/*
 * Get Device Info API for io engines
 */
struct objlayout_deviceinfo {
	struct page *page;		/* backing page for the decoded addr */
	struct pnfs_osd_deviceaddr da;	/* This must be last */
};
  503. /* Initialize and call nfs_getdeviceinfo, then decode and return a
  504. * "struct pnfs_osd_deviceaddr *" Eventually objlayout_put_deviceinfo()
  505. * should be called.
  506. */
  507. int objlayout_get_deviceinfo(struct pnfs_layout_hdr *pnfslay,
  508. struct nfs4_deviceid *d_id, struct pnfs_osd_deviceaddr **deviceaddr,
  509. gfp_t gfp_flags)
  510. {
  511. struct objlayout_deviceinfo *odi;
  512. struct pnfs_device pd;
  513. struct super_block *sb;
  514. struct page *page, **pages;
  515. u32 *p;
  516. int err;
  517. page = alloc_page(gfp_flags);
  518. if (!page)
  519. return -ENOMEM;
  520. pages = &page;
  521. pd.pages = pages;
  522. memcpy(&pd.dev_id, d_id, sizeof(*d_id));
  523. pd.layout_type = LAYOUT_OSD2_OBJECTS;
  524. pd.pages = &page;
  525. pd.pgbase = 0;
  526. pd.pglen = PAGE_SIZE;
  527. pd.mincount = 0;
  528. sb = pnfslay->plh_inode->i_sb;
  529. err = nfs4_proc_getdeviceinfo(NFS_SERVER(pnfslay->plh_inode), &pd);
  530. dprintk("%s nfs_getdeviceinfo returned %d\n", __func__, err);
  531. if (err)
  532. goto err_out;
  533. p = page_address(page);
  534. odi = kzalloc(sizeof(*odi), gfp_flags);
  535. if (!odi) {
  536. err = -ENOMEM;
  537. goto err_out;
  538. }
  539. pnfs_osd_xdr_decode_deviceaddr(&odi->da, p);
  540. odi->page = page;
  541. *deviceaddr = &odi->da;
  542. return 0;
  543. err_out:
  544. __free_page(page);
  545. return err;
  546. }
  547. void objlayout_put_deviceinfo(struct pnfs_osd_deviceaddr *deviceaddr)
  548. {
  549. struct objlayout_deviceinfo *odi = container_of(deviceaddr,
  550. struct objlayout_deviceinfo,
  551. da);
  552. __free_page(odi->page);
  553. kfree(odi);
  554. }