/* objlayout.c */
  1. /*
  2. * pNFS Objects layout driver high level definitions
  3. *
  4. * Copyright (C) 2007 Panasas Inc. [year of first publication]
  5. * All rights reserved.
  6. *
  7. * Benny Halevy <bhalevy@panasas.com>
  8. * Boaz Harrosh <bharrosh@panasas.com>
  9. *
  10. * This program is free software; you can redistribute it and/or modify
  11. * it under the terms of the GNU General Public License version 2
  12. * See the file COPYING included with this distribution for more details.
  13. *
  14. * Redistribution and use in source and binary forms, with or without
  15. * modification, are permitted provided that the following conditions
  16. * are met:
  17. *
  18. * 1. Redistributions of source code must retain the above copyright
  19. * notice, this list of conditions and the following disclaimer.
  20. * 2. Redistributions in binary form must reproduce the above copyright
  21. * notice, this list of conditions and the following disclaimer in the
  22. * documentation and/or other materials provided with the distribution.
  23. * 3. Neither the name of the Panasas company nor the names of its
  24. * contributors may be used to endorse or promote products derived
  25. * from this software without specific prior written permission.
  26. *
  27. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
  28. * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
  29. * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  30. * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  31. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  32. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  33. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  34. * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
  35. * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
  36. * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  37. * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  38. */
  39. #include <scsi/osd_initiator.h>
  40. #include "objlayout.h"
  41. #define NFSDBG_FACILITY NFSDBG_PNFS_LD
  42. /*
  43. * Create a objlayout layout structure for the given inode and return it.
  44. */
  45. struct pnfs_layout_hdr *
  46. objlayout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
  47. {
  48. struct objlayout *objlay;
  49. objlay = kzalloc(sizeof(struct objlayout), gfp_flags);
  50. if (objlay) {
  51. spin_lock_init(&objlay->lock);
  52. INIT_LIST_HEAD(&objlay->err_list);
  53. }
  54. dprintk("%s: Return %p\n", __func__, objlay);
  55. return &objlay->pnfs_layout;
  56. }
  57. /*
  58. * Free an objlayout layout structure
  59. */
  60. void
  61. objlayout_free_layout_hdr(struct pnfs_layout_hdr *lo)
  62. {
  63. struct objlayout *objlay = OBJLAYOUT(lo);
  64. dprintk("%s: objlay %p\n", __func__, objlay);
  65. WARN_ON(!list_empty(&objlay->err_list));
  66. kfree(objlay);
  67. }
  68. /*
  69. * Unmarshall layout and store it in pnfslay.
  70. */
  71. struct pnfs_layout_segment *
  72. objlayout_alloc_lseg(struct pnfs_layout_hdr *pnfslay,
  73. struct nfs4_layoutget_res *lgr,
  74. gfp_t gfp_flags)
  75. {
  76. int status = -ENOMEM;
  77. struct xdr_stream stream;
  78. struct xdr_buf buf = {
  79. .pages = lgr->layoutp->pages,
  80. .page_len = lgr->layoutp->len,
  81. .buflen = lgr->layoutp->len,
  82. .len = lgr->layoutp->len,
  83. };
  84. struct page *scratch;
  85. struct pnfs_layout_segment *lseg;
  86. dprintk("%s: Begin pnfslay %p\n", __func__, pnfslay);
  87. scratch = alloc_page(gfp_flags);
  88. if (!scratch)
  89. goto err_nofree;
  90. xdr_init_decode(&stream, &buf, NULL);
  91. xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
  92. status = objio_alloc_lseg(&lseg, pnfslay, &lgr->range, &stream, gfp_flags);
  93. if (unlikely(status)) {
  94. dprintk("%s: objio_alloc_lseg Return err %d\n", __func__,
  95. status);
  96. goto err;
  97. }
  98. __free_page(scratch);
  99. dprintk("%s: Return %p\n", __func__, lseg);
  100. return lseg;
  101. err:
  102. __free_page(scratch);
  103. err_nofree:
  104. dprintk("%s: Err Return=>%d\n", __func__, status);
  105. return ERR_PTR(status);
  106. }
  107. /*
  108. * Free a layout segement
  109. */
  110. void
  111. objlayout_free_lseg(struct pnfs_layout_segment *lseg)
  112. {
  113. dprintk("%s: freeing layout segment %p\n", __func__, lseg);
  114. if (unlikely(!lseg))
  115. return;
  116. objio_free_lseg(lseg);
  117. }
  118. /*
  119. * I/O Operations
  120. */
  121. static inline u64
  122. end_offset(u64 start, u64 len)
  123. {
  124. u64 end;
  125. end = start + len;
  126. return end >= start ? end : NFS4_MAX_UINT64;
  127. }
  128. /* last octet in a range */
  129. static inline u64
  130. last_byte_offset(u64 start, u64 len)
  131. {
  132. u64 end;
  133. BUG_ON(!len);
  134. end = start + len;
  135. return end > start ? end - 1 : NFS4_MAX_UINT64;
  136. }
/*
 * Allocate and initialize an io_state for a read or write spanning
 * [offset, offset + count) within @lseg.  The count is clamped to the
 * segment's range, and (pages, pgbase) are normalized so pgbase falls
 * within the first page.  Returns NULL on allocation failure.
 */
static struct objlayout_io_state *
objlayout_alloc_io_state(struct pnfs_layout_hdr *pnfs_layout_type,
			struct page **pages,
			unsigned pgbase,
			loff_t offset,
			size_t count,
			struct pnfs_layout_segment *lseg,
			void *rpcdata,
			gfp_t gfp_flags)
{
	struct objlayout_io_state *state;
	u64 lseg_end_offset;

	dprintk("%s: allocating io_state\n", __func__);
	/* the io engine owns the actual allocation of the state struct */
	if (objio_alloc_io_state(lseg, &state, gfp_flags))
		return NULL;

	/* the request must start inside the segment's byte range */
	BUG_ON(offset < lseg->pls_range.offset);
	lseg_end_offset = end_offset(lseg->pls_range.offset,
				     lseg->pls_range.length);
	BUG_ON(offset >= lseg_end_offset);
	/* clamp count so the I/O does not run past the segment's end */
	if (offset + count > lseg_end_offset) {
		count = lseg->pls_range.length -
				(offset - lseg->pls_range.offset);
		dprintk("%s: truncated count %Zd\n", __func__, count);
	}

	/* normalize: advance the page vector so pgbase < PAGE_SIZE */
	if (pgbase > PAGE_SIZE) {
		pages += pgbase >> PAGE_SHIFT;
		pgbase &= ~PAGE_MASK;
	}

	INIT_LIST_HEAD(&state->err_list);
	state->lseg = lseg;
	state->rpcdata = rpcdata;
	state->pages = pages;
	state->pgbase = pgbase;
	/* number of pages touched, rounding the tail page up */
	state->nr_pages = (pgbase + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
	state->offset = offset;
	state->count = count;
	state->sync = 0;

	return state;
}
  176. static void
  177. objlayout_free_io_state(struct objlayout_io_state *state)
  178. {
  179. dprintk("%s: freeing io_state\n", __func__);
  180. if (unlikely(!state))
  181. return;
  182. objio_free_io_state(state);
  183. }
  184. /*
  185. * I/O done common code
  186. */
  187. static void
  188. objlayout_iodone(struct objlayout_io_state *state)
  189. {
  190. dprintk("%s: state %p status\n", __func__, state);
  191. if (likely(state->status >= 0)) {
  192. objlayout_free_io_state(state);
  193. } else {
  194. struct objlayout *objlay = OBJLAYOUT(state->lseg->pls_layout);
  195. spin_lock(&objlay->lock);
  196. list_add(&objlay->err_list, &state->err_list);
  197. spin_unlock(&objlay->lock);
  198. }
  199. }
  200. /*
  201. * objlayout_io_set_result - Set an osd_error code on a specific osd comp.
  202. *
  203. * The @index component IO failed (error returned from target). Register
  204. * the error for later reporting at layout-return.
  205. */
  206. void
  207. objlayout_io_set_result(struct objlayout_io_state *state, unsigned index,
  208. struct pnfs_osd_objid *pooid, int osd_error,
  209. u64 offset, u64 length, bool is_write)
  210. {
  211. struct pnfs_osd_ioerr *ioerr = &state->ioerrs[index];
  212. BUG_ON(index >= state->num_comps);
  213. if (osd_error) {
  214. ioerr->oer_component = *pooid;
  215. ioerr->oer_comp_offset = offset;
  216. ioerr->oer_comp_length = length;
  217. ioerr->oer_iswrite = is_write;
  218. ioerr->oer_errno = osd_error;
  219. dprintk("%s: err[%d]: errno=%d is_write=%d dev(%llx:%llx) "
  220. "par=0x%llx obj=0x%llx offset=0x%llx length=0x%llx\n",
  221. __func__, index, ioerr->oer_errno,
  222. ioerr->oer_iswrite,
  223. _DEVID_LO(&ioerr->oer_component.oid_device_id),
  224. _DEVID_HI(&ioerr->oer_component.oid_device_id),
  225. ioerr->oer_component.oid_partition_id,
  226. ioerr->oer_component.oid_object_id,
  227. ioerr->oer_comp_offset,
  228. ioerr->oer_comp_length);
  229. } else {
  230. /* User need not call if no error is reported */
  231. ioerr->oer_errno = 0;
  232. }
  233. }
  234. /* Function scheduled on rpc workqueue to call ->nfs_readlist_complete().
  235. * This is because the osd completion is called with ints-off from
  236. * the block layer
  237. */
  238. static void _rpc_read_complete(struct work_struct *work)
  239. {
  240. struct rpc_task *task;
  241. struct nfs_read_data *rdata;
  242. dprintk("%s enter\n", __func__);
  243. task = container_of(work, struct rpc_task, u.tk_work);
  244. rdata = container_of(task, struct nfs_read_data, task);
  245. pnfs_ld_read_done(rdata);
  246. }
  247. void
  248. objlayout_read_done(struct objlayout_io_state *state, ssize_t status, bool sync)
  249. {
  250. int eof = state->eof;
  251. struct nfs_read_data *rdata;
  252. state->status = status;
  253. dprintk("%s: Begin status=%ld eof=%d\n", __func__, status, eof);
  254. rdata = state->rpcdata;
  255. rdata->task.tk_status = status;
  256. if (status >= 0) {
  257. rdata->res.count = status;
  258. rdata->res.eof = eof;
  259. }
  260. objlayout_iodone(state);
  261. /* must not use state after this point */
  262. if (sync)
  263. pnfs_ld_read_done(rdata);
  264. else {
  265. INIT_WORK(&rdata->task.u.tk_work, _rpc_read_complete);
  266. schedule_work(&rdata->task.u.tk_work);
  267. }
  268. }
  269. /*
  270. * Perform sync or async reads.
  271. */
  272. enum pnfs_try_status
  273. objlayout_read_pagelist(struct nfs_read_data *rdata)
  274. {
  275. loff_t offset = rdata->args.offset;
  276. size_t count = rdata->args.count;
  277. struct objlayout_io_state *state;
  278. ssize_t status = 0;
  279. loff_t eof;
  280. dprintk("%s: Begin inode %p offset %llu count %d\n",
  281. __func__, rdata->inode, offset, (int)count);
  282. eof = i_size_read(rdata->inode);
  283. if (unlikely(offset + count > eof)) {
  284. if (offset >= eof) {
  285. status = 0;
  286. rdata->res.count = 0;
  287. rdata->res.eof = 1;
  288. goto out;
  289. }
  290. count = eof - offset;
  291. }
  292. state = objlayout_alloc_io_state(NFS_I(rdata->inode)->layout,
  293. rdata->args.pages, rdata->args.pgbase,
  294. offset, count,
  295. rdata->lseg, rdata,
  296. GFP_KERNEL);
  297. if (unlikely(!state)) {
  298. status = -ENOMEM;
  299. goto out;
  300. }
  301. state->eof = state->offset + state->count >= eof;
  302. status = objio_read_pagelist(state);
  303. out:
  304. dprintk("%s: Return status %Zd\n", __func__, status);
  305. rdata->pnfs_error = status;
  306. return PNFS_ATTEMPTED;
  307. }
  308. /* Function scheduled on rpc workqueue to call ->nfs_writelist_complete().
  309. * This is because the osd completion is called with ints-off from
  310. * the block layer
  311. */
  312. static void _rpc_write_complete(struct work_struct *work)
  313. {
  314. struct rpc_task *task;
  315. struct nfs_write_data *wdata;
  316. dprintk("%s enter\n", __func__);
  317. task = container_of(work, struct rpc_task, u.tk_work);
  318. wdata = container_of(task, struct nfs_write_data, task);
  319. pnfs_ld_write_done(wdata);
  320. }
  321. void
  322. objlayout_write_done(struct objlayout_io_state *state, ssize_t status,
  323. bool sync)
  324. {
  325. struct nfs_write_data *wdata;
  326. dprintk("%s: Begin\n", __func__);
  327. wdata = state->rpcdata;
  328. state->status = status;
  329. wdata->task.tk_status = status;
  330. if (status >= 0) {
  331. wdata->res.count = status;
  332. wdata->verf.committed = state->committed;
  333. dprintk("%s: Return status %d committed %d\n",
  334. __func__, wdata->task.tk_status,
  335. wdata->verf.committed);
  336. } else
  337. dprintk("%s: Return status %d\n",
  338. __func__, wdata->task.tk_status);
  339. objlayout_iodone(state);
  340. /* must not use state after this point */
  341. if (sync)
  342. pnfs_ld_write_done(wdata);
  343. else {
  344. INIT_WORK(&wdata->task.u.tk_work, _rpc_write_complete);
  345. schedule_work(&wdata->task.u.tk_work);
  346. }
  347. }
  348. /*
  349. * Perform sync or async writes.
  350. */
  351. enum pnfs_try_status
  352. objlayout_write_pagelist(struct nfs_write_data *wdata,
  353. int how)
  354. {
  355. struct objlayout_io_state *state;
  356. ssize_t status;
  357. dprintk("%s: Begin inode %p offset %llu count %u\n",
  358. __func__, wdata->inode, wdata->args.offset, wdata->args.count);
  359. state = objlayout_alloc_io_state(NFS_I(wdata->inode)->layout,
  360. wdata->args.pages,
  361. wdata->args.pgbase,
  362. wdata->args.offset,
  363. wdata->args.count,
  364. wdata->lseg, wdata,
  365. GFP_NOFS);
  366. if (unlikely(!state)) {
  367. status = -ENOMEM;
  368. goto out;
  369. }
  370. state->sync = how & FLUSH_SYNC;
  371. status = objio_write_pagelist(state, how & FLUSH_STABLE);
  372. out:
  373. dprintk("%s: Return status %Zd\n", __func__, status);
  374. wdata->pnfs_error = status;
  375. return PNFS_ATTEMPTED;
  376. }
  377. static int
  378. err_prio(u32 oer_errno)
  379. {
  380. switch (oer_errno) {
  381. case 0:
  382. return 0;
  383. case PNFS_OSD_ERR_RESOURCE:
  384. return OSD_ERR_PRI_RESOURCE;
  385. case PNFS_OSD_ERR_BAD_CRED:
  386. return OSD_ERR_PRI_BAD_CRED;
  387. case PNFS_OSD_ERR_NO_ACCESS:
  388. return OSD_ERR_PRI_NO_ACCESS;
  389. case PNFS_OSD_ERR_UNREACHABLE:
  390. return OSD_ERR_PRI_UNREACHABLE;
  391. case PNFS_OSD_ERR_NOT_FOUND:
  392. return OSD_ERR_PRI_NOT_FOUND;
  393. case PNFS_OSD_ERR_NO_SPACE:
  394. return OSD_ERR_PRI_NO_SPACE;
  395. default:
  396. WARN_ON(1);
  397. /* fallthrough */
  398. case PNFS_OSD_ERR_EIO:
  399. return OSD_ERR_PRI_EIO;
  400. }
  401. }
/*
 * Fold @src_err into @dest_err, producing one error that covers both:
 * the merged byte range is the union of the two ranges, identical
 * partition/object ids are kept (zeroed when they differ), and the
 * higher-priority errno wins, with any write error overriding a read
 * error.  The accumulated device id is always left blank.
 */
static void
merge_ioerr(struct pnfs_osd_ioerr *dest_err,
	    const struct pnfs_osd_ioerr *src_err)
{
	u64 dest_end, src_end;

	/* first error merged in: copy it wholesale */
	if (!dest_err->oer_errno) {
		*dest_err = *src_err;
		/* accumulated device must be blank */
		memset(&dest_err->oer_component.oid_device_id, 0,
		       sizeof(dest_err->oer_component.oid_device_id));

		return;
	}

	/* ids that differ between the two errors degrade to 0 (unknown) */
	if (dest_err->oer_component.oid_partition_id !=
	    src_err->oer_component.oid_partition_id)
		dest_err->oer_component.oid_partition_id = 0;

	if (dest_err->oer_component.oid_object_id !=
	    src_err->oer_component.oid_object_id)
		dest_err->oer_component.oid_object_id = 0;

	/* widen the byte range to the union of both errors */
	if (dest_err->oer_comp_offset > src_err->oer_comp_offset)
		dest_err->oer_comp_offset = src_err->oer_comp_offset;

	dest_end = end_offset(dest_err->oer_comp_offset,
			      dest_err->oer_comp_length);
	src_end = end_offset(src_err->oer_comp_offset,
			     src_err->oer_comp_length);
	if (dest_end < src_end)
		dest_end = src_end;

	dest_err->oer_comp_length = dest_end - dest_err->oer_comp_offset;

	/* same direction: keep the higher-priority errno;
	 * otherwise a write error always trumps a read error
	 */
	if ((src_err->oer_iswrite == dest_err->oer_iswrite) &&
	    (err_prio(src_err->oer_errno) > err_prio(dest_err->oer_errno))) {
			dest_err->oer_errno = src_err->oer_errno;
	} else if (src_err->oer_iswrite) {
		dest_err->oer_iswrite = true;
		dest_err->oer_errno = src_err->oer_errno;
	}
}
/*
 * Merge every remaining error on @objlay's err_list into a single
 * accumulated error and encode it at @p (a slot previously reserved in
 * the layoutreturn XDR stream).  Each visited io_state is unlinked and
 * freed.  Called when the XDR buffer ran out of room for per-component
 * error descriptors.  Caller holds objlay->lock.
 */
static void
encode_accumulated_error(struct objlayout *objlay, __be32 *p)
{
	struct objlayout_io_state *state, *tmp;
	struct pnfs_osd_ioerr accumulated_err = {.oer_errno = 0};

	/* _safe variant: entries are deleted while walking the list */
	list_for_each_entry_safe(state, tmp, &objlay->err_list, err_list) {
		unsigned i;

		for (i = 0; i < state->num_comps; i++) {
			struct pnfs_osd_ioerr *ioerr = &state->ioerrs[i];

			/* skip components that completed without error */
			if (!ioerr->oer_errno)
				continue;

			printk(KERN_ERR "%s: err[%d]: errno=%d is_write=%d "
				"dev(%llx:%llx) par=0x%llx obj=0x%llx "
				"offset=0x%llx length=0x%llx\n",
				__func__, i, ioerr->oer_errno,
				ioerr->oer_iswrite,
				_DEVID_LO(&ioerr->oer_component.oid_device_id),
				_DEVID_HI(&ioerr->oer_component.oid_device_id),
				ioerr->oer_component.oid_partition_id,
				ioerr->oer_component.oid_object_id,
				ioerr->oer_comp_offset,
				ioerr->oer_comp_length);

			merge_ioerr(&accumulated_err, ioerr);
		}
		list_del(&state->err_list);
		objlayout_free_io_state(state);
	}

	pnfs_osd_xdr_encode_ioerr(p, &accumulated_err);
}
/*
 * Encode the per-component I/O errors collected on the layout's
 * err_list into the LAYOUTRETURN request body.  Each encoded io_state
 * is unlinked and freed.  If the reserved XDR space runs out, the last
 * successfully reserved slot is overwritten with the union of all
 * remaining errors (see encode_accumulated_error()).  The opaque body
 * is prefixed with its length in bytes.
 */
void
objlayout_encode_layoutreturn(struct pnfs_layout_hdr *pnfslay,
			      struct xdr_stream *xdr,
			      const struct nfs4_layoutreturn_args *args)
{
	struct objlayout *objlay = OBJLAYOUT(pnfslay);
	struct objlayout_io_state *state, *tmp;
	__be32 *start;

	dprintk("%s: Begin\n", __func__);
	/* reserve 4 bytes for the opaque body length, patched at the end */
	start = xdr_reserve_space(xdr, 4);
	BUG_ON(!start);

	spin_lock(&objlay->lock);

	/* _safe variant: entries are deleted while walking the list */
	list_for_each_entry_safe(state, tmp, &objlay->err_list, err_list) {
		__be32 *last_xdr = NULL, *p;
		unsigned i;
		int res = 0;

		for (i = 0; i < state->num_comps; i++) {
			struct pnfs_osd_ioerr *ioerr = &state->ioerrs[i];

			if (!ioerr->oer_errno)
				continue;

			dprintk("%s: err[%d]: errno=%d is_write=%d "
				"dev(%llx:%llx) par=0x%llx obj=0x%llx "
				"offset=0x%llx length=0x%llx\n",
				__func__, i, ioerr->oer_errno,
				ioerr->oer_iswrite,
				_DEVID_LO(&ioerr->oer_component.oid_device_id),
				_DEVID_HI(&ioerr->oer_component.oid_device_id),
				ioerr->oer_component.oid_partition_id,
				ioerr->oer_component.oid_object_id,
				ioerr->oer_comp_offset,
				ioerr->oer_comp_length);

			p = pnfs_osd_xdr_ioerr_reserve_space(xdr);
			if (unlikely(!p)) {
				res = -E2BIG;
				break; /* accumulated_error */
			}

			/* remember the last good slot in case we overflow */
			last_xdr = p;
			pnfs_osd_xdr_encode_ioerr(p, &state->ioerrs[i]);
		}

		/* TODO: use xdr_write_pages */
		if (unlikely(res)) {
			/* no space for even one error descriptor */
			BUG_ON(!last_xdr);

			/* we've encountered a situation with lots and lots of
			 * errors and no space to encode them all. Use the last
			 * available slot to report the union of all the
			 * remaining errors.
			 */
			encode_accumulated_error(objlay, last_xdr);
			goto loop_done;
		}
		list_del(&state->err_list);
		objlayout_free_io_state(state);
	}
loop_done:
	spin_unlock(&objlay->lock);

	/* patch in the body length: words written after start, in bytes */
	*start = cpu_to_be32((xdr->p - start - 1) * 4);
	dprintk("%s: Return\n", __func__);
}
/*
 * Get Device Info API for io engines
 */
struct objlayout_deviceinfo {
	struct page *page;		/* backing page holding the raw XDR reply */
	struct pnfs_osd_deviceaddr da;	/* This must be last */
};
  532. /* Initialize and call nfs_getdeviceinfo, then decode and return a
  533. * "struct pnfs_osd_deviceaddr *" Eventually objlayout_put_deviceinfo()
  534. * should be called.
  535. */
  536. int objlayout_get_deviceinfo(struct pnfs_layout_hdr *pnfslay,
  537. struct nfs4_deviceid *d_id, struct pnfs_osd_deviceaddr **deviceaddr,
  538. gfp_t gfp_flags)
  539. {
  540. struct objlayout_deviceinfo *odi;
  541. struct pnfs_device pd;
  542. struct super_block *sb;
  543. struct page *page, **pages;
  544. u32 *p;
  545. int err;
  546. page = alloc_page(gfp_flags);
  547. if (!page)
  548. return -ENOMEM;
  549. pages = &page;
  550. pd.pages = pages;
  551. memcpy(&pd.dev_id, d_id, sizeof(*d_id));
  552. pd.layout_type = LAYOUT_OSD2_OBJECTS;
  553. pd.pages = &page;
  554. pd.pgbase = 0;
  555. pd.pglen = PAGE_SIZE;
  556. pd.mincount = 0;
  557. sb = pnfslay->plh_inode->i_sb;
  558. err = nfs4_proc_getdeviceinfo(NFS_SERVER(pnfslay->plh_inode), &pd);
  559. dprintk("%s nfs_getdeviceinfo returned %d\n", __func__, err);
  560. if (err)
  561. goto err_out;
  562. p = page_address(page);
  563. odi = kzalloc(sizeof(*odi), gfp_flags);
  564. if (!odi) {
  565. err = -ENOMEM;
  566. goto err_out;
  567. }
  568. pnfs_osd_xdr_decode_deviceaddr(&odi->da, p);
  569. odi->page = page;
  570. *deviceaddr = &odi->da;
  571. return 0;
  572. err_out:
  573. __free_page(page);
  574. return err;
  575. }
  576. void objlayout_put_deviceinfo(struct pnfs_osd_deviceaddr *deviceaddr)
  577. {
  578. struct objlayout_deviceinfo *odi = container_of(deviceaddr,
  579. struct objlayout_deviceinfo,
  580. da);
  581. __free_page(odi->page);
  582. kfree(odi);
  583. }