nfs4filelayoutdev.c

/*
 * Device operations for the pnfs nfs4 file layout driver.
 *
 * Copyright (c) 2002
 * The Regents of the University of Michigan
 * All Rights Reserved
 *
 * Dean Hildebrand <dhildebz@umich.edu>
 * Garth Goodson <Garth.Goodson@netapp.com>
 *
 * Permission is granted to use, copy, create derivative works, and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the University of Michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. If
 * the above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * This software is provided as is, without representation or warranty
 * of any kind either express or implied, including without limitation
 * the implied warranties of merchantability, fitness for a particular
 * purpose, or noninfringement. The Regents of the University of
 * Michigan shall not be liable for any damages, including special,
 * indirect, incidental, or consequential damages, with respect to any
 * claim arising out of or in connection with the use of the software,
 * even if it has been or is hereafter advised of the possibility of
 * such damages.
 */
#include <linux/nfs_fs.h>
#include <linux/vmalloc.h>

#include "internal.h"
#include "nfs4filelayout.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS_LD

/*
 * Device ID RCU cache. A device ID is unique per client ID and layout type.
 */
#define NFS4_FL_DEVICE_ID_HASH_BITS	5
#define NFS4_FL_DEVICE_ID_HASH_SIZE	(1 << NFS4_FL_DEVICE_ID_HASH_BITS)
#define NFS4_FL_DEVICE_ID_HASH_MASK	(NFS4_FL_DEVICE_ID_HASH_SIZE - 1)

static inline u32
nfs4_fl_deviceid_hash(struct nfs4_deviceid *id)
{
	unsigned char *cptr = (unsigned char *)id->data;
	unsigned int nbytes = NFS4_DEVICEID4_SIZE;
	u32 x = 0;

	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x & NFS4_FL_DEVICE_ID_HASH_MASK;
}
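
/*
 * Illustrative example (values are hypothetical): the hash above is a
 * base-37 rolling accumulator over the 16 device id bytes, truncated to
 * the low NFS4_FL_DEVICE_ID_HASH_BITS bits.  A device id starting with
 * bytes 0x01 0x02 accumulates x = 0 * 37 + 1 = 1, then x = 1 * 37 + 2 = 39;
 * after all NFS4_DEVICEID4_SIZE bytes, x & 0x1f picks one of the 32 hash
 * buckets.
 */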
static struct hlist_head filelayout_deviceid_cache[NFS4_FL_DEVICE_ID_HASH_SIZE];
static DEFINE_SPINLOCK(filelayout_deviceid_lock);

/*
 * Data server cache
 *
 * Data servers can be mapped to different device ids.
 * nfs4_pnfs_ds reference counting
 *   - set to 1 on allocation
 *   - incremented when a device id maps a data server already in the cache.
 *   - decremented when deviceid is removed from the cache.
 */
DEFINE_SPINLOCK(nfs4_ds_cache_lock);
static LIST_HEAD(nfs4_data_server_cache);
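
/*
 * Illustrative example (hypothetical scenario): if two device ids both
 * stripe over the same data server, they share a single cached
 * nfs4_pnfs_ds and ds_count reaches 2.  Freeing one device id drops the
 * count to 1; freeing the second drops it to 0, the entry is removed
 * from nfs4_data_server_cache, and destroy_ds() releases it.
 */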
/* Debug routines */
void
print_ds(struct nfs4_pnfs_ds *ds)
{
	if (ds == NULL) {
		printk("%s NULL device\n", __func__);
		return;
	}
	printk(" ip_addr %x port %hu\n"
		" ref count %d\n"
		" client %p\n"
		" cl_exchange_flags %x\n",
		ntohl(ds->ds_ip_addr), ntohs(ds->ds_port),
		atomic_read(&ds->ds_count), ds->ds_clp,
		ds->ds_clp ? ds->ds_clp->cl_exchange_flags : 0);
}

void
print_ds_list(struct nfs4_file_layout_dsaddr *dsaddr)
{
	int i;

	ifdebug(FACILITY) {
		printk("%s dsaddr->ds_num %d\n", __func__,
		       dsaddr->ds_num);
		for (i = 0; i < dsaddr->ds_num; i++)
			print_ds(dsaddr->ds_list[i]);
	}
}

void print_deviceid(struct nfs4_deviceid *id)
{
	u32 *p = (u32 *)id;

	dprintk("%s: device id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
}
/* nfs4_ds_cache_lock is held */
static struct nfs4_pnfs_ds *
_data_server_lookup_locked(u32 ip_addr, u32 port)
{
	struct nfs4_pnfs_ds *ds;

	dprintk("_data_server_lookup: ip_addr=%x port=%hu\n",
		ntohl(ip_addr), ntohs(port));

	list_for_each_entry(ds, &nfs4_data_server_cache, ds_node) {
		if (ds->ds_ip_addr == ip_addr &&
		    ds->ds_port == port) {
			return ds;
		}
	}
	return NULL;
}
/*
 * Create an rpc connection to the nfs4_pnfs_ds data server.
 * Currently only IPv4 is supported.
 */
static int
nfs4_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds)
{
	struct nfs_client *clp;
	struct sockaddr_in sin;
	int status = 0;

	dprintk("--> %s ip:port %x:%hu au_flavor %d\n", __func__,
		ntohl(ds->ds_ip_addr), ntohs(ds->ds_port),
		mds_srv->nfs_client->cl_rpcclient->cl_auth->au_flavor);

	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = ds->ds_ip_addr;
	sin.sin_port = ds->ds_port;

	clp = nfs4_set_ds_client(mds_srv->nfs_client, (struct sockaddr *)&sin,
				 sizeof(sin), IPPROTO_TCP);
	if (IS_ERR(clp)) {
		status = PTR_ERR(clp);
		goto out;
	}

	if ((clp->cl_exchange_flags & EXCHGID4_FLAG_MASK_PNFS) != 0) {
		if (!is_ds_client(clp)) {
			status = -ENODEV;
			goto out_put;
		}
		ds->ds_clp = clp;
		dprintk("%s [existing] ip=%x, port=%hu\n", __func__,
			ntohl(ds->ds_ip_addr), ntohs(ds->ds_port));
		goto out;
	}

	/*
	 * Do not set NFS_CS_CHECK_LEASE_TIME; instead set the DS lease equal
	 * to the MDS lease. Renewal is scheduled in create_session.
	 */
	spin_lock(&mds_srv->nfs_client->cl_lock);
	clp->cl_lease_time = mds_srv->nfs_client->cl_lease_time;
	spin_unlock(&mds_srv->nfs_client->cl_lock);
	clp->cl_last_renewal = jiffies;

	/* New nfs_client */
	status = nfs4_init_ds_session(clp);
	if (status)
		goto out_put;

	ds->ds_clp = clp;
	dprintk("%s [new] ip=%x, port=%hu\n", __func__, ntohl(ds->ds_ip_addr),
		ntohs(ds->ds_port));
out:
	return status;
out_put:
	nfs_put_client(clp);
	goto out;
}
static void
destroy_ds(struct nfs4_pnfs_ds *ds)
{
	dprintk("--> %s\n", __func__);
	ifdebug(FACILITY)
		print_ds(ds);

	if (ds->ds_clp)
		nfs_put_client(ds->ds_clp);
	kfree(ds);
}

static void
nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr)
{
	struct nfs4_pnfs_ds *ds;
	int i;

	print_deviceid(&dsaddr->deviceid);

	for (i = 0; i < dsaddr->ds_num; i++) {
		ds = dsaddr->ds_list[i];
		if (ds != NULL) {
			if (atomic_dec_and_lock(&ds->ds_count,
						&nfs4_ds_cache_lock)) {
				list_del_init(&ds->ds_node);
				spin_unlock(&nfs4_ds_cache_lock);
				destroy_ds(ds);
			}
		}
	}
	kfree(dsaddr->stripe_indices);
	kfree(dsaddr);
}
static struct nfs4_pnfs_ds *
nfs4_pnfs_ds_add(struct inode *inode, u32 ip_addr, u32 port)
{
	struct nfs4_pnfs_ds *tmp_ds, *ds;

	ds = kzalloc(sizeof(*tmp_ds), GFP_KERNEL);
	if (!ds)
		goto out;

	spin_lock(&nfs4_ds_cache_lock);
	tmp_ds = _data_server_lookup_locked(ip_addr, port);
	if (tmp_ds == NULL) {
		ds->ds_ip_addr = ip_addr;
		ds->ds_port = port;
		atomic_set(&ds->ds_count, 1);
		INIT_LIST_HEAD(&ds->ds_node);
		ds->ds_clp = NULL;
		list_add(&ds->ds_node, &nfs4_data_server_cache);
		dprintk("%s add new data server ip 0x%x\n", __func__,
			ds->ds_ip_addr);
	} else {
		kfree(ds);
		atomic_inc(&tmp_ds->ds_count);
		dprintk("%s data server found ip 0x%x, inc'ed ds_count to %d\n",
			__func__, tmp_ds->ds_ip_addr,
			atomic_read(&tmp_ds->ds_count));
		ds = tmp_ds;
	}
	spin_unlock(&nfs4_ds_cache_lock);
out:
	return ds;
}
/*
 * Currently only IPv4 and a single multipath address are supported.
 */
static struct nfs4_pnfs_ds *
decode_and_add_ds(__be32 **pp, struct inode *inode)
{
	struct nfs4_pnfs_ds *ds = NULL;
	char *buf;
	const char *ipend, *pstr;
	u32 ip_addr, port;
	int nlen, rlen, i;
	int tmp[2];
	__be32 *r_netid, *r_addr, *p = *pp;

	/* r_netid */
	nlen = be32_to_cpup(p++);
	r_netid = p;
	p += XDR_QUADLEN(nlen);

	/* r_addr */
	rlen = be32_to_cpup(p++);
	r_addr = p;
	p += XDR_QUADLEN(rlen);
	*pp = p;

	/* Check that netid is "tcp" */
	if (nlen != 3 || memcmp((char *)r_netid, "tcp", 3)) {
		dprintk("%s: ERROR: non ipv4 TCP r_netid\n", __func__);
		goto out_err;
	}

	/* ipv6 length plus port is legal */
	if (rlen > INET6_ADDRSTRLEN + 8) {
		dprintk("%s: Invalid address, length %d\n", __func__,
			rlen);
		goto out_err;
	}
	buf = kmalloc(rlen + 1, GFP_KERNEL);
	if (!buf) {
		dprintk("%s: Not enough memory\n", __func__);
		goto out_err;
	}
	buf[rlen] = '\0';
	memcpy(buf, r_addr, rlen);

	/* replace the port dots with dashes for the in4_pton() delimiter */
	for (i = 0; i < 2; i++) {
		char *res = strrchr(buf, '.');
		if (!res) {
			dprintk("%s: Failed finding expected dots in port\n",
				__func__);
			goto out_free;
		}
		*res = '-';
	}

	/* Currently only IPv4 addresses are supported */
	if (in4_pton(buf, rlen, (u8 *)&ip_addr, '-', &ipend) == 0) {
		dprintk("%s: Only ipv4 addresses supported\n", __func__);
		goto out_free;
	}

	/* port */
	pstr = ipend;
	sscanf(pstr, "-%d-%d", &tmp[0], &tmp[1]);

	port = htons((tmp[0] << 8) | (tmp[1]));

	ds = nfs4_pnfs_ds_add(inode, ip_addr, port);
	dprintk("%s: Decoded address and port %s\n", __func__, buf);
out_free:
	kfree(buf);
out_err:
	return ds;
}
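
/*
 * Illustrative example (address is hypothetical): r_addr uses the
 * universal address form "a.b.c.d.p1.p2", where p1 and p2 are the high
 * and low bytes of the port.  "10.1.1.10.8.1" therefore decodes to the
 * IPv4 address 10.1.1.10 and port 8 * 256 + 1 = 2049.
 */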
/* Decode opaque device data and return the result */
static struct nfs4_file_layout_dsaddr*
decode_device(struct inode *ino, struct pnfs_device *pdev)
{
	int i, dummy;
	u32 cnt, num;
	u8 *indexp;
	__be32 *p = (__be32 *)pdev->area, *indicesp;
	struct nfs4_file_layout_dsaddr *dsaddr;

	/* Get the stripe count (number of stripe index) */
	cnt = be32_to_cpup(p++);
	dprintk("%s stripe count %d\n", __func__, cnt);
	if (cnt > NFS4_PNFS_MAX_STRIPE_CNT) {
		printk(KERN_WARNING "%s: stripe count %d greater than "
			"supported maximum %d\n", __func__,
			cnt, NFS4_PNFS_MAX_STRIPE_CNT);
		goto out_err;
	}

	/* Check the multipath list count */
	indicesp = p;
	p += XDR_QUADLEN(cnt << 2);
	num = be32_to_cpup(p++);
	dprintk("%s ds_num %u\n", __func__, num);
	if (num > NFS4_PNFS_MAX_MULTI_CNT) {
		printk(KERN_WARNING "%s: multipath count %d greater than "
			"supported maximum %d\n", __func__,
			num, NFS4_PNFS_MAX_MULTI_CNT);
		goto out_err;
	}
	dsaddr = kzalloc(sizeof(*dsaddr) +
			(sizeof(struct nfs4_pnfs_ds *) * (num - 1)),
			GFP_KERNEL);
	if (!dsaddr)
		goto out_err;

	dsaddr->stripe_indices = kzalloc(sizeof(u8) * cnt, GFP_KERNEL);
	if (!dsaddr->stripe_indices)
		goto out_err_free;

	dsaddr->stripe_count = cnt;
	dsaddr->ds_num = num;

	memcpy(&dsaddr->deviceid, &pdev->dev_id, sizeof(pdev->dev_id));

	/* Go back and read stripe indices */
	p = indicesp;
	indexp = &dsaddr->stripe_indices[0];
	for (i = 0; i < dsaddr->stripe_count; i++) {
		*indexp = be32_to_cpup(p++);
		if (*indexp >= num)
			goto out_err_free;
		indexp++;
	}
	/* Skip already read multipath list count */
	p++;

	for (i = 0; i < dsaddr->ds_num; i++) {
		int j;

		dummy = be32_to_cpup(p++); /* multipath count */
		if (dummy > 1) {
			printk(KERN_WARNING
				"%s: Multipath count %d not supported, "
				"skipping all greater than 1\n", __func__,
				dummy);
		}
		for (j = 0; j < dummy; j++) {
			if (j == 0) {
				dsaddr->ds_list[i] = decode_and_add_ds(&p, ino);
				if (dsaddr->ds_list[i] == NULL)
					goto out_err_free;
			} else {
				u32 len;
				/* skip extra multipath */
				len = be32_to_cpup(p++);
				p += XDR_QUADLEN(len);
				len = be32_to_cpup(p++);
				p += XDR_QUADLEN(len);
				continue;
			}
		}
	}
	return dsaddr;

out_err_free:
	nfs4_fl_free_deviceid(dsaddr);
out_err:
	dprintk("%s ERROR: returning NULL\n", __func__);
	return NULL;
}
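
/*
 * Illustrative sketch (layout inferred from the decode above): the opaque
 * GETDEVICEINFO body walked by decode_device() looks roughly like
 *
 *	stripe index count | stripe indices[] |
 *	multipath list count |
 *	  per list entry: address count | (r_netid, r_addr) pairs
 *
 * Only the first (r_netid, r_addr) pair of each multipath entry is
 * decoded; any additional pairs are skipped.
 */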
/*
 * Decode the opaque device specified in 'dev' and add it to the cache of
 * available devices.
 */
static struct nfs4_file_layout_dsaddr *
decode_and_add_device(struct inode *inode, struct pnfs_device *dev)
{
	struct nfs4_file_layout_dsaddr *d, *new;
	long hash;

	new = decode_device(inode, dev);
	if (!new) {
		printk(KERN_WARNING "%s: Could not decode or add device\n",
			__func__);
		return NULL;
	}

	spin_lock(&filelayout_deviceid_lock);
	d = nfs4_fl_find_get_deviceid(&new->deviceid);
	if (d) {
		spin_unlock(&filelayout_deviceid_lock);
		nfs4_fl_free_deviceid(new);
		return d;
	}

	INIT_HLIST_NODE(&new->node);
	atomic_set(&new->ref, 1);
	hash = nfs4_fl_deviceid_hash(&new->deviceid);
	hlist_add_head_rcu(&new->node, &filelayout_deviceid_cache[hash]);
	spin_unlock(&filelayout_deviceid_lock);

	return new;
}
/*
 * Retrieve the information for dev_id, add it to the list
 * of available devices, and return it.
 */
struct nfs4_file_layout_dsaddr *
get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id)
{
	struct pnfs_device *pdev = NULL;
	u32 max_resp_sz;
	int max_pages;
	struct page **pages = NULL;
	struct nfs4_file_layout_dsaddr *dsaddr = NULL;
	int rc, i;
	struct nfs_server *server = NFS_SERVER(inode);

	/*
	 * Use the session max response size as the basis for setting
	 * GETDEVICEINFO's maxcount
	 */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	max_pages = max_resp_sz >> PAGE_SHIFT;
	dprintk("%s inode %p max_resp_sz %u max_pages %d\n",
		__func__, inode, max_resp_sz, max_pages);

	pdev = kzalloc(sizeof(struct pnfs_device), GFP_KERNEL);
	if (pdev == NULL)
		return NULL;

	pages = kzalloc(max_pages * sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL) {
		kfree(pdev);
		return NULL;
	}
	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto out_free;
	}

	/* set pdev->area */
	pdev->area = vmap(pages, max_pages, VM_MAP, PAGE_KERNEL);
	if (!pdev->area)
		goto out_free;

	memcpy(&pdev->dev_id, dev_id, sizeof(*dev_id));
	pdev->layout_type = LAYOUT_NFSV4_1_FILES;
	pdev->pages = pages;
	pdev->pgbase = 0;
	pdev->pglen = PAGE_SIZE * max_pages;
	pdev->mincount = 0;

	rc = nfs4_proc_getdeviceinfo(server, pdev);
	dprintk("%s getdevice info returns %d\n", __func__, rc);
	if (rc)
		goto out_free;

	/*
	 * Found new device, need to decode it and then add it to the
	 * list of known devices for this mountpoint.
	 */
	dsaddr = decode_and_add_device(inode, pdev);
out_free:
	if (pdev->area != NULL)
		vunmap(pdev->area);
	for (i = 0; i < max_pages; i++)
		if (pages[i])
			__free_page(pages[i]);
	kfree(pages);
	kfree(pdev);
	dprintk("<-- %s dsaddr %p\n", __func__, dsaddr);
	return dsaddr;
}
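
/*
 * Illustrative example (sizes are hypothetical): with a session
 * max_resp_sz of 64 KB and 4 KB pages (PAGE_SHIFT == 12), the reply
 * buffer above is max_pages = 65536 >> 12 = 16 pages, vmapped into one
 * contiguous area with pglen = 64 KB.
 */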
void
nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr)
{
	if (atomic_dec_and_lock(&dsaddr->ref, &filelayout_deviceid_lock)) {
		hlist_del_rcu(&dsaddr->node);
		spin_unlock(&filelayout_deviceid_lock);
		synchronize_rcu();
		nfs4_fl_free_deviceid(dsaddr);
	}
}

struct nfs4_file_layout_dsaddr *
nfs4_fl_find_get_deviceid(struct nfs4_deviceid *id)
{
	struct nfs4_file_layout_dsaddr *d;
	struct hlist_node *n;
	long hash = nfs4_fl_deviceid_hash(id);

	rcu_read_lock();
	hlist_for_each_entry_rcu(d, n, &filelayout_deviceid_cache[hash], node) {
		if (!memcmp(&d->deviceid, id, sizeof(*id))) {
			if (!atomic_inc_not_zero(&d->ref))
				goto fail;
			rcu_read_unlock();
			return d;
		}
	}
fail:
	rcu_read_unlock();
	return NULL;
}
/*
 * Want res = (offset - layout->pattern_offset) / layout->stripe_unit
 * Then: ((res + fsi) % dsaddr->stripe_count)
 */
u32
nfs4_fl_calc_j_index(struct pnfs_layout_segment *lseg, loff_t offset)
{
	struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg);
	u64 tmp;

	tmp = offset - flseg->pattern_offset;
	do_div(tmp, flseg->stripe_unit);
	tmp += flseg->first_stripe_index;
	return do_div(tmp, flseg->dsaddr->stripe_count);
}
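
/*
 * Illustrative example (values are hypothetical): with pattern_offset 0,
 * a 64 KB stripe_unit, first_stripe_index 2 and stripe_count 4, an
 * offset of 256 KB gives res = 262144 / 65536 = 4, so the returned
 * stripe index is j = (4 + 2) % 4 = 2.
 */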
u32
nfs4_fl_calc_ds_index(struct pnfs_layout_segment *lseg, u32 j)
{
	return FILELAYOUT_LSEG(lseg)->dsaddr->stripe_indices[j];
}

struct nfs_fh *
nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j)
{
	struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg);
	u32 i;

	if (flseg->stripe_type == STRIPE_SPARSE) {
		if (flseg->num_fh == 1)
			i = 0;
		else if (flseg->num_fh == 0)
			/* Use the MDS OPEN fh set in nfs_read_rpcsetup */
			return NULL;
		else
			i = nfs4_fl_calc_ds_index(lseg, j);
	} else
		i = j;
	return flseg->fh_array[i];
}
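
/*
 * Illustrative example (values are hypothetical): continuing the j = 2
 * example above, a stripe_indices array of {0, 2, 1, 2} maps stripe
 * index 2 to data server index 1 in ds_list.  For a sparse layout with
 * more than one filehandle the same index selects the fh_array entry,
 * while a dense layout uses fh_array[j] directly.
 */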
static void
filelayout_mark_devid_negative(struct nfs4_file_layout_dsaddr *dsaddr,
			       int err, u32 ds_addr)
{
	u32 *p = (u32 *)&dsaddr->deviceid;

	printk(KERN_ERR "NFS: data server %x connection error %d."
		" Deviceid [%x%x%x%x] marked out of use.\n",
		ds_addr, err, p[0], p[1], p[2], p[3]);

	spin_lock(&filelayout_deviceid_lock);
	dsaddr->flags |= NFS4_DEVICE_ID_NEG_ENTRY;
	spin_unlock(&filelayout_deviceid_lock);
}
struct nfs4_pnfs_ds *
nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
{
	struct nfs4_file_layout_dsaddr *dsaddr = FILELAYOUT_LSEG(lseg)->dsaddr;
	struct nfs4_pnfs_ds *ds = dsaddr->ds_list[ds_idx];

	if (ds == NULL) {
		printk(KERN_ERR "%s: No data server for offset index %d\n",
			__func__, ds_idx);
		return NULL;
	}

	if (!ds->ds_clp) {
		struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode);
		int err;

		if (dsaddr->flags & NFS4_DEVICE_ID_NEG_ENTRY) {
			/* Already tried to connect, don't try again */
			dprintk("%s Deviceid marked out of use\n", __func__);
			return NULL;
		}
		err = nfs4_ds_connect(s, ds);
		if (err) {
			filelayout_mark_devid_negative(dsaddr, err,
						       ntohl(ds->ds_ip_addr));
			return NULL;
		}
	}
	return ds;
}