/*
 * linux/fs/9p/trans_fd.c
 *
 * Fd transport layer. Includes deprecated socket layer.
 *
 * Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
 * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 * Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA 02111-1301 USA
 *
 */

#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/parser.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>

#define P9_PORT 564
#define MAX_SOCK_BUF (64*1024)
#define ERREQFLUSH 1
#define MAXPOLLWADDR 2

/**
 * struct p9_fd_opts - per-transport options
 * @rfd: file descriptor for reading (trans=fd)
 * @wfd: file descriptor for writing (trans=fd)
 * @port: port to connect to (trans=tcp)
 *
 */
struct p9_fd_opts {
	int rfd;
	int wfd;
	u16 port;
};

/**
 * struct p9_trans_fd - transport state
 * @rd: reference to file to read from
 * @wr: reference to file to write to
 * @conn: connection state reference
 *
 */
struct p9_trans_fd {
	struct file *rd;
	struct file *wr;
	struct p9_conn *conn;
};

/*
 * Option Parsing (code inspired by NFS code)
 *  - a little lazy - parse all fd-transport options
 */
enum {
	/* Options that take integer arguments */
	Opt_port, Opt_rfdno, Opt_wfdno, Opt_err,
};

static const match_table_t tokens = {
	{Opt_port, "port=%u"},
	{Opt_rfdno, "rfdno=%u"},
	{Opt_wfdno, "wfdno=%u"},
	{Opt_err, NULL},
};
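
/*
 * A usage sketch (assumed mount invocations, not taken from this file):
 *
 *	mount -t 9p -o trans=tcp,port=5640 10.0.0.2 /mnt/9
 *	mount -t 9p -o trans=fd,rfdno=4,wfdno=5 nodev /mnt/9
 *
 * The remaining mount options reach this transport's create function,
 * where parse_opts() below matches them against this table; tokens that
 * are not recognized are skipped.
 */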

enum {
	Rworksched = 1,		/* read work scheduled or running */
	Rpending = 2,		/* can read */
	Wworksched = 4,		/* write work scheduled or running */
	Wpending = 8,		/* can write */
};
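
/*
 * These flags live in p9_conn->wsched and coordinate the poll callback
 * with the work functions: Rpending/Wpending record that the fd was seen
 * ready, while Rworksched/Wworksched are manipulated with
 * test_and_set_bit() so that at most one read work and one write work
 * are queued per connection at a time.
 */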

enum {
	None,
	Flushing,
	Flushed,
};

struct p9_req;
typedef void (*p9_conn_req_callback)(struct p9_req *req, void *a);

/**
 * struct p9_req - fd mux encoding of an rpc transaction
 * @lock: protects req_list
 * @tag: numeric tag for rpc transaction
 * @tcall: request &p9_fcall structure
 * @rcall: response &p9_fcall structure
 * @err: error state
 * @cb: callback for when response is received
 * @cba: argument to pass to callback
 * @flush: flag to indicate RPC has been flushed
 * @req_list: list link for higher level objects to chain requests
 *
 */
struct p9_req {
	spinlock_t lock;
	int tag;
	struct p9_fcall *tcall;
	struct p9_fcall *rcall;
	int err;
	p9_conn_req_callback cb;
	void *cba;
	int flush;
	struct list_head req_list;
};

struct p9_poll_wait {
	struct p9_conn *conn;
	wait_queue_t wait;
	wait_queue_head_t *wait_addr;
};

/**
 * struct p9_conn - fd mux connection state information
 * @lock: protects @req_list and @unsent_req_list
 * @mux_list: list link for mux to manage multiple connections
 * @client: reference to client instance for this connection
 * @tagpool: id accounting for transactions
 * @err: error state
 * @req_list: accounting for requests which have been sent
 * @unsent_req_list: accounting for requests that haven't been sent
 * @rcall: current response &p9_fcall structure
 * @rpos: read position in current frame
 * @rbuf: current read buffer
 * @wpos: write position for current frame
 * @wsize: amount of data to write for current frame
 * @wbuf: current write buffer
 * @poll_pending_link: link into the global list of connections with
 *	pending poll events
 * @poll_wait: array of wait_q's for various worker threads
 * @pt: poll state
 * @rq: current read work
 * @wq: current write work
 * @wsched: read/write work scheduling flags (Rworksched, Wworksched,
 *	Rpending, Wpending)
 *
 */
struct p9_conn {
	spinlock_t lock; /* protect conn structure */
	struct list_head mux_list;
	struct p9_client *client;
	struct p9_idpool *tagpool;
	int err;
	struct list_head req_list;
	struct list_head unsent_req_list;
	struct p9_fcall *rcall;
	int rpos;
	char *rbuf;
	int wpos;
	int wsize;
	char *wbuf;
	struct list_head poll_pending_link;
	struct p9_poll_wait poll_wait[MAXPOLLWADDR];
	poll_table pt;
	struct work_struct rq;
	struct work_struct wq;
	unsigned long wsched;
};

/**
 * struct p9_mux_rpc - fd mux rpc accounting structure
 * @m: connection this request was issued on
 * @err: error state
 * @tcall: request &p9_fcall
 * @rcall: response &p9_fcall
 * @wqueue: wait queue that client is blocked on for this rpc
 *
 * Bug: isn't this information duplicated elsewhere like &p9_req
 */
struct p9_mux_rpc {
	struct p9_conn *m;
	int err;
	struct p9_fcall *tcall;
	struct p9_fcall *rcall;
	wait_queue_head_t wqueue;
};

static DEFINE_SPINLOCK(p9_poll_lock);
static LIST_HEAD(p9_poll_pending_list);
static struct workqueue_struct *p9_mux_wq;
static struct task_struct *p9_poll_task;

static u16 p9_mux_get_tag(struct p9_conn *m)
{
	int tag;

	tag = p9_idpool_get(m->tagpool);
	if (tag < 0)
		return P9_NOTAG;
	else
		return (u16) tag;
}

static void p9_mux_put_tag(struct p9_conn *m, u16 tag)
{
	if (tag != P9_NOTAG && p9_idpool_check(tag, m->tagpool))
		p9_idpool_put(tag, m->tagpool);
}

static void p9_mux_poll_stop(struct p9_conn *m)
{
	unsigned long flags;
	int i;

	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
		struct p9_poll_wait *pwait = &m->poll_wait[i];

		if (pwait->wait_addr) {
			remove_wait_queue(pwait->wait_addr, &pwait->wait);
			pwait->wait_addr = NULL;
		}
	}

	spin_lock_irqsave(&p9_poll_lock, flags);
	list_del_init(&m->poll_pending_link);
	spin_unlock_irqrestore(&p9_poll_lock, flags);
}

/**
 * p9_conn_cancel - cancel all pending requests with error
 * @m: mux data
 * @err: error code
 *
 */
void p9_conn_cancel(struct p9_conn *m, int err)
{
	struct p9_req *req, *rtmp;
	LIST_HEAD(cancel_list);

	P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
	m->err = err;
	spin_lock(&m->lock);
	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
		list_move(&req->req_list, &cancel_list);
	}
	list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
		list_move(&req->req_list, &cancel_list);
	}
	spin_unlock(&m->lock);

	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
		list_del(&req->req_list);
		if (!req->err)
			req->err = err;

		if (req->cb)
			(*req->cb) (req, req->cba);
		else
			kfree(req->rcall);
	}
}
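
/*
 * Note the two-phase structure above: requests are first moved to a
 * private cancel_list under m->lock, and only after the lock is dropped
 * are the callbacks invoked, since a callback (p9_mux_flush_cb(), for
 * example) may itself take m->lock and so must not run under it.
 */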

static void process_request(struct p9_conn *m, struct p9_req *req)
{
	int ecode;
	struct p9_str *ename;

	if (!req->err && req->rcall->id == P9_RERROR) {
		ecode = req->rcall->params.rerror.errno;
		ename = &req->rcall->params.rerror.error;

		P9_DPRINTK(P9_DEBUG_MUX, "Rerror %.*s\n", ename->len,
								ename->str);

		if (m->client->dotu)
			req->err = -ecode;

		if (!req->err) {
			req->err = p9_errstr2errno(ename->str, ename->len);

			/* string match failed */
			if (!req->err) {
				PRINT_FCALL_ERROR("unknown error", req->rcall);
				req->err = -ESERVERFAULT;
			}
		}
	} else if (req->tcall && req->rcall->id != req->tcall->id + 1) {
		P9_DPRINTK(P9_DEBUG_ERROR,
				"fcall mismatch: expected %d, got %d\n",
				req->tcall->id + 1, req->rcall->id);
		if (!req->err)
			req->err = -EIO;
	}
}

static unsigned int
p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt)
{
	int ret, n;
	struct p9_trans_fd *ts = NULL;

	if (client && client->status == Connected)
		ts = client->trans;

	if (!ts)
		return -EREMOTEIO;

	if (!ts->rd->f_op || !ts->rd->f_op->poll)
		return -EIO;

	if (!ts->wr->f_op || !ts->wr->f_op->poll)
		return -EIO;

	ret = ts->rd->f_op->poll(ts->rd, pt);
	if (ret < 0)
		return ret;

	if (ts->rd != ts->wr) {
		n = ts->wr->f_op->poll(ts->wr, pt);
		if (n < 0)
			return n;
		ret = (ret & ~POLLOUT) | (n & ~POLLIN);
	}

	return ret;
}
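
/*
 * When trans=fd supplies distinct read and write descriptors, the mask
 * merge above keeps everything except POLLOUT from the read side and
 * everything except POLLIN from the write side, so error bits reported
 * by either fd are preserved in the combined result.
 */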

/**
 * p9_fd_read - read from a fd
 * @client: client instance
 * @v: buffer to receive data into
 * @len: size of receive buffer
 *
 */
static int p9_fd_read(struct p9_client *client, void *v, int len)
{
	int ret;
	struct p9_trans_fd *ts = NULL;

	if (client && client->status != Disconnected)
		ts = client->trans;

	if (!ts)
		return -EREMOTEIO;

	if (!(ts->rd->f_flags & O_NONBLOCK))
		P9_DPRINTK(P9_DEBUG_ERROR, "blocking read ...\n");

	ret = kernel_read(ts->rd, ts->rd->f_pos, v, len);
	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
		client->status = Disconnected;
	return ret;
}

/**
 * p9_read_work - called when there is some data to be read from a transport
 * @work: container of work to be done
 *
 */
static void p9_read_work(struct work_struct *work)
{
	int n, err;
	struct p9_conn *m;
	struct p9_req *req, *rptr, *rreq;
	struct p9_fcall *rcall;
	char *rbuf;

	m = container_of(work, struct p9_conn, rq);

	if (m->err < 0)
		return;

	rcall = NULL;
	P9_DPRINTK(P9_DEBUG_MUX, "start mux %p pos %d\n", m, m->rpos);

	if (!m->rcall) {
		m->rcall =
		    kmalloc(sizeof(struct p9_fcall) + m->client->msize,
								GFP_KERNEL);
		if (!m->rcall) {
			err = -ENOMEM;
			goto error;
		}

		m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall);
		m->rpos = 0;
	}

	clear_bit(Rpending, &m->wsched);
	err = p9_fd_read(m->client, m->rbuf + m->rpos,
					m->client->msize - m->rpos);
	P9_DPRINTK(P9_DEBUG_MUX, "mux %p got %d bytes\n", m, err);
	if (err == -EAGAIN) {
		clear_bit(Rworksched, &m->wsched);
		return;
	}

	if (err <= 0)
		goto error;

	m->rpos += err;
	while (m->rpos > 4) {
		n = le32_to_cpu(*(__le32 *) m->rbuf);
		if (n >= m->client->msize) {
			P9_DPRINTK(P9_DEBUG_ERROR,
				"requested packet size too big: %d\n", n);
			err = -EIO;
			goto error;
		}

		if (m->rpos < n)
			break;

		err =
		    p9_deserialize_fcall(m->rbuf, n, m->rcall, m->client->dotu);
		if (err < 0)
			goto error;

#ifdef CONFIG_NET_9P_DEBUG
		if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
			char buf[150];

			p9_printfcall(buf, sizeof(buf), m->rcall,
				m->client->dotu);
			printk(KERN_NOTICE ">>> %p %s\n", m, buf);
		}
#endif

		rcall = m->rcall;
		rbuf = m->rbuf;
		if (m->rpos > n) {
			m->rcall = kmalloc(sizeof(struct p9_fcall) +
						m->client->msize, GFP_KERNEL);
			if (!m->rcall) {
				err = -ENOMEM;
				goto error;
			}

			m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall);
			memmove(m->rbuf, rbuf + n, m->rpos - n);
			m->rpos -= n;
		} else {
			m->rcall = NULL;
			m->rbuf = NULL;
			m->rpos = 0;
		}

		P9_DPRINTK(P9_DEBUG_MUX, "mux %p fcall id %d tag %d\n", m,
							rcall->id, rcall->tag);

		req = NULL;
		spin_lock(&m->lock);
		list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
			if (rreq->tag == rcall->tag) {
				req = rreq;
				if (req->flush != Flushing)
					list_del(&req->req_list);
				break;
			}
		}
		spin_unlock(&m->lock);

		if (req) {
			req->rcall = rcall;
			process_request(m, req);

			if (req->flush != Flushing) {
				if (req->cb)
					(*req->cb) (req, req->cba);
				else
					kfree(req->rcall);
			}
		} else {
			if (err >= 0 && rcall->id != P9_RFLUSH)
				P9_DPRINTK(P9_DEBUG_ERROR,
				  "unexpected response mux %p id %d tag %d\n",
				  m, rcall->id, rcall->tag);
			kfree(rcall);
		}
	}

	if (!list_empty(&m->req_list)) {
		if (test_and_clear_bit(Rpending, &m->wsched))
			n = POLLIN;
		else
			n = p9_fd_poll(m->client, NULL);

		if (n & POLLIN) {
			P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m);
			queue_work(p9_mux_wq, &m->rq);
		} else
			clear_bit(Rworksched, &m->wsched);
	} else
		clear_bit(Rworksched, &m->wsched);

	return;

error:
	p9_conn_cancel(m, err);
	clear_bit(Rworksched, &m->wsched);
}
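
/*
 * Framing note for the loop above: every 9P message starts with a
 * little-endian size[4] field counting the whole frame (size field
 * included), so deserialization can begin once more than four bytes are
 * buffered, and any surplus bytes are copied down as the start of the
 * next frame.
 */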

/**
 * p9_fd_write - write to a fd
 * @client: client instance
 * @v: buffer to send data from
 * @len: size of send buffer
 *
 */
static int p9_fd_write(struct p9_client *client, void *v, int len)
{
	int ret;
	mm_segment_t oldfs;
	struct p9_trans_fd *ts = NULL;

	if (client && client->status != Disconnected)
		ts = client->trans;

	if (!ts)
		return -EREMOTEIO;

	if (!(ts->wr->f_flags & O_NONBLOCK))
		P9_DPRINTK(P9_DEBUG_ERROR, "blocking write ...\n");

	oldfs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	ret = vfs_write(ts->wr, (void __user *)v, len, &ts->wr->f_pos);
	set_fs(oldfs);

	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
		client->status = Disconnected;
	return ret;
}

/**
 * p9_write_work - called when a transport can send some data
 * @work: container for work to be done
 *
 */
static void p9_write_work(struct work_struct *work)
{
	int n, err;
	struct p9_conn *m;
	struct p9_req *req;

	m = container_of(work, struct p9_conn, wq);

	if (m->err < 0) {
		clear_bit(Wworksched, &m->wsched);
		return;
	}

	if (!m->wsize) {
		if (list_empty(&m->unsent_req_list)) {
			clear_bit(Wworksched, &m->wsched);
			return;
		}

		spin_lock(&m->lock);
again:
		req = list_entry(m->unsent_req_list.next, struct p9_req,
							req_list);
		list_move_tail(&req->req_list, &m->req_list);
		if (req->err == ERREQFLUSH)
			goto again;

		m->wbuf = req->tcall->sdata;
		m->wsize = req->tcall->size;
		m->wpos = 0;
		spin_unlock(&m->lock);
	}

	P9_DPRINTK(P9_DEBUG_MUX, "mux %p pos %d size %d\n", m, m->wpos,
								m->wsize);
	clear_bit(Wpending, &m->wsched);
	err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos);
	P9_DPRINTK(P9_DEBUG_MUX, "mux %p sent %d bytes\n", m, err);
	if (err == -EAGAIN) {
		clear_bit(Wworksched, &m->wsched);
		return;
	}

	if (err < 0)
		goto error;
	else if (err == 0) {
		err = -EREMOTEIO;
		goto error;
	}

	m->wpos += err;
	if (m->wpos == m->wsize)
		m->wpos = m->wsize = 0;

	if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) {
		if (test_and_clear_bit(Wpending, &m->wsched))
			n = POLLOUT;
		else
			n = p9_fd_poll(m->client, NULL);

		if (n & POLLOUT) {
			P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m);
			queue_work(p9_mux_wq, &m->wq);
		} else
			clear_bit(Wworksched, &m->wsched);
	} else
		clear_bit(Wworksched, &m->wsched);

	return;

error:
	p9_conn_cancel(m, err);
	clear_bit(Wworksched, &m->wsched);
}

static int p9_pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct p9_poll_wait *pwait =
		container_of(wait, struct p9_poll_wait, wait);
	struct p9_conn *m = pwait->conn;
	unsigned long flags;
	DECLARE_WAITQUEUE(dummy_wait, p9_poll_task);

	spin_lock_irqsave(&p9_poll_lock, flags);
	if (list_empty(&m->poll_pending_link))
		list_add_tail(&m->poll_pending_link, &p9_poll_pending_list);
	spin_unlock_irqrestore(&p9_poll_lock, flags);

	/* perform the default wake up operation */
	return default_wake_function(&dummy_wait, mode, sync, key);
}
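
/*
 * A note on the dummy entry above: the wait queue entry registered on
 * the fd dispatches to p9_pollwake() rather than to a sleeping task, so
 * to rouse the single shared poll task the handler builds a throwaway
 * entry naming p9_poll_task and hands it to default_wake_function().
 */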

/**
 * p9_pollwait - add poll task to the wait queue
 * @filp: file pointer being polled
 * @wait_address: wait_q to block on
 * @p: poll state
 *
 * called by files poll operation to add v9fs-poll task to files wait queue
 */
static void
p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
{
	struct p9_conn *m = container_of(p, struct p9_conn, pt);
	struct p9_poll_wait *pwait = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
		if (m->poll_wait[i].wait_addr == NULL) {
			pwait = &m->poll_wait[i];
			break;
		}
	}

	if (!pwait) {
		P9_DPRINTK(P9_DEBUG_ERROR, "not enough wait_address slots\n");
		return;
	}

	if (!wait_address) {
		P9_DPRINTK(P9_DEBUG_ERROR, "no wait_address\n");
		pwait->wait_addr = ERR_PTR(-EIO);
		return;
	}

	pwait->conn = m;
	pwait->wait_addr = wait_address;
	init_waitqueue_func_entry(&pwait->wait, p9_pollwake);
	add_wait_queue(wait_address, &pwait->wait);
}

/**
 * p9_conn_create - allocate and initialize the per-session mux data
 * @client: client instance
 *
 * Note: Creates the polling task if this is the first session.
 */
static struct p9_conn *p9_conn_create(struct p9_client *client)
{
	int i, n;
	struct p9_conn *m;

	P9_DPRINTK(P9_DEBUG_MUX, "client %p msize %d\n", client, client->msize);
	m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL);
	if (!m)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&m->lock);
	INIT_LIST_HEAD(&m->mux_list);
	m->client = client;

	m->tagpool = p9_idpool_create();
	if (IS_ERR(m->tagpool)) {
		kfree(m);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&m->req_list);
	INIT_LIST_HEAD(&m->unsent_req_list);
	INIT_WORK(&m->rq, p9_read_work);
	INIT_WORK(&m->wq, p9_write_work);
	INIT_LIST_HEAD(&m->poll_pending_link);
	init_poll_funcptr(&m->pt, p9_pollwait);

	n = p9_fd_poll(client, &m->pt);
	if (n & POLLIN) {
		P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m);
		set_bit(Rpending, &m->wsched);
	}

	if (n & POLLOUT) {
		P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m);
		set_bit(Wpending, &m->wsched);
	}

	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
		if (IS_ERR(m->poll_wait[i].wait_addr)) {
			/* grab the error code before m is freed */
			void *err = m->poll_wait[i].wait_addr;

			p9_mux_poll_stop(m);
			p9_idpool_destroy(m->tagpool);
			kfree(m);
			return err;
		}
	}

	return m;
}
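
/*
 * The p9_fd_poll(client, &m->pt) call above does double duty: besides
 * returning the initial readiness mask, the poll_table makes the
 * underlying file's poll op invoke p9_pollwait(), registering this
 * connection on the fd's wait queues so later wakeups reach
 * p9_pollwake().
 */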

/**
 * p9_poll_mux - polls a mux and schedules read or write works if necessary
 * @m: connection to poll
 *
 */
static void p9_poll_mux(struct p9_conn *m)
{
	int n;

	if (m->err < 0)
		return;

	n = p9_fd_poll(m->client, NULL);
	if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
		P9_DPRINTK(P9_DEBUG_MUX, "error mux %p err %d\n", m, n);
		if (n >= 0)
			n = -ECONNRESET;
		p9_conn_cancel(m, n);
	}

	if (n & POLLIN) {
		set_bit(Rpending, &m->wsched);
		P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m);
		if (!test_and_set_bit(Rworksched, &m->wsched)) {
			P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m);
			queue_work(p9_mux_wq, &m->rq);
		}
	}

	if (n & POLLOUT) {
		set_bit(Wpending, &m->wsched);
		P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m);
		if ((m->wsize || !list_empty(&m->unsent_req_list))
		    && !test_and_set_bit(Wworksched, &m->wsched)) {
			P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m);
			queue_work(p9_mux_wq, &m->wq);
		}
	}
}

/**
 * p9_send_request - send 9P request
 * @m: mux data
 * @tc: request to be sent
 * @cb: callback function to call when response is received
 * @cba: parameter to pass to the callback function
 *
 * The function can sleep until the request is scheduled for sending and
 * can be interrupted. Return from the function is not a guarantee that
 * the request was sent successfully; on failure an ERR_PTR-encoded
 * error is returned, to be decoded with PTR_ERR().
 */
static struct p9_req *p9_send_request(struct p9_conn *m,
					  struct p9_fcall *tc,
					  p9_conn_req_callback cb, void *cba)
{
	int n;
	struct p9_req *req;

	P9_DPRINTK(P9_DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current,
		tc, tc->id);
	if (m->err < 0)
		return ERR_PTR(m->err);

	req = kmalloc(sizeof(struct p9_req), GFP_KERNEL);
	if (!req)
		return ERR_PTR(-ENOMEM);

	if (tc->id == P9_TVERSION)
		n = P9_NOTAG;
	else
		n = p9_mux_get_tag(m);

	if (n < 0) {
		kfree(req);
		return ERR_PTR(-ENOMEM);
	}

	p9_set_tag(tc, n);

#ifdef CONFIG_NET_9P_DEBUG
	if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
		char buf[150];

		p9_printfcall(buf, sizeof(buf), tc, m->client->dotu);
		printk(KERN_NOTICE "<<< %p %s\n", m, buf);
	}
#endif

	spin_lock_init(&req->lock);
	req->tag = n;
	req->tcall = tc;
	req->rcall = NULL;
	req->err = 0;
	req->cb = cb;
	req->cba = cba;
	req->flush = None;

	spin_lock(&m->lock);
	list_add_tail(&req->req_list, &m->unsent_req_list);
	spin_unlock(&m->lock);

	if (test_and_clear_bit(Wpending, &m->wsched))
		n = POLLOUT;
	else
		n = p9_fd_poll(m->client, NULL);

	if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
		queue_work(p9_mux_wq, &m->wq);

	return req;
}

static void p9_mux_free_request(struct p9_conn *m, struct p9_req *req)
{
	p9_mux_put_tag(m, req->tag);
	kfree(req);
}

static void p9_mux_flush_cb(struct p9_req *freq, void *a)
{
	int tag;
	struct p9_conn *m;
	struct p9_req *req, *rreq, *rptr;

	m = a;
	P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m,
		freq->tcall, freq->rcall, freq->err,
		freq->tcall->params.tflush.oldtag);

	spin_lock(&m->lock);
	tag = freq->tcall->params.tflush.oldtag;
	req = NULL;
	list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
		if (rreq->tag == tag) {
			req = rreq;
			list_del(&req->req_list);
			break;
		}
	}
	spin_unlock(&m->lock);

	if (req) {
		spin_lock(&req->lock);
		req->flush = Flushed;
		spin_unlock(&req->lock);

		if (req->cb)
			(*req->cb) (req, req->cba);
		else
			kfree(req->rcall);
	}

	kfree(freq->tcall);
	kfree(freq->rcall);
	p9_mux_free_request(m, freq);
}

static int
p9_mux_flush_request(struct p9_conn *m, struct p9_req *req)
{
	struct p9_fcall *fc;
	struct p9_req *rreq, *rptr;

	P9_DPRINTK(P9_DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);

	/* if a response was received for a request, do nothing */
	spin_lock(&req->lock);
	if (req->rcall || req->err) {
		spin_unlock(&req->lock);
		P9_DPRINTK(P9_DEBUG_MUX,
			"mux %p req %p response already received\n", m, req);
		return 0;
	}

	req->flush = Flushing;
	spin_unlock(&req->lock);

	spin_lock(&m->lock);
	/* if the request is not sent yet, just remove it from the list */
	list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) {
		if (rreq->tag == req->tag) {
			P9_DPRINTK(P9_DEBUG_MUX,
			   "mux %p req %p request is not sent yet\n", m, req);
			list_del(&rreq->req_list);
			req->flush = Flushed;
			spin_unlock(&m->lock);
			if (req->cb)
				(*req->cb) (req, req->cba);
			return 0;
		}
	}
	spin_unlock(&m->lock);

	clear_thread_flag(TIF_SIGPENDING);
	fc = p9_create_tflush(req->tag);
	p9_send_request(m, fc, p9_mux_flush_cb, m);
	return 1;
}
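
/*
 * Flush protocol sketch: once a request may have reached the server, the
 * only way to retract it is to send a Tflush naming the old tag and keep
 * the original request alive until the Rflush arrives, at which point
 * p9_mux_flush_cb() completes it. The return value tells the caller
 * whether to keep waiting (1) or whether the request already completed
 * or was never sent (0).
 */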

static void
p9_conn_rpc_cb(struct p9_req *req, void *a)
{
	struct p9_mux_rpc *r;

	P9_DPRINTK(P9_DEBUG_MUX, "req %p r %p\n", req, a);
	r = a;
	r->rcall = req->rcall;
	r->err = req->err;

	if (req->flush != None && !req->err)
		r->err = -ERESTARTSYS;

	wake_up(&r->wqueue);
}

/**
 * p9_fd_rpc - send a 9P request and wait until a response is available
 * @client: client instance
 * @tc: request to be sent
 * @rc: pointer where a pointer to the response is stored
 *
 * The function can be interrupted.
 */
int
p9_fd_rpc(struct p9_client *client, struct p9_fcall *tc, struct p9_fcall **rc)
{
	struct p9_trans_fd *p = client->trans;
	struct p9_conn *m = p->conn;
	int err, sigpending;
	unsigned long flags;
	struct p9_req *req;
	struct p9_mux_rpc r;

	r.err = 0;
	r.tcall = tc;
	r.rcall = NULL;
	r.m = m;
	init_waitqueue_head(&r.wqueue);

	if (rc)
		*rc = NULL;

	sigpending = 0;
	if (signal_pending(current)) {
		sigpending = 1;
		clear_thread_flag(TIF_SIGPENDING);
	}

	req = p9_send_request(m, tc, p9_conn_rpc_cb, &r);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		P9_DPRINTK(P9_DEBUG_MUX, "error %d\n", err);
		return err;
	}

	err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0);
	if (r.err < 0)
		err = r.err;

	if (err == -ERESTARTSYS && client->status == Connected
							&& m->err == 0) {
		if (p9_mux_flush_request(m, req)) {
			/* wait until we get response of the flush message */
			do {
				clear_thread_flag(TIF_SIGPENDING);
				err = wait_event_interruptible(r.wqueue,
					r.rcall || r.err);
			} while (!r.rcall && !r.err && err == -ERESTARTSYS &&
				client->status == Connected && !m->err);

			err = -ERESTARTSYS;
		}
		sigpending = 1;
	}

	if (sigpending) {
		spin_lock_irqsave(&current->sighand->siglock, flags);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	if (rc)
		*rc = r.rcall;
	else
		kfree(r.rcall);

	p9_mux_free_request(m, req);
	if (err > 0)
		err = -EIO;

	return err;
}

/**
 * parse_opts - parse mount options into transport structure
 * @params: options string passed from mount
 * @opts: transport-specific structure to parse options into
 *
 * Returns 0 upon success, -ERRNO upon failure
 */
static int parse_opts(char *params, struct p9_fd_opts *opts)
{
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int option;
	char *options, *tmp_options;

	opts->port = P9_PORT;
	opts->rfd = ~0;
	opts->wfd = ~0;

	if (!params)
		return 0;

	/* keep the original pointer; strsep() advances options to NULL */
	tmp_options = kstrdup(params, GFP_KERNEL);
	if (!tmp_options) {
		P9_DPRINTK(P9_DEBUG_ERROR,
				"failed to allocate copy of option string\n");
		return -ENOMEM;
	}
	options = tmp_options;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		int r;

		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		r = match_int(&args[0], &option);
		if (r < 0) {
			P9_DPRINTK(P9_DEBUG_ERROR,
				"integer field, but no integer?\n");
			continue;
		}

		switch (token) {
		case Opt_port:
			opts->port = option;
			break;
		case Opt_rfdno:
			opts->rfd = option;
			break;
		case Opt_wfdno:
			opts->wfd = option;
			break;
		default:
			continue;
		}
	}

	kfree(tmp_options);
	return 0;
}
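
/*
 * A hypothetical example of the parsing above: given the option string
 * "rfdno=4,wfdno=5,port=1234", parse_opts() returns 0 with
 * opts->rfd == 4, opts->wfd == 5 and opts->port == 1234; tokens it does
 * not recognize are silently skipped.
 */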

static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
{
	struct p9_trans_fd *ts = kmalloc(sizeof(struct p9_trans_fd),
					   GFP_KERNEL);
	if (!ts)
		return -ENOMEM;

	ts->rd = fget(rfd);
	ts->wr = fget(wfd);
	if (!ts->rd || !ts->wr) {
		if (ts->rd)
			fput(ts->rd);
		if (ts->wr)
			fput(ts->wr);
		kfree(ts);
		return -EIO;
	}

	client->trans = ts;
	client->status = Connected;

	return 0;
}

static int p9_socket_open(struct p9_client *client, struct socket *csocket)
{
	int fd, ret;

	csocket->sk->sk_allocation = GFP_NOIO;
	fd = sock_map_fd(csocket, 0);
	if (fd < 0) {
		P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to map fd\n");
		return fd;
	}

	ret = p9_fd_open(client, fd, fd);
	if (ret < 0) {
		P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to open fd\n");
		sockfd_put(csocket);
		return ret;
	}

	((struct p9_trans_fd *)client->trans)->rd->f_flags |= O_NONBLOCK;

	return 0;
}

/**
 * p9_conn_destroy - cancels all pending requests and frees mux resources
 * @m: mux to destroy
 *
 */
static void p9_conn_destroy(struct p9_conn *m)
{
	P9_DPRINTK(P9_DEBUG_MUX, "mux %p prev %p next %p\n", m,
		m->mux_list.prev, m->mux_list.next);

	p9_mux_poll_stop(m);
	cancel_work_sync(&m->rq);
	cancel_work_sync(&m->wq);

	p9_conn_cancel(m, -ECONNRESET);

	m->client = NULL;
	p9_idpool_destroy(m->tagpool);
	kfree(m);
}

/**
 * p9_fd_close - shutdown file descriptor transport
 * @client: client instance
 *
 */
static void p9_fd_close(struct p9_client *client)
{
	struct p9_trans_fd *ts;

	if (!client)
		return;

	ts = client->trans;
	if (!ts)
		return;

	client->status = Disconnected;

	p9_conn_destroy(ts->conn);

	if (ts->rd)
		fput(ts->rd);
	if (ts->wr)
		fput(ts->wr);

	kfree(ts);
}

/*
 * stolen from NFS - maybe should be made a generic function?
 */
static inline int valid_ipaddr4(const char *buf)
{
	int rc, count, in[4];

	rc = sscanf(buf, "%d.%d.%d.%d", &in[0], &in[1], &in[2], &in[3]);
	if (rc != 4)
		return -EINVAL;
	for (count = 0; count < 4; count++) {
		/* %d also accepts negative numbers, so bound both sides */
		if (in[count] < 0 || in[count] > 255)
			return -EINVAL;
	}
	return 0;
}
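
/*
 * For example, valid_ipaddr4("10.0.0.1") returns 0, while
 * valid_ipaddr4("10.0.0") and valid_ipaddr4("300.1.1.1") both return
 * -EINVAL.
 */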

static int
p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct socket *csocket;
	struct sockaddr_in sin_server;
	struct p9_fd_opts opts;
	struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */

	err = parse_opts(args, &opts);
	if (err < 0)
		return err;

	if (valid_ipaddr4(addr) < 0)
		return -EINVAL;

	csocket = NULL;

	sin_server.sin_family = AF_INET;
	sin_server.sin_addr.s_addr = in_aton(addr);
	sin_server.sin_port = htons(opts.port);
	sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &csocket);

	if (!csocket) {
		P9_EPRINTK(KERN_ERR, "p9_trans_tcp: problem creating socket\n");
		err = -EIO;
		goto error;
	}

	err = csocket->ops->connect(csocket,
				    (struct sockaddr *)&sin_server,
				    sizeof(struct sockaddr_in), 0);
	if (err < 0) {
		P9_EPRINTK(KERN_ERR,
			"p9_trans_tcp: problem connecting socket to %s\n",
			addr);
		goto error;
	}

	err = p9_socket_open(client, csocket);
	if (err < 0)
		goto error;

	p = (struct p9_trans_fd *) client->trans;
	p->conn = p9_conn_create(client);
	if (IS_ERR(p->conn)) {
		err = PTR_ERR(p->conn);
		p->conn = NULL;
		goto error;
	}

	return 0;

error:
	if (csocket)
		sock_release(csocket);

	kfree(p);

	return err;
}

static int
p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct socket *csocket;
	struct sockaddr_un sun_server;
	struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */

	csocket = NULL;

	/* the strcpy() below needs room for the terminating NUL as well */
	if (strlen(addr) >= UNIX_PATH_MAX) {
		P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n",
			addr);
		err = -ENAMETOOLONG;
		goto error;
	}

	sun_server.sun_family = PF_UNIX;
	strcpy(sun_server.sun_path, addr);
	sock_create_kern(PF_UNIX, SOCK_STREAM, 0, &csocket);
	if (!csocket) {
		P9_EPRINTK(KERN_ERR,
			"p9_trans_unix: problem creating socket\n");
		err = -EIO;
		goto error;
	}
	err = csocket->ops->connect(csocket, (struct sockaddr *)&sun_server,
			sizeof(struct sockaddr_un) - 1, 0);
	if (err < 0) {
		P9_EPRINTK(KERN_ERR,
			"p9_trans_unix: problem connecting socket: %s: %d\n",
			addr, err);
		goto error;
	}

	err = p9_socket_open(client, csocket);
	if (err < 0)
		goto error;

	p = (struct p9_trans_fd *) client->trans;
	p->conn = p9_conn_create(client);
	if (IS_ERR(p->conn)) {
		err = PTR_ERR(p->conn);
		p->conn = NULL;
		goto error;
	}

	return 0;

error:
	if (csocket)
		sock_release(csocket);

	kfree(p);
	return err;
}

static int
p9_fd_create(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct p9_fd_opts opts;
	struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */

	parse_opts(args, &opts);

	if (opts.rfd == ~0 || opts.wfd == ~0) {
		printk(KERN_ERR "v9fs: Insufficient options for proto=fd\n");
		return -ENOPROTOOPT;
	}

	err = p9_fd_open(client, opts.rfd, opts.wfd);
	if (err < 0)
		goto error;

	p = (struct p9_trans_fd *) client->trans;
	p->conn = p9_conn_create(client);
	if (IS_ERR(p->conn)) {
		err = PTR_ERR(p->conn);
		p->conn = NULL;
		goto error;
	}

	return 0;

error:
	kfree(p);
	return err;
}

static struct p9_trans_module p9_tcp_trans = {
	.name = "tcp",
	.maxsize = MAX_SOCK_BUF,
	.def = 1,
	.create = p9_fd_create_tcp,
	.close = p9_fd_close,
	.rpc = p9_fd_rpc,
	.owner = THIS_MODULE,
};

static struct p9_trans_module p9_unix_trans = {
	.name = "unix",
	.maxsize = MAX_SOCK_BUF,
	.def = 0,
	.create = p9_fd_create_unix,
	.close = p9_fd_close,
	.rpc = p9_fd_rpc,
	.owner = THIS_MODULE,
};

static struct p9_trans_module p9_fd_trans = {
	.name = "fd",
	.maxsize = MAX_SOCK_BUF,
	.def = 0,
	.create = p9_fd_create,
	.close = p9_fd_close,
	.rpc = p9_fd_rpc,
	.owner = THIS_MODULE,
};

/**
 * p9_poll_proc - poll worker thread
 * @a: thread state and arguments
 *
 * polls all v9fs transports for new events and queues the appropriate
 * work to the work queue
 *
 */
static int p9_poll_proc(void *a)
{
	unsigned long flags;

	P9_DPRINTK(P9_DEBUG_MUX, "start %p\n", current);
repeat:
	spin_lock_irqsave(&p9_poll_lock, flags);
	while (!list_empty(&p9_poll_pending_list)) {
		struct p9_conn *conn = list_first_entry(&p9_poll_pending_list,
							struct p9_conn,
							poll_pending_link);
		list_del_init(&conn->poll_pending_link);
		spin_unlock_irqrestore(&p9_poll_lock, flags);

		p9_poll_mux(conn);

		spin_lock_irqsave(&p9_poll_lock, flags);
	}
	spin_unlock_irqrestore(&p9_poll_lock, flags);

	set_current_state(TASK_INTERRUPTIBLE);
	if (list_empty(&p9_poll_pending_list)) {
		P9_DPRINTK(P9_DEBUG_MUX, "sleeping...\n");
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!kthread_should_stop())
		goto repeat;

	P9_DPRINTK(P9_DEBUG_MUX, "finish\n");
	return 0;
}

int p9_trans_fd_init(void)
{
	p9_mux_wq = create_workqueue("v9fs");
	if (!p9_mux_wq) {
		printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n");
		return -ENOMEM;
	}

	p9_poll_task = kthread_run(p9_poll_proc, NULL, "v9fs-poll");
	if (IS_ERR(p9_poll_task)) {
		destroy_workqueue(p9_mux_wq);
		printk(KERN_WARNING "v9fs: mux: creating poll task failed\n");
		return PTR_ERR(p9_poll_task);
	}

	v9fs_register_trans(&p9_tcp_trans);
	v9fs_register_trans(&p9_unix_trans);
	v9fs_register_trans(&p9_fd_trans);

	return 0;
}

void p9_trans_fd_exit(void)
{
	kthread_stop(p9_poll_task);
	v9fs_unregister_trans(&p9_tcp_trans);
	v9fs_unregister_trans(&p9_unix_trans);
	v9fs_unregister_trans(&p9_fd_trans);

	destroy_workqueue(p9_mux_wq);
}