/*
 * net/9p/mux.c
 *
 * Protocol Multiplexer
 *
 *  Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/kthread.h>
#include <linux/idr.h>
#include <linux/mutex.h>
#include <net/9p/9p.h>
#include <linux/parser.h>
#include <net/9p/transport.h>
#include <net/9p/conn.h>

#define ERREQFLUSH	1
#define SCHED_TIMEOUT	10
#define MAXPOLLWADDR	2

enum {
	Rworksched = 1,		/* read work scheduled or running */
	Rpending = 2,		/* can read */
	Wworksched = 4,		/* write work scheduled or running */
	Wpending = 8,		/* can write */
};

enum {
	None,
	Flushing,
	Flushed,
};
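
/*
 * Per-request flush state: None until a flush is attempted, Flushing
 * while a Tflush for the request's tag is outstanding, Flushed once the
 * request has been removed from the mux lists on behalf of the flush.
 * A response that arrives while the request is Flushing is stored on
 * the request, but the completion callback is deferred until the Rflush
 * comes back (see p9_read_work and p9_mux_flush_cb).
 */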

struct p9_mux_poll_task;

struct p9_req {
	spinlock_t lock; /* protect request structure */
	int tag;
	struct p9_fcall *tcall;
	struct p9_fcall *rcall;
	int err;
	p9_conn_req_callback cb;
	void *cba;
	int flush;
	struct list_head req_list;
};

struct p9_conn {
	spinlock_t lock; /* protect lock structure */
	struct list_head mux_list;
	struct p9_mux_poll_task *poll_task;
	int msize;
	unsigned char *extended;
	struct p9_trans *trans;
	struct p9_idpool *tagpool;
	int err;
	wait_queue_head_t equeue;
	struct list_head req_list;
	struct list_head unsent_req_list;
	struct p9_fcall *rcall;
	int rpos;
	char *rbuf;
	int wpos;
	int wsize;
	char *wbuf;
	wait_queue_t poll_wait[MAXPOLLWADDR];
	wait_queue_head_t *poll_waddr[MAXPOLLWADDR];
	poll_table pt;
	struct work_struct rq;
	struct work_struct wq;
	unsigned long wsched;
};

struct p9_mux_poll_task {
	struct task_struct *task;
	struct list_head mux_list;
	int muxnum;
};

struct p9_mux_rpc {
	struct p9_conn *m;
	int err;
	struct p9_fcall *tcall;
	struct p9_fcall *rcall;
	wait_queue_head_t wqueue;
};

static int p9_poll_proc(void *);
static void p9_read_work(struct work_struct *work);
static void p9_write_work(struct work_struct *work);
static void p9_pollwait(struct file *filp, wait_queue_head_t *wait_address,
			poll_table *p);
static u16 p9_mux_get_tag(struct p9_conn *);
static void p9_mux_put_tag(struct p9_conn *, u16);

static DEFINE_MUTEX(p9_mux_task_lock);
static struct workqueue_struct *p9_mux_wq;

static int p9_mux_num;
static int p9_mux_poll_task_num;
static struct p9_mux_poll_task p9_mux_poll_tasks[100];

int p9_mux_global_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++)
		p9_mux_poll_tasks[i].task = NULL;

	p9_mux_wq = create_workqueue("v9fs");
	if (!p9_mux_wq) {
		printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n");
		return -ENOMEM;
	}

	return 0;
}

void p9_mux_global_exit(void)
{
	destroy_workqueue(p9_mux_wq);
}

/**
 * p9_mux_calc_poll_procs - calculates the number of polling procs
 * based on the number of mounted v9fs filesystems.
 *
 * The current implementation returns sqrt of the number of mounts.
 */
static int p9_mux_calc_poll_procs(int muxnum)
{
	int n;

	if (p9_mux_poll_task_num)
		n = muxnum / p9_mux_poll_task_num +
		    (muxnum % p9_mux_poll_task_num ? 1 : 0);
	else
		n = 1;

	if (n > ARRAY_SIZE(p9_mux_poll_tasks))
		n = ARRAY_SIZE(p9_mux_poll_tasks);

	return n;
}
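
/*
 * Worked example: with two poll tasks already running, mounting a fifth
 * filesystem gives ceil(5 / 2) = 3 > 2, so p9_mux_poll_start() below
 * spawns a third task.  In general a new task is created whenever the
 * number of muxes exceeds the square of the number of tasks, which keeps
 * the task count close to sqrt(number of mounts), as noted above.
 */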

static int p9_mux_poll_start(struct p9_conn *m)
{
	int i, n;
	struct p9_mux_poll_task *vpt, *vptlast;
	struct task_struct *pproc;

	P9_DPRINTK(P9_DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, p9_mux_num,
		p9_mux_poll_task_num);
	mutex_lock(&p9_mux_task_lock);

	n = p9_mux_calc_poll_procs(p9_mux_num + 1);
	if (n > p9_mux_poll_task_num) {
		for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) {
			if (p9_mux_poll_tasks[i].task == NULL) {
				vpt = &p9_mux_poll_tasks[i];
				P9_DPRINTK(P9_DEBUG_MUX, "create proc %p\n",
									vpt);
				pproc = kthread_create(p9_poll_proc, vpt,
								"v9fs-poll");

				if (!IS_ERR(pproc)) {
					vpt->task = pproc;
					INIT_LIST_HEAD(&vpt->mux_list);
					vpt->muxnum = 0;
					p9_mux_poll_task_num++;
					wake_up_process(vpt->task);
				}
				break;
			}
		}

		if (i >= ARRAY_SIZE(p9_mux_poll_tasks))
			P9_DPRINTK(P9_DEBUG_ERROR,
					"warning: no free poll slots\n");
	}

	n = (p9_mux_num + 1) / p9_mux_poll_task_num +
	    ((p9_mux_num + 1) % p9_mux_poll_task_num ? 1 : 0);

	vptlast = NULL;
	for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) {
		vpt = &p9_mux_poll_tasks[i];
		if (vpt->task != NULL) {
			vptlast = vpt;
			if (vpt->muxnum < n) {
				P9_DPRINTK(P9_DEBUG_MUX, "put in proc %d\n", i);
				list_add(&m->mux_list, &vpt->mux_list);
				vpt->muxnum++;
				m->poll_task = vpt;
				memset(&m->poll_waddr, 0,
							sizeof(m->poll_waddr));
				init_poll_funcptr(&m->pt, p9_pollwait);
				break;
			}
		}
	}

	if (i >= ARRAY_SIZE(p9_mux_poll_tasks)) {
		if (vptlast == NULL) {
			mutex_unlock(&p9_mux_task_lock);
			return -ENOMEM;
		}

		P9_DPRINTK(P9_DEBUG_MUX, "put in proc %d\n", i);
		list_add(&m->mux_list, &vptlast->mux_list);
		vptlast->muxnum++;
		m->poll_task = vptlast;
		memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
		init_poll_funcptr(&m->pt, p9_pollwait);
	}

	p9_mux_num++;
	mutex_unlock(&p9_mux_task_lock);

	return 0;
}
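
/*
 * A new mux is attached to the first poll task handling fewer than its
 * fair share (n) of muxes; if every task is already at the limit, it
 * falls back to the last live task found.  p9_mux_task_lock serialises
 * this bookkeeping against p9_mux_poll_stop().
 */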

static void p9_mux_poll_stop(struct p9_conn *m)
{
	int i;
	struct p9_mux_poll_task *vpt;

	mutex_lock(&p9_mux_task_lock);
	vpt = m->poll_task;
	list_del(&m->mux_list);
	for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
		if (m->poll_waddr[i] != NULL) {
			remove_wait_queue(m->poll_waddr[i], &m->poll_wait[i]);
			m->poll_waddr[i] = NULL;
		}
	}
	vpt->muxnum--;
	if (!vpt->muxnum) {
		P9_DPRINTK(P9_DEBUG_MUX, "destroy proc %p\n", vpt);
		kthread_stop(vpt->task);
		vpt->task = NULL;
		p9_mux_poll_task_num--;
	}
	p9_mux_num--;
	mutex_unlock(&p9_mux_task_lock);
}

/**
 * p9_conn_create - allocate and initialize the per-session mux data
 * Creates the polling task if this is the first session.
 *
 * @trans - transport structure
 * @msize - maximum message size
 * @extended - pointer to the extended flag
 */
struct p9_conn *p9_conn_create(struct p9_trans *trans, int msize,
				    unsigned char *extended)
{
	int i, n;
	struct p9_conn *m, *mtmp;

	P9_DPRINTK(P9_DEBUG_MUX, "transport %p msize %d\n", trans, msize);
	m = kmalloc(sizeof(struct p9_conn), GFP_KERNEL);
	if (!m)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&m->lock);
	INIT_LIST_HEAD(&m->mux_list);
	m->msize = msize;
	m->extended = extended;
	m->trans = trans;
	m->tagpool = p9_idpool_create();
	if (IS_ERR(m->tagpool)) {
		mtmp = ERR_PTR(-ENOMEM);
		kfree(m);
		return mtmp;
	}

	m->err = 0;
	init_waitqueue_head(&m->equeue);
	INIT_LIST_HEAD(&m->req_list);
	INIT_LIST_HEAD(&m->unsent_req_list);
	m->rcall = NULL;
	m->rpos = 0;
	m->rbuf = NULL;
	m->wpos = m->wsize = 0;
	m->wbuf = NULL;
	INIT_WORK(&m->rq, p9_read_work);
	INIT_WORK(&m->wq, p9_write_work);
	m->wsched = 0;
	memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
	m->poll_task = NULL;
	n = p9_mux_poll_start(m);
	if (n) {
		kfree(m);
		return ERR_PTR(n);
	}

	n = trans->poll(trans, &m->pt);
	if (n & POLLIN) {
		P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m);
		set_bit(Rpending, &m->wsched);
	}

	if (n & POLLOUT) {
		P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m);
		set_bit(Wpending, &m->wsched);
	}

	for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
		if (IS_ERR(m->poll_waddr[i])) {
			p9_mux_poll_stop(m);
			mtmp = (void *)m->poll_waddr;	/* the error code */
			kfree(m);
			m = mtmp;
			break;
		}
	}

	return m;
}
EXPORT_SYMBOL(p9_conn_create);

/**
 * p9_conn_destroy - cancels all pending requests and frees mux resources
 */
void p9_conn_destroy(struct p9_conn *m)
{
	P9_DPRINTK(P9_DEBUG_MUX, "mux %p prev %p next %p\n", m,
		m->mux_list.prev, m->mux_list.next);
	p9_conn_cancel(m, -ECONNRESET);

	if (!list_empty(&m->req_list)) {
		/* wait until all processes waiting on this session exit */
		P9_DPRINTK(P9_DEBUG_MUX,
			"mux %p waiting for empty request queue\n", m);
		wait_event_timeout(m->equeue, (list_empty(&m->req_list)), 5000);
		P9_DPRINTK(P9_DEBUG_MUX, "mux %p request queue empty: %d\n", m,
			list_empty(&m->req_list));
	}

	p9_mux_poll_stop(m);
	m->trans = NULL;
	p9_idpool_destroy(m->tagpool);
	kfree(m);
}
EXPORT_SYMBOL(p9_conn_destroy);
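
/*
 * Typical lifecycle (sketch): a transport layer creates one mux per
 * session and tears it down when the session ends, roughly:
 *
 *	m = p9_conn_create(trans, msize, &extended);
 *	if (IS_ERR(m))
 *		return PTR_ERR(m);
 *	...issue requests with p9_conn_rpc()...
 *	p9_conn_destroy(m);
 *
 * How the caller obtains trans, msize and the extended flag is up to
 * the client code and is not shown here.
 */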

/**
 * p9_pollwait - called by the file's poll operation to add the v9fs-poll
 * task to the file's wait queue
 */
static void
p9_pollwait(struct file *filp, wait_queue_head_t *wait_address,
	    poll_table *p)
{
	int i;
	struct p9_conn *m;

	m = container_of(p, struct p9_conn, pt);
	for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++)
		if (m->poll_waddr[i] == NULL)
			break;

	if (i >= ARRAY_SIZE(m->poll_waddr)) {
		P9_DPRINTK(P9_DEBUG_ERROR, "not enough wait_address slots\n");
		return;
	}

	m->poll_waddr[i] = wait_address;

	if (!wait_address) {
		P9_DPRINTK(P9_DEBUG_ERROR, "no wait_address\n");
		m->poll_waddr[i] = ERR_PTR(-EIO);
		return;
	}

	init_waitqueue_entry(&m->poll_wait[i], m->poll_task->task);
	add_wait_queue(wait_address, &m->poll_wait[i]);
}

/**
 * p9_poll_mux - polls a mux and schedules read or write work if necessary
 */
static void p9_poll_mux(struct p9_conn *m)
{
	int n;

	if (m->err < 0)
		return;

	n = m->trans->poll(m->trans, NULL);
	if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
		P9_DPRINTK(P9_DEBUG_MUX, "error mux %p err %d\n", m, n);
		if (n >= 0)
			n = -ECONNRESET;
		p9_conn_cancel(m, n);
	}

	if (n & POLLIN) {
		set_bit(Rpending, &m->wsched);
		P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m);
		if (!test_and_set_bit(Rworksched, &m->wsched)) {
			P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m);
			queue_work(p9_mux_wq, &m->rq);
		}
	}

	if (n & POLLOUT) {
		set_bit(Wpending, &m->wsched);
		P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m);
		if ((m->wsize || !list_empty(&m->unsent_req_list))
		    && !test_and_set_bit(Wworksched, &m->wsched)) {
			P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m);
			queue_work(p9_mux_wq, &m->wq);
		}
	}
}
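
/*
 * The Rpending/Wpending bits cache the readiness last reported by the
 * transport so the work functions can skip an extra ->poll() call,
 * while Rworksched/Wworksched ensure that at most one read and one
 * write work item per mux sit on the workqueue at any time.
 */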

/**
 * p9_poll_proc - polls all v9fs transports for new events and queues
 * the appropriate work to the work queue
 */
static int p9_poll_proc(void *a)
{
	struct p9_conn *m, *mtmp;
	struct p9_mux_poll_task *vpt;

	vpt = a;
	P9_DPRINTK(P9_DEBUG_MUX, "start %p %p\n", current, vpt);
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		list_for_each_entry_safe(m, mtmp, &vpt->mux_list, mux_list) {
			p9_poll_mux(m);
		}

		P9_DPRINTK(P9_DEBUG_MUX, "sleeping...\n");
		schedule_timeout(SCHED_TIMEOUT * HZ);
	}

	__set_current_state(TASK_RUNNING);
	P9_DPRINTK(P9_DEBUG_MUX, "finish\n");
	return 0;
}

/**
 * p9_write_work - called when a transport can send some data
 */
static void p9_write_work(struct work_struct *work)
{
	int n, err;
	struct p9_conn *m;
	struct p9_req *req;

	m = container_of(work, struct p9_conn, wq);

	if (m->err < 0) {
		clear_bit(Wworksched, &m->wsched);
		return;
	}

	if (!m->wsize) {
		if (list_empty(&m->unsent_req_list)) {
			clear_bit(Wworksched, &m->wsched);
			return;
		}

		spin_lock(&m->lock);
again:
		req = list_entry(m->unsent_req_list.next, struct p9_req,
								req_list);
		list_move_tail(&req->req_list, &m->req_list);
		if (req->err == ERREQFLUSH)
			goto again;

		m->wbuf = req->tcall->sdata;
		m->wsize = req->tcall->size;
		m->wpos = 0;
		spin_unlock(&m->lock);
	}

	P9_DPRINTK(P9_DEBUG_MUX, "mux %p pos %d size %d\n", m, m->wpos,
		m->wsize);
	clear_bit(Wpending, &m->wsched);
	err = m->trans->write(m->trans, m->wbuf + m->wpos, m->wsize - m->wpos);
	P9_DPRINTK(P9_DEBUG_MUX, "mux %p sent %d bytes\n", m, err);
	if (err == -EAGAIN) {
		clear_bit(Wworksched, &m->wsched);
		return;
	}

	if (err < 0)
		goto error;
	else if (err == 0) {
		err = -EREMOTEIO;
		goto error;
	}

	m->wpos += err;
	if (m->wpos == m->wsize)
		m->wpos = m->wsize = 0;

	if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) {
		if (test_and_clear_bit(Wpending, &m->wsched))
			n = POLLOUT;
		else
			n = m->trans->poll(m->trans, NULL);

		if (n & POLLOUT) {
			P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m);
			queue_work(p9_mux_wq, &m->wq);
		} else
			clear_bit(Wworksched, &m->wsched);
	} else
		clear_bit(Wworksched, &m->wsched);

	return;

error:
	p9_conn_cancel(m, err);
	clear_bit(Wworksched, &m->wsched);
}

static void process_request(struct p9_conn *m, struct p9_req *req)
{
	int ecode;
	struct p9_str *ename;

	if (!req->err && req->rcall->id == P9_RERROR) {
		ecode = req->rcall->params.rerror.errno;
		ename = &req->rcall->params.rerror.error;

		P9_DPRINTK(P9_DEBUG_MUX, "Rerror %.*s\n", ename->len,
			ename->str);

		if (*m->extended)
			req->err = -ecode;

		if (!req->err) {
			req->err = p9_errstr2errno(ename->str, ename->len);

			if (!req->err) {	/* string match failed */
				PRINT_FCALL_ERROR("unknown error", req->rcall);
			}

			if (!req->err)
				req->err = -ESERVERFAULT;
		}
	} else if (req->tcall && req->rcall->id != req->tcall->id + 1) {
		P9_DPRINTK(P9_DEBUG_ERROR,
			"fcall mismatch: expected %d, got %d\n",
			req->tcall->id + 1, req->rcall->id);
		if (!req->err)
			req->err = -EIO;
	}
}
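
/*
 * process_request() above maps server errors to errno values: in
 * 9P2000.u (*m->extended) the Rerror message carries a numeric errno
 * that is used directly; plain 9P2000 only provides the error string,
 * which is mapped with p9_errstr2errno() and falls back to
 * -ESERVERFAULT when no match is found.
 */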

/**
 * p9_read_work - called when there is some data to be read from a transport
 */
static void p9_read_work(struct work_struct *work)
{
	int n, err;
	struct p9_conn *m;
	struct p9_req *req, *rptr, *rreq;
	struct p9_fcall *rcall;
	char *rbuf;

	m = container_of(work, struct p9_conn, rq);

	if (m->err < 0)
		return;

	rcall = NULL;
	P9_DPRINTK(P9_DEBUG_MUX, "start mux %p pos %d\n", m, m->rpos);

	if (!m->rcall) {
		m->rcall =
		    kmalloc(sizeof(struct p9_fcall) + m->msize, GFP_KERNEL);
		if (!m->rcall) {
			err = -ENOMEM;
			goto error;
		}

		m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall);
		m->rpos = 0;
	}

	clear_bit(Rpending, &m->wsched);
	err = m->trans->read(m->trans, m->rbuf + m->rpos, m->msize - m->rpos);
	P9_DPRINTK(P9_DEBUG_MUX, "mux %p got %d bytes\n", m, err);
	if (err == -EAGAIN) {
		clear_bit(Rworksched, &m->wsched);
		return;
	}

	if (err <= 0)
		goto error;

	m->rpos += err;
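
	/*
	 * Every 9P message begins with a 4-byte little-endian size field
	 * that counts the whole message, size field included.  The loop
	 * below peels complete messages out of rbuf and shifts any
	 * partial trailing message to the front of a fresh buffer.
	 */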
	while (m->rpos > 4) {
		n = le32_to_cpu(*(__le32 *) m->rbuf);
		if (n >= m->msize) {
			P9_DPRINTK(P9_DEBUG_ERROR,
				"requested packet size too big: %d\n", n);
			err = -EIO;
			goto error;
		}

		if (m->rpos < n)
			break;

		err =
		    p9_deserialize_fcall(m->rbuf, n, m->rcall, *m->extended);
		if (err < 0) {
			goto error;
		}

#ifdef CONFIG_NET_9P_DEBUG
		if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
			char buf[150];

			p9_printfcall(buf, sizeof(buf), m->rcall,
				*m->extended);
			printk(KERN_NOTICE ">>> %p %s\n", m, buf);
		}
#endif

		rcall = m->rcall;
		rbuf = m->rbuf;
		if (m->rpos > n) {
			m->rcall = kmalloc(sizeof(struct p9_fcall) + m->msize,
					   GFP_KERNEL);
			if (!m->rcall) {
				err = -ENOMEM;
				goto error;
			}

			m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall);
			memmove(m->rbuf, rbuf + n, m->rpos - n);
			m->rpos -= n;
		} else {
			m->rcall = NULL;
			m->rbuf = NULL;
			m->rpos = 0;
		}

		P9_DPRINTK(P9_DEBUG_MUX, "mux %p fcall id %d tag %d\n", m,
			rcall->id, rcall->tag);

		req = NULL;
		spin_lock(&m->lock);
		list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
			if (rreq->tag == rcall->tag) {
				req = rreq;
				if (req->flush != Flushing)
					list_del(&req->req_list);
				break;
			}
		}
		spin_unlock(&m->lock);

		if (req) {
			req->rcall = rcall;
			process_request(m, req);

			if (req->flush != Flushing) {
				if (req->cb)
					(*req->cb) (req, req->cba);
				else
					kfree(req->rcall);

				wake_up(&m->equeue);
			}
		} else {
			if (err >= 0 && rcall->id != P9_RFLUSH)
				P9_DPRINTK(P9_DEBUG_ERROR,
					"unexpected response mux %p id %d tag %d\n",
					m, rcall->id, rcall->tag);
			kfree(rcall);
		}
	}

	if (!list_empty(&m->req_list)) {
		if (test_and_clear_bit(Rpending, &m->wsched))
			n = POLLIN;
		else
			n = m->trans->poll(m->trans, NULL);

		if (n & POLLIN) {
			P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m);
			queue_work(p9_mux_wq, &m->rq);
		} else
			clear_bit(Rworksched, &m->wsched);
	} else
		clear_bit(Rworksched, &m->wsched);

	return;

error:
	p9_conn_cancel(m, err);
	clear_bit(Rworksched, &m->wsched);
}

/**
 * p9_send_request - send 9P request
 * The function can sleep until the request is scheduled for sending.
 * The function can be interrupted. Returning from the function does not
 * guarantee that the request was sent successfully; errors can be
 * retrieved with the PTR_ERR macro.
 *
 * @m: mux data
 * @tc: request to be sent
 * @cb: callback function to call when response is received
 * @cba: parameter to pass to the callback function
 */
static struct p9_req *p9_send_request(struct p9_conn *m,
					  struct p9_fcall *tc,
					  p9_conn_req_callback cb, void *cba)
{
	int n;
	struct p9_req *req;

	P9_DPRINTK(P9_DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current,
		tc, tc->id);
	if (m->err < 0)
		return ERR_PTR(m->err);

	req = kmalloc(sizeof(struct p9_req), GFP_KERNEL);
	if (!req)
		return ERR_PTR(-ENOMEM);

	if (tc->id == P9_TVERSION)
		n = P9_NOTAG;
	else
		n = p9_mux_get_tag(m);

	if (n < 0)
		return ERR_PTR(-ENOMEM);

	p9_set_tag(tc, n);

#ifdef CONFIG_NET_9P_DEBUG
	if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
		char buf[150];

		p9_printfcall(buf, sizeof(buf), tc, *m->extended);
		printk(KERN_NOTICE "<<< %p %s\n", m, buf);
	}
#endif

	spin_lock_init(&req->lock);
	req->tag = n;
	req->tcall = tc;
	req->rcall = NULL;
	req->err = 0;
	req->cb = cb;
	req->cba = cba;
	req->flush = None;

	spin_lock(&m->lock);
	list_add_tail(&req->req_list, &m->unsent_req_list);
	spin_unlock(&m->lock);

	if (test_and_clear_bit(Wpending, &m->wsched))
		n = POLLOUT;
	else
		n = m->trans->poll(m->trans, NULL);

	if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
		queue_work(p9_mux_wq, &m->wq);

	return req;
}
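
/*
 * Tversion is the only request sent with the reserved tag P9_NOTAG:
 * version negotiation happens before any other traffic, so it never
 * needs a tag from the pool.  Every other request gets a tag from
 * m->tagpool, which is how the reader matches responses back to
 * requests when many calls are outstanding on one connection.
 */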

static void p9_mux_free_request(struct p9_conn *m, struct p9_req *req)
{
	p9_mux_put_tag(m, req->tag);
	kfree(req);
}

static void p9_mux_flush_cb(struct p9_req *freq, void *a)
{
	p9_conn_req_callback cb;
	int tag;
	struct p9_conn *m;
	struct p9_req *req, *rreq, *rptr;

	m = a;
	P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m,
		freq->tcall, freq->rcall, freq->err,
		freq->tcall->params.tflush.oldtag);

	spin_lock(&m->lock);
	cb = NULL;
	tag = freq->tcall->params.tflush.oldtag;
	req = NULL;
	list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
		if (rreq->tag == tag) {
			req = rreq;
			list_del(&req->req_list);
			break;
		}
	}
	spin_unlock(&m->lock);

	if (req) {
		spin_lock(&req->lock);
		req->flush = Flushed;
		spin_unlock(&req->lock);

		if (req->cb)
			(*req->cb) (req, req->cba);
		else
			kfree(req->rcall);

		wake_up(&m->equeue);
	}

	kfree(freq->tcall);
	kfree(freq->rcall);
	p9_mux_free_request(m, freq);
}

static int
p9_mux_flush_request(struct p9_conn *m, struct p9_req *req)
{
	struct p9_fcall *fc;
	struct p9_req *rreq, *rptr;

	P9_DPRINTK(P9_DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);

	/* if a response was received for a request, do nothing */
	spin_lock(&req->lock);
	if (req->rcall || req->err) {
		spin_unlock(&req->lock);
		P9_DPRINTK(P9_DEBUG_MUX,
			"mux %p req %p response already received\n", m, req);
		return 0;
	}

	req->flush = Flushing;
	spin_unlock(&req->lock);

	spin_lock(&m->lock);
	/* if the request is not sent yet, just remove it from the list */
	list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) {
		if (rreq->tag == req->tag) {
			P9_DPRINTK(P9_DEBUG_MUX,
				"mux %p req %p request is not sent yet\n", m, req);
			list_del(&rreq->req_list);
			req->flush = Flushed;
			spin_unlock(&m->lock);
			if (req->cb)
				(*req->cb) (req, req->cba);
			return 0;
		}
	}
	spin_unlock(&m->lock);

	clear_thread_flag(TIF_SIGPENDING);
	fc = p9_create_tflush(req->tag);
	p9_send_request(m, fc, p9_mux_flush_cb, m);
	return 1;
}
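
/*
 * Per the 9P protocol, once a Tflush for oldtag has been answered with
 * Rflush the server will send no further reply for oldtag, so only
 * after p9_mux_flush_cb() runs can the original request be completed
 * and its tag safely reused.  The return value of p9_mux_flush_request()
 * tells p9_conn_rpc() whether it must keep waiting for that callback (1)
 * or can clean up immediately (0).
 */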

static void
p9_conn_rpc_cb(struct p9_req *req, void *a)
{
	struct p9_mux_rpc *r;

	P9_DPRINTK(P9_DEBUG_MUX, "req %p r %p\n", req, a);
	r = a;
	r->rcall = req->rcall;
	r->err = req->err;

	if (req->flush != None && !req->err)
		r->err = -ERESTARTSYS;

	wake_up(&r->wqueue);
}

/**
 * p9_conn_rpc - sends 9P request and waits until a response is available.
 * The function can be interrupted.
 * @m: mux data
 * @tc: request to be sent
 * @rc: pointer where a pointer to the response is stored
 */
int
p9_conn_rpc(struct p9_conn *m, struct p9_fcall *tc,
	     struct p9_fcall **rc)
{
	int err, sigpending;
	unsigned long flags;
	struct p9_req *req;
	struct p9_mux_rpc r;

	r.err = 0;
	r.tcall = tc;
	r.rcall = NULL;
	r.m = m;
	init_waitqueue_head(&r.wqueue);

	if (rc)
		*rc = NULL;

	sigpending = 0;
	if (signal_pending(current)) {
		sigpending = 1;
		clear_thread_flag(TIF_SIGPENDING);
	}

	req = p9_send_request(m, tc, p9_conn_rpc_cb, &r);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		P9_DPRINTK(P9_DEBUG_MUX, "error %d\n", err);
		return err;
	}

	err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0);
	if (r.err < 0)
		err = r.err;

	if (err == -ERESTARTSYS && m->trans->status == Connected
							&& m->err == 0) {
		if (p9_mux_flush_request(m, req)) {
			/* wait until we get response of the flush message */
			do {
				clear_thread_flag(TIF_SIGPENDING);
				err = wait_event_interruptible(r.wqueue,
					r.rcall || r.err);
			} while (!r.rcall && !r.err && err == -ERESTARTSYS &&
				m->trans->status == Connected && !m->err);

			err = -ERESTARTSYS;
		}
		sigpending = 1;
	}

	if (sigpending) {
		spin_lock_irqsave(&current->sighand->siglock, flags);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	if (rc)
		*rc = r.rcall;
	else
		kfree(r.rcall);

	p9_mux_free_request(m, req);
	if (err > 0)
		err = -EIO;

	return err;
}
EXPORT_SYMBOL(p9_conn_rpc);
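
/*
 * Usage sketch: to negotiate the protocol version a client could do
 * something along these lines (assuming a p9_create_tversion() helper
 * that builds the Tversion fcall; error handling abbreviated):
 *
 *	struct p9_fcall *tc, *rc = NULL;
 *	int err;
 *
 *	tc = p9_create_tversion(msize, "9P2000.u");
 *	err = p9_conn_rpc(m, tc, &rc);
 *	if (!err) {
 *		...inspect the Rversion in rc...
 *		kfree(rc);
 *	}
 *	kfree(tc);
 *
 * The caller owns both fcalls; p9_conn_rpc() only hands the response
 * back through *rc.
 */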

#ifdef P9_NONBLOCK
/**
 * p9_conn_rpcnb - sends 9P request without waiting for response.
 * @m: mux data
 * @tc: request to be sent
 * @cb: callback function to be called when response arrives
 * @a: value to pass to the callback function
 */
int p9_conn_rpcnb(struct p9_conn *m, struct p9_fcall *tc,
		   p9_conn_req_callback cb, void *a)
{
	int err;
	struct p9_req *req;

	req = p9_send_request(m, tc, cb, a);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		P9_DPRINTK(P9_DEBUG_MUX, "error %d\n", err);
		return PTR_ERR(req);
	}

	P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p tag %d\n", m, tc, req->tag);
	return 0;
}
EXPORT_SYMBOL(p9_conn_rpcnb);
#endif /* P9_NONBLOCK */

/**
 * p9_conn_cancel - cancel all pending requests with error
 * @m: mux data
 * @err: error code
 */
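
/*
 * Requests are first moved onto a private cancel_list under m->lock and
 * then completed with the lock dropped, so the callbacks (which may wake
 * sleeping callers or free memory) never run under the mux lock.
 */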
void p9_conn_cancel(struct p9_conn *m, int err)
{
	struct p9_req *req, *rtmp;
	LIST_HEAD(cancel_list);

	P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
	m->err = err;
	spin_lock(&m->lock);
	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
		list_move(&req->req_list, &cancel_list);
	}
	list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
		list_move(&req->req_list, &cancel_list);
	}
	spin_unlock(&m->lock);

	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
		list_del(&req->req_list);
		if (!req->err)
			req->err = err;

		if (req->cb)
			(*req->cb) (req, req->cba);
		else
			kfree(req->rcall);
	}

	wake_up(&m->equeue);
}
EXPORT_SYMBOL(p9_conn_cancel);

static u16 p9_mux_get_tag(struct p9_conn *m)
{
	int tag;

	tag = p9_idpool_get(m->tagpool);
	if (tag < 0)
		return P9_NOTAG;
	else
		return (u16) tag;
}

static void p9_mux_put_tag(struct p9_conn *m, u16 tag)
{
	if (tag != P9_NOTAG && p9_idpool_check(tag, m->tagpool))
		p9_idpool_put(tag, m->tagpool);
}