
/*
 *  request.c
 *
 *  Copyright (C) 2001 by Urban Widmark
 *
 *  Please add a note about your changes to smbfs in the ChangeLog file.
 */

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/net.h>

#include <linux/smb_fs.h>
#include <linux/smbno.h>
#include <linux/smb_mount.h>

#include "smb_debug.h"
#include "request.h"
#include "proto.h"

/* #define SMB_SLAB_DEBUG	(SLAB_RED_ZONE | SLAB_POISON) */
#define SMB_SLAB_DEBUG	0

#define ROUND_UP(x) (((x)+3) & ~3)

/* cache for request structures */
static kmem_cache_t *req_cachep;

static int smb_request_send_req(struct smb_request *req);

/*
  /proc/slabinfo:
  name, active, num, objsize, active_slabs, num_slabs, #pages
*/
int smb_init_request_cache(void)
{
        req_cachep = kmem_cache_create("smb_request",
                                       sizeof(struct smb_request), 0,
                                       SMB_SLAB_DEBUG | SLAB_HWCACHE_ALIGN,
                                       NULL, NULL);
        if (req_cachep == NULL)
                return -ENOMEM;

        return 0;
}

void smb_destroy_request_cache(void)
{
        if (kmem_cache_destroy(req_cachep))
                printk(KERN_INFO "smb_destroy_request_cache: not all structures were freed\n");
}
/*
 * Allocate and initialise a request structure
 */
static struct smb_request *smb_do_alloc_request(struct smb_sb_info *server,
                                                int bufsize)
{
        struct smb_request *req;
        unsigned char *buf = NULL;

        req = kmem_cache_alloc(req_cachep, SLAB_KERNEL);
        VERBOSE("allocating request: %p\n", req);
        if (!req)
                goto out;

        if (bufsize > 0) {
                buf = smb_kmalloc(bufsize, GFP_NOFS);
                if (!buf) {
                        kmem_cache_free(req_cachep, req);
                        return NULL;
                }
        }

        memset(req, 0, sizeof(struct smb_request));
        req->rq_buffer = buf;
        req->rq_bufsize = bufsize;
        req->rq_server = server;
        init_waitqueue_head(&req->rq_wait);
        INIT_LIST_HEAD(&req->rq_queue);
        atomic_set(&req->rq_count, 1);

 out:
        return req;
}
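
/*
 * Allocate a request for this server. The allocation is counted against
 * server->nr_requests and refused above MAX_REQUEST_HARD; see the FIXME
 * below for what should eventually happen when that limit is hit.
 */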
struct smb_request *smb_alloc_request(struct smb_sb_info *server, int bufsize)
{
        struct smb_request *req = NULL;

        for (;;) {
                atomic_inc(&server->nr_requests);
                if (atomic_read(&server->nr_requests) <= MAX_REQUEST_HARD) {
                        req = smb_do_alloc_request(server, bufsize);
                        if (req != NULL)
                                break;
                }

#if 0
                /*
                 * Try to free up at least one request in order to stay
                 * below the hard limit
                 */
                if (nfs_try_to_free_pages(server))
                        continue;

                if (signalled() && (server->flags & NFS_MOUNT_INTR))
                        return ERR_PTR(-ERESTARTSYS);

                current->policy = SCHED_YIELD;
                schedule();
#else
                /* FIXME: we want something like nfs does above, but that
                   requires changes to all callers and can wait. */
                break;
#endif
        }
        return req;
}
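
/*
 * Rough caller lifecycle (illustration only, not lifted from a real caller;
 * the actual users live elsewhere in smbfs, e.g. the smb_proc_* helpers, and
 * the buffer size depends on the operation):
 *
 *      req = smb_alloc_request(server, bufsize);
 *      if (!req)
 *              return -ENOMEM;
 *      ... build the SMB in req->rq_header / req->rq_buffer ...
 *      result = smb_add_request(req);  -- blocks until the reply (or timeout)
 *      ... inspect req->rq_errno and the received data ...
 *      smb_rput(req);
 */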
static void smb_free_request(struct smb_request *req)
{
        atomic_dec(&req->rq_server->nr_requests);
        if (req->rq_buffer && !(req->rq_flags & SMB_REQ_STATIC))
                smb_kfree(req->rq_buffer);
        if (req->rq_trans2buffer)
                smb_kfree(req->rq_trans2buffer);
        kmem_cache_free(req_cachep, req);
}
/*
 * What keeps an rget from racing with an rput? The count must never drop
 * to zero while the request is in use; only call rput once it is safe for
 * the request to be freed.
 */
static void smb_rget(struct smb_request *req)
{
        atomic_inc(&req->rq_count);
}

void smb_rput(struct smb_request *req)
{
        if (atomic_dec_and_test(&req->rq_count)) {
                list_del_init(&req->rq_queue);
                smb_free_request(req);
        }
}
/* setup to receive the data part of the SMB */
static int smb_setup_bcc(struct smb_request *req)
{
        int result = 0;

        req->rq_rlen = smb_len(req->rq_header) + 4 - req->rq_bytes_recvd;
        if (req->rq_rlen > req->rq_bufsize) {
                PARANOIA("Packet too large %d > %d\n",
                         req->rq_rlen, req->rq_bufsize);
                return -ENOBUFS;
        }

        req->rq_iov[0].iov_base = req->rq_buffer;
        req->rq_iov[0].iov_len  = req->rq_rlen;
        req->rq_iovlen = 1;

        return result;
}
/*
 * Prepare a "normal" request structure.
 */
static int smb_setup_request(struct smb_request *req)
{
        int len = smb_len(req->rq_header) + 4;
        req->rq_slen = len;

        /* if we expect a data part in the reply we set the iov's to read it */
        if (req->rq_resp_bcc)
                req->rq_setup_read = smb_setup_bcc;

        /* This tries to support re-using the same request */
        req->rq_bytes_sent = 0;
        req->rq_rcls = 0;
        req->rq_err = 0;
        req->rq_errno = 0;
        req->rq_fragment = 0;
        if (req->rq_trans2buffer) {
                smb_kfree(req->rq_trans2buffer);
                /* clear the pointer so smb_free_request() can't free it twice */
                req->rq_trans2buffer = NULL;
        }

        return 0;
}
/*
 * Prepare a transaction2 request structure
 */
static int smb_setup_trans2request(struct smb_request *req)
{
        struct smb_sb_info *server = req->rq_server;
        int mparam, mdata;
        static unsigned char padding[4];

        /* I know the following is very ugly, but I want to build the
           smb packet as efficiently as possible. */

        const int smb_parameters = 15;
        const int header = SMB_HEADER_LEN + 2 * smb_parameters + 2;
        const int oparam = ROUND_UP(header + 3);
        const int odata  = ROUND_UP(oparam + req->rq_lparm);
        const int bcc = (req->rq_data ? odata + req->rq_ldata :
                                        oparam + req->rq_lparm) - header;

        if ((bcc + oparam) > server->opt.max_xmit)
                return -ENOMEM;
        smb_setup_header(req, SMBtrans2, smb_parameters, bcc);

        /*
         * max parameters + max data + max setup == bufsize to make NT4 happy
         * and not abort the transfer or split into multiple responses. It also
         * makes smbfs happy as handling packets larger than the buffer size
         * is extra work.
         *
         * OS/2 is probably going to hate me for this ...
         */
        mparam = SMB_TRANS2_MAX_PARAM;
        mdata = req->rq_bufsize - mparam;

        mdata = server->opt.max_xmit - mparam - 100;
        if (mdata < 1024) {
                mdata = 1024;
                mparam = 20;
        }

#if 0
        /* NT/win2k has ~4k max_xmit, so with this we request more than it wants
           to return as one SMB. Useful for testing the fragmented trans2
           handling. */
        mdata = 8192;
#endif

        WSET(req->rq_header, smb_tpscnt, req->rq_lparm);
        WSET(req->rq_header, smb_tdscnt, req->rq_ldata);
        WSET(req->rq_header, smb_mprcnt, mparam);
        WSET(req->rq_header, smb_mdrcnt, mdata);
        WSET(req->rq_header, smb_msrcnt, 0);    /* max setup always 0 ? */
        WSET(req->rq_header, smb_flags, 0);
        DSET(req->rq_header, smb_timeout, 0);
        WSET(req->rq_header, smb_pscnt, req->rq_lparm);
        WSET(req->rq_header, smb_psoff, oparam - 4);
        WSET(req->rq_header, smb_dscnt, req->rq_ldata);
        WSET(req->rq_header, smb_dsoff, req->rq_data ? odata - 4 : 0);
        *(req->rq_header + smb_suwcnt) = 0x01;          /* setup count */
        *(req->rq_header + smb_suwcnt + 1) = 0x00;      /* reserved */
        WSET(req->rq_header, smb_setup0, req->rq_trans2_command);

        req->rq_iovlen = 2;
        req->rq_iov[0].iov_base = (void *) req->rq_header;
        req->rq_iov[0].iov_len = oparam;
        req->rq_iov[1].iov_base = (req->rq_parm == NULL) ? padding : req->rq_parm;
        req->rq_iov[1].iov_len = req->rq_lparm;
        req->rq_slen = oparam + req->rq_lparm;

        if (req->rq_data) {
                req->rq_iovlen += 2;
                req->rq_iov[2].iov_base = padding;
                req->rq_iov[2].iov_len = odata - oparam - req->rq_lparm;
                req->rq_iov[3].iov_base = req->rq_data;
                req->rq_iov[3].iov_len = req->rq_ldata;
                req->rq_slen = odata + req->rq_ldata;
        }

        /* always a data part for trans2 replies */
        req->rq_setup_read = smb_setup_bcc;

        return 0;
}
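
/*
 * Wire layout produced above (the offsets in smb_psoff/smb_dsoff are
 * relative to the start of the SMB proper, hence the "- 4" for the NetBIOS
 * length prefix):
 *
 *      iov[0]: SMB header, 15 parameter words, bcc and padding up to oparam
 *      iov[1]: the trans2 parameter bytes ("padding" if rq_parm is NULL)
 *      iov[2]: padding up to odata             (only when rq_data is set)
 *      iov[3]: the trans2 data bytes           (only when rq_data is set)
 */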
/*
 * Add a request and tell smbiod to process it
 */
int smb_add_request(struct smb_request *req)
{
        long timeleft;
        struct smb_sb_info *server = req->rq_server;
        int result = 0;

        smb_setup_request(req);
        if (req->rq_trans2_command) {
                if (req->rq_buffer == NULL) {
                        PARANOIA("trans2 attempted without response buffer!\n");
                        return -EIO;
                }
                result = smb_setup_trans2request(req);
        }
        if (result < 0)
                return result;

#ifdef SMB_DEBUG_PACKET_SIZE
        add_xmit_stats(req);
#endif

        /* add 'req' to the queue of requests */
        if (smb_lock_server_interruptible(server))
                return -EINTR;

        /*
         * Try to send the request as the process. If that fails we queue the
         * request and let smbiod send it later.
         */

        /* FIXME: each server has a limit on the maximum number of parallel
           requests (10, 50 or so). We should not allow more requests than
           that to be active. */
        if (server->mid > 0xf000)
                server->mid = 0;
        req->rq_mid = server->mid++;
        WSET(req->rq_header, smb_mid, req->rq_mid);

        result = 0;
        if (server->state == CONN_VALID) {
                if (list_empty(&server->xmitq))
                        result = smb_request_send_req(req);
                if (result < 0) {
                        /* Connection lost? */
                        server->conn_error = result;
                        server->state = CONN_INVALID;
                }
        }
        if (result != 1)
                list_add_tail(&req->rq_queue, &server->xmitq);
        smb_rget(req);

        if (server->state != CONN_VALID)
                smbiod_retry(server);

        smb_unlock_server(server);

        smbiod_wake_up();

        timeleft = wait_event_interruptible_timeout(req->rq_wait,
                                req->rq_flags & SMB_REQ_RECEIVED, 30*HZ);
        if (!timeleft || signal_pending(current)) {
                /*
                 * On timeout or on interrupt we want to try and remove the
                 * request from the recvq/xmitq.
                 */
                smb_lock_server(server);
                if (!(req->rq_flags & SMB_REQ_RECEIVED)) {
                        list_del_init(&req->rq_queue);
                        smb_rput(req);
                }
                smb_unlock_server(server);
        }

        if (!timeleft) {
                PARANOIA("request [%p, mid=%d] timed out!\n",
                         req, req->rq_mid);
                VERBOSE("smb_com: %02x\n", *(req->rq_header + smb_com));
                VERBOSE("smb_rcls: %02x\n", *(req->rq_header + smb_rcls));
                VERBOSE("smb_flg: %02x\n", *(req->rq_header + smb_flg));
                VERBOSE("smb_tid: %04x\n", WVAL(req->rq_header, smb_tid));
                VERBOSE("smb_pid: %04x\n", WVAL(req->rq_header, smb_pid));
                VERBOSE("smb_uid: %04x\n", WVAL(req->rq_header, smb_uid));
                VERBOSE("smb_mid: %04x\n", WVAL(req->rq_header, smb_mid));
                VERBOSE("smb_wct: %02x\n", *(req->rq_header + smb_wct));

                req->rq_rcls = ERRSRV;
                req->rq_err  = ERRtimeout;

                /* Just in case it was "stuck" */
                smbiod_wake_up();
        }
        VERBOSE("woke up, rcls=%d\n", req->rq_rcls);

        if (req->rq_rcls != 0)
                req->rq_errno = smb_errno(req);
        if (signal_pending(current))
                req->rq_errno = -ERESTARTSYS;
        return req->rq_errno;
}
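
/*
 * Notes on the send path above: the request is only transmitted directly
 * when the connection is valid and the xmitq is empty (presumably so that
 * requests go out in queue order); otherwise it stays on the xmitq for
 * smbiod. The reference taken with smb_rget() is the queue's reference and
 * is dropped either here on timeout/interrupt or by smb_request_recv()
 * once the reply has been received.
 */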
/*
 * Send a request and place it on the recvq if successfully sent.
 * Must be called with the server lock held.
 */
static int smb_request_send_req(struct smb_request *req)
{
        struct smb_sb_info *server = req->rq_server;
        int result;

        if (req->rq_bytes_sent == 0) {
                WSET(req->rq_header, smb_tid, server->opt.tid);
                WSET(req->rq_header, smb_pid, 1);
                WSET(req->rq_header, smb_uid, server->opt.server_uid);
        }

        result = smb_send_request(req);
        if (result < 0 && result != -EAGAIN)
                goto out;

        result = 0;
        if (!(req->rq_flags & SMB_REQ_TRANSMITTED))
                goto out;

        list_del_init(&req->rq_queue);
        list_add_tail(&req->rq_queue, &server->recvq);
        result = 1;
out:
        return result;
}
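
/*
 * smb_request_send_req() returns 1 when the request was fully transmitted
 * and has been moved to the recvq, 0 when it was only partially sent (or
 * the socket returned -EAGAIN) and should stay queued, and <0 on a hard
 * send error.
 */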
/*
 * Sends one request for this server. (smbiod)
 * Must be called with the server lock held.
 * Returns: <0 on error
 *           0 if no request could be completely sent
 *           1 if all data for one request was sent
 */
int smb_request_send_server(struct smb_sb_info *server)
{
        struct list_head *head;
        struct smb_request *req;
        int result;

        if (server->state != CONN_VALID)
                return 0;

        /* dequeue first request, if any */
        req = NULL;
        head = server->xmitq.next;
        if (head != &server->xmitq) {
                req = list_entry(head, struct smb_request, rq_queue);
        }
        if (!req)
                return 0;

        result = smb_request_send_req(req);
        if (result < 0) {
                server->conn_error = result;
                list_del_init(&req->rq_queue);
                list_add(&req->rq_queue, &server->xmitq);
                result = -EIO;
                goto out;
        }

out:
        return result;
}
/*
 * Try to find a request matching this "mid". Typically the first entry will
 * be the matching one.
 */
static struct smb_request *find_request(struct smb_sb_info *server, int mid)
{
        struct list_head *tmp;
        struct smb_request *req = NULL;

        list_for_each(tmp, &server->recvq) {
                req = list_entry(tmp, struct smb_request, rq_queue);
                if (req->rq_mid == mid) {
                        break;
                }
                req = NULL;
        }

        if (!req) {
                VERBOSE("received reply with mid %d but no request!\n",
                        WVAL(server->header, smb_mid));
                server->rstate = SMB_RECV_DROP;
        }

        return req;
}
/*
 * Called when we have read the smb header and believe this is a response.
 */
static int smb_init_request(struct smb_sb_info *server, struct smb_request *req)
{
        int hdrlen, wct;

        memcpy(req->rq_header, server->header, SMB_HEADER_LEN);

        wct = *(req->rq_header + smb_wct);
        if (wct > 20) {
                PARANOIA("wct too large, %d > 20\n", wct);
                server->rstate = SMB_RECV_DROP;
                return 0;
        }

        req->rq_resp_wct = wct;
        hdrlen = SMB_HEADER_LEN + wct*2 + 2;
        VERBOSE("header length: %d smb_wct: %2d\n", hdrlen, wct);

        req->rq_bytes_recvd = SMB_HEADER_LEN;
        req->rq_rlen = hdrlen;
        req->rq_iov[0].iov_base = req->rq_header;
        req->rq_iov[0].iov_len  = hdrlen;
        req->rq_iovlen = 1;
        server->rstate = SMB_RECV_PARAM;

#ifdef SMB_DEBUG_PACKET_SIZE
        add_recv_stats(smb_len(server->header));
#endif
        return 0;
}
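
/*
 * The hdrlen computed above follows the usual SMB reply layout: the fixed
 * SMB_HEADER_LEN header, then wct 16-bit parameter words, then the 16-bit
 * byte count (bcc). The data part that follows the bcc is read separately
 * once the parameter section is complete.
 */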
/*
 * Reads the SMB parameters
 */
static int smb_recv_param(struct smb_sb_info *server, struct smb_request *req)
{
        int result;

        result = smb_receive(server, req);
        if (result < 0)
                return result;
        if (req->rq_bytes_recvd < req->rq_rlen)
                return 0;

        VERBOSE("result: %d smb_bcc: %04x\n", result,
                WVAL(req->rq_header, SMB_HEADER_LEN +
                     (*(req->rq_header + smb_wct) * 2)));

        result = 0;
        req->rq_iov[0].iov_base = NULL;
        req->rq_rlen = 0;
        if (req->rq_callback)
                req->rq_callback(req);
        else if (req->rq_setup_read)
                result = req->rq_setup_read(req);
        if (result < 0) {
                server->rstate = SMB_RECV_DROP;
                return result;
        }

        server->rstate = req->rq_rlen > 0 ? SMB_RECV_DATA : SMB_RECV_END;

        req->rq_bytes_recvd = 0;        /* recvd out of the iov */

        VERBOSE("rlen: %d\n", req->rq_rlen);
        if (req->rq_rlen < 0) {
                PARANOIA("Parameters read beyond end of packet!\n");
                server->rstate = SMB_RECV_END;
                return -EIO;
        }
        return 0;
}
/*
 * Reads the SMB data
 */
static int smb_recv_data(struct smb_sb_info *server, struct smb_request *req)
{
        int result;

        result = smb_receive(server, req);
        if (result < 0)
                goto out;
        if (req->rq_bytes_recvd < req->rq_rlen)
                goto out;
        server->rstate = SMB_RECV_END;
out:
        VERBOSE("result: %d\n", result);
        return result;
}
/*
 * Receive a transaction2 response
 * Return: 0 if the response has been fully read
 *         1 if there are further "fragments" to read
 *        <0 if there is an error
 */
static int smb_recv_trans2(struct smb_sb_info *server, struct smb_request *req)
{
        unsigned char *inbuf;
        unsigned int parm_disp, parm_offset, parm_count, parm_tot;
        unsigned int data_disp, data_offset, data_count, data_tot;
        int hdrlen = SMB_HEADER_LEN + req->rq_resp_wct*2 - 2;

        VERBOSE("handling trans2\n");

        inbuf = req->rq_header;
        data_tot    = WVAL(inbuf, smb_tdrcnt);
        parm_tot    = WVAL(inbuf, smb_tprcnt);
        parm_disp   = WVAL(inbuf, smb_prdisp);
        parm_offset = WVAL(inbuf, smb_proff);
        parm_count  = WVAL(inbuf, smb_prcnt);
        data_disp   = WVAL(inbuf, smb_drdisp);
        data_offset = WVAL(inbuf, smb_droff);
        data_count  = WVAL(inbuf, smb_drcnt);

        /* Modify offset for the split header/buffer we use */
        if (data_count || data_offset) {
                if (unlikely(data_offset < hdrlen))
                        goto out_bad_data;
                else
                        data_offset -= hdrlen;
        }
        if (parm_count || parm_offset) {
                if (unlikely(parm_offset < hdrlen))
                        goto out_bad_parm;
                else
                        parm_offset -= hdrlen;
        }

        if (parm_count == parm_tot && data_count == data_tot) {
                /*
                 * This packet has all the trans2 data.
                 *
                 * We setup the request so that this will be the common
                 * case. It may be a server error to not return a
                 * response that fits.
                 */
                VERBOSE("single trans2 response "
                        "dcnt=%u, pcnt=%u, doff=%u, poff=%u\n",
                        data_count, parm_count,
                        data_offset, parm_offset);
                req->rq_ldata = data_count;
                req->rq_lparm = parm_count;
                req->rq_data = req->rq_buffer + data_offset;
                req->rq_parm = req->rq_buffer + parm_offset;
                if (unlikely(parm_offset + parm_count > req->rq_rlen))
                        goto out_bad_parm;
                if (unlikely(data_offset + data_count > req->rq_rlen))
                        goto out_bad_data;
                return 0;
        }
        VERBOSE("multi trans2 response "
                "frag=%d, dcnt=%u, pcnt=%u, doff=%u, poff=%u\n",
                req->rq_fragment,
                data_count, parm_count,
                data_offset, parm_offset);

        if (!req->rq_fragment) {
                int buf_len;

                /* We got the first trans2 fragment */
                req->rq_fragment = 1;
                req->rq_total_data = data_tot;
                req->rq_total_parm = parm_tot;
                req->rq_ldata = 0;
                req->rq_lparm = 0;

                buf_len = data_tot + parm_tot;
                if (buf_len > SMB_MAX_PACKET_SIZE)
                        goto out_too_long;

                req->rq_trans2bufsize = buf_len;
                req->rq_trans2buffer = smb_kmalloc(buf_len, GFP_NOFS);
                if (!req->rq_trans2buffer)
                        goto out_no_mem;
                memset(req->rq_trans2buffer, 0, buf_len);

                req->rq_parm = req->rq_trans2buffer;
                req->rq_data = req->rq_trans2buffer + parm_tot;
        } else if (unlikely(req->rq_total_data < data_tot ||
                            req->rq_total_parm < parm_tot))
                goto out_data_grew;

        if (unlikely(parm_disp + parm_count > req->rq_total_parm ||
                     parm_offset + parm_count > req->rq_rlen))
                goto out_bad_parm;
        if (unlikely(data_disp + data_count > req->rq_total_data ||
                     data_offset + data_count > req->rq_rlen))
                goto out_bad_data;

        inbuf = req->rq_buffer;
        memcpy(req->rq_parm + parm_disp, inbuf + parm_offset, parm_count);
        memcpy(req->rq_data + data_disp, inbuf + data_offset, data_count);
        req->rq_ldata += data_count;
        req->rq_lparm += parm_count;

        /*
         * Check whether we've received all of the data. Note that
         * we use the packet totals -- total lengths might shrink!
         */
        if (req->rq_ldata >= data_tot && req->rq_lparm >= parm_tot) {
                req->rq_ldata = data_tot;
                req->rq_lparm = parm_tot;
                return 0;
        }
        return 1;
out_too_long:
        printk(KERN_ERR "smb_trans2: data/param too long, data=%u, parm=%u\n",
               data_tot, parm_tot);
        goto out_EIO;
out_no_mem:
        printk(KERN_ERR "smb_trans2: couldn't allocate data area of %d bytes\n",
               req->rq_trans2bufsize);
        req->rq_errno = -ENOMEM;
        goto out;
out_data_grew:
        printk(KERN_ERR "smb_trans2: data/params grew!\n");
        goto out_EIO;
out_bad_parm:
        printk(KERN_ERR "smb_trans2: invalid parms, disp=%u, cnt=%u, tot=%u, ofs=%u\n",
               parm_disp, parm_count, parm_tot, parm_offset);
        goto out_EIO;
out_bad_data:
        printk(KERN_ERR "smb_trans2: invalid data, disp=%u, cnt=%u, tot=%u, ofs=%u\n",
               data_disp, data_count, data_tot, data_offset);
out_EIO:
        req->rq_errno = -EIO;
out:
        return req->rq_errno;
}
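
/*
 * Fragment reassembly above in short: each trans2 fragment carries a count
 * and a displacement for its parameter and data sections, and the pieces
 * are copied into a single rq_trans2buffer sized from the totals announced
 * in the first fragment (parameters first, then data at offset parm_tot).
 */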
/*
 * State machine for receiving responses. We handle the fact that we can't
 * read the full response in one try by having states telling us how much we
 * have read.
 *
 * Must be called with the server lock held (only called from smbiod).
 *
 * Return: <0 on error
 */
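/*
 * The receive states below progress roughly as
 *      SMB_RECV_START -> SMB_RECV_HEADER -> SMB_RECV_HCOMPLETE ->
 *      SMB_RECV_PARAM -> SMB_RECV_DATA -> SMB_RECV_END
 * with SMB_RECV_DROP used to discard a packet we cannot match or parse and
 * SMB_RECV_REQUEST flagging an unexpected non-reply from the server.
 */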
int smb_request_recv(struct smb_sb_info *server)
{
        struct smb_request *req = NULL;
        int result = 0;

        if (smb_recv_available(server) <= 0)
                return 0;

        VERBOSE("state: %d\n", server->rstate);
        switch (server->rstate) {
        case SMB_RECV_DROP:
                result = smb_receive_drop(server);
                if (result < 0)
                        break;
                if (server->rstate == SMB_RECV_DROP)
                        break;
                server->rstate = SMB_RECV_START;
                /* fallthrough */
        case SMB_RECV_START:
                server->smb_read = 0;
                server->rstate = SMB_RECV_HEADER;
                /* fallthrough */
        case SMB_RECV_HEADER:
                result = smb_receive_header(server);
                if (result < 0)
                        break;
                if (server->rstate == SMB_RECV_HEADER)
                        break;
                if (!(*(server->header + smb_flg) & SMB_FLAGS_REPLY)) {
                        server->rstate = SMB_RECV_REQUEST;
                        break;
                }
                if (server->rstate != SMB_RECV_HCOMPLETE)
                        break;
                /* fallthrough */
        case SMB_RECV_HCOMPLETE:
                req = find_request(server, WVAL(server->header, smb_mid));
                if (!req)
                        break;
                smb_init_request(server, req);
                req->rq_rcls = *(req->rq_header + smb_rcls);
                req->rq_err  = WVAL(req->rq_header, smb_err);
                if (server->rstate != SMB_RECV_PARAM)
                        break;
                /* fallthrough */
        case SMB_RECV_PARAM:
                if (!req)
                        req = find_request(server, WVAL(server->header, smb_mid));
                if (!req)
                        break;
                result = smb_recv_param(server, req);
                if (result < 0)
                        break;
                if (server->rstate != SMB_RECV_DATA)
                        break;
                /* fallthrough */
        case SMB_RECV_DATA:
                if (!req)
                        req = find_request(server, WVAL(server->header, smb_mid));
                if (!req)
                        break;
                result = smb_recv_data(server, req);
                if (result < 0)
                        break;
                break;

        /* We should never be called with any of these states */
        case SMB_RECV_END:
        case SMB_RECV_REQUEST:
                server->rstate = SMB_RECV_END;
                break;
        }
        if (result < 0) {
                /* We saw an error */
                return result;
        }

        if (server->rstate != SMB_RECV_END)
                return 0;

        result = 0;
        if (req->rq_trans2_command && req->rq_rcls == SUCCESS)
                result = smb_recv_trans2(server, req);

        /*
         * Response completely read. Drop any extra bytes sent by the server.
         * (Yes, servers sometimes add extra bytes to responses)
         */
        VERBOSE("smb_len: %d smb_read: %d\n",
                server->smb_len, server->smb_read);
        if (server->smb_read < server->smb_len)
                smb_receive_drop(server);

        server->rstate = SMB_RECV_START;

        if (!result) {
                list_del_init(&req->rq_queue);
                req->rq_flags |= SMB_REQ_RECEIVED;
                smb_rput(req);
                wake_up_interruptible(&req->rq_wait);
        }
        return 0;
}