/*
 * fs/cifs/smb2pdu.c
 *
 * Copyright (C) International Business Machines Corp., 2009, 2012
 *               Etersoft, 2012
 * Author(s): Steve French (sfrench@us.ibm.com)
 *            Pavel Shilovsky (pshilovsky@samba.org) 2012
 *
 * Contains the routines for constructing the SMB2 PDUs themselves
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* SMB2 PDU handling routines here - except for leftovers (eg session setup) */
/* Note that there are handle based routines which must be */
/* treated slightly differently for reconnection purposes since we never */
/* want to reuse a stale file handle and only the caller knows the file info */

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/vfs.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <linux/xattr.h>
#include "smb2pdu.h"
#include "cifsglob.h"
#include "cifsacl.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "ntlmssp.h"
#include "smb2status.h"
#include "smb2glob.h"
#include "cifspdu.h"

/*
 * The following table defines the expected "StructureSize" of SMB2 requests
 * in order by SMB2 command.  This is similar to "wct" in SMB/CIFS requests.
 *
 * Note that commands are defined in smb2pdu.h in le16 but the array below is
 * indexed by command in host byte order.
 */
static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
        /* SMB2_NEGOTIATE */ 36,
        /* SMB2_SESSION_SETUP */ 25,
        /* SMB2_LOGOFF */ 4,
        /* SMB2_TREE_CONNECT */ 9,
        /* SMB2_TREE_DISCONNECT */ 4,
        /* SMB2_CREATE */ 57,
        /* SMB2_CLOSE */ 24,
        /* SMB2_FLUSH */ 24,
        /* SMB2_READ */ 49,
        /* SMB2_WRITE */ 49,
        /* SMB2_LOCK */ 48,
        /* SMB2_IOCTL */ 57,
        /* SMB2_CANCEL */ 4,
        /* SMB2_ECHO */ 4,
        /* SMB2_QUERY_DIRECTORY */ 33,
        /* SMB2_CHANGE_NOTIFY */ 32,
        /* SMB2_QUERY_INFO */ 41,
        /* SMB2_SET_INFO */ 33,
        /* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
};
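
/*
 * Illustrative sketch only (not part of the original file): the table above
 * is indexed by the command code converted back to host byte order, so the
 * fixed StructureSize of a create request would be looked up roughly as
 *
 *	__u16 size = smb2_req_struct_sizes[le16_to_cpu(SMB2_CREATE)];	// 57
 *
 * which is what smb2_hdr_assemble() below does for the command it is handed.
 */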

static void
smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ ,
                  const struct cifs_tcon *tcon)
{
        struct smb2_pdu *pdu = (struct smb2_pdu *)hdr;
        char *temp = (char *)hdr;
        /* lookup word count ie StructureSize from table */
        __u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_cmd)];

        /*
         * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of
         * largest operations (Create)
         */
        memset(temp, 0, 256);

        /* Note this is only network field converted to big endian */
        hdr->smb2_buf_length = cpu_to_be32(parmsize + sizeof(struct smb2_hdr)
                        - 4 /* RFC 1001 length field itself not counted */);

        hdr->ProtocolId[0] = 0xFE;
        hdr->ProtocolId[1] = 'S';
        hdr->ProtocolId[2] = 'M';
        hdr->ProtocolId[3] = 'B';
        hdr->StructureSize = cpu_to_le16(64);
        hdr->Command = smb2_cmd;
        hdr->CreditRequest = cpu_to_le16(2); /* BB make this dynamic */
        hdr->ProcessId = cpu_to_le32((__u16)current->tgid);

        if (!tcon)
                goto out;

        hdr->TreeId = tcon->tid;
        /* Uid is not converted */
        if (tcon->ses)
                hdr->SessionId = tcon->ses->Suid;
        /* BB check following DFS flags BB */
        /* BB do we have to add check for SHI1005_FLAGS_DFS_ROOT too? */
        if (tcon->share_flags & SHI1005_FLAGS_DFS)
                hdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS;
        /* BB how does SMB2 do case sensitive? */
        /*	if (tcon->nocase)
                        hdr->Flags |= SMBFLG_CASELESS; */
        if (tcon->ses && tcon->ses->server &&
            (tcon->ses->server->sec_mode & SECMODE_SIGN_REQUIRED))
                hdr->Flags |= SMB2_FLAGS_SIGNED;
out:
        pdu->StructureSize2 = cpu_to_le16(parmsize);
        return;
}

static int
smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
{
        int rc = 0;
        struct nls_table *nls_codepage;
        struct cifs_ses *ses;
        struct TCP_Server_Info *server;

        /*
         * SMB2s NegProt, SessSetup, Logoff do not have tcon yet so
         * check for tcp and smb session status done differently
         * for those three - in the calling routine.
         */
        if (tcon == NULL)
                return rc;

        if (smb2_command == SMB2_TREE_CONNECT)
                return rc;

        if (tcon->tidStatus == CifsExiting) {
                /*
                 * only tree disconnect, open, and write,
                 * (and ulogoff which does not have tcon)
                 * are allowed as we start force umount.
                 */
                if ((smb2_command != SMB2_WRITE) &&
                    (smb2_command != SMB2_CREATE) &&
                    (smb2_command != SMB2_TREE_DISCONNECT)) {
                        cFYI(1, "can not send cmd %d while umounting",
                                smb2_command);
                        return -ENODEV;
                }
        }
        if ((!tcon->ses) || (tcon->ses->status == CifsExiting) ||
            (!tcon->ses->server))
                return -EIO;

        ses = tcon->ses;
        server = ses->server;

        /*
         * Give demultiplex thread up to 10 seconds to reconnect, should be
         * greater than cifs socket timeout which is 7 seconds
         */
        while (server->tcpStatus == CifsNeedReconnect) {
                /*
                 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
                 * here since they are implicitly done when session drops.
                 */
                switch (smb2_command) {
                /*
                 * BB Should we keep oplock break and add flush to exceptions?
                 */
                case SMB2_TREE_DISCONNECT:
                case SMB2_CANCEL:
                case SMB2_CLOSE:
                case SMB2_OPLOCK_BREAK:
                        return -EAGAIN;
                }

                wait_event_interruptible_timeout(server->response_q,
                        (server->tcpStatus != CifsNeedReconnect), 10 * HZ);

                /* are we still trying to reconnect? */
                if (server->tcpStatus != CifsNeedReconnect)
                        break;

                /*
                 * on "soft" mounts we wait once. Hard mounts keep
                 * retrying until process is killed or server comes
                 * back on-line
                 */
                if (!tcon->retry) {
                        cFYI(1, "gave up waiting on reconnect in smb_init");
                        return -EHOSTDOWN;
                }
        }

        if (!tcon->ses->need_reconnect && !tcon->need_reconnect)
                return rc;

        nls_codepage = load_nls_default();

        /*
         * need to prevent multiple threads trying to simultaneously reconnect
         * the same SMB session
         */
        mutex_lock(&tcon->ses->session_mutex);
        rc = cifs_negotiate_protocol(0, tcon->ses);
        if (!rc && tcon->ses->need_reconnect)
                rc = cifs_setup_session(0, tcon->ses, nls_codepage);
        if (rc || !tcon->need_reconnect) {
                mutex_unlock(&tcon->ses->session_mutex);
                goto out;
        }

        cifs_mark_open_files_invalid(tcon);
        rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nls_codepage);
        mutex_unlock(&tcon->ses->session_mutex);
        cFYI(1, "reconnect tcon rc = %d", rc);
        if (rc)
                goto out;
        atomic_inc(&tconInfoReconnectCount);
        /*
         * BB FIXME add code to check if wsize needs update due to negotiated
         * smb buffer size shrinking.
         */
out:
        /*
         * Check if handle based operation so we know whether we can continue
         * or not without returning to caller to reset file handle.
         */
        /*
         * BB Is flush done by server on drop of tcp session? Should we special
         * case it and skip above?
         */
        switch (smb2_command) {
        case SMB2_FLUSH:
        case SMB2_READ:
        case SMB2_WRITE:
        case SMB2_LOCK:
        case SMB2_IOCTL:
        case SMB2_QUERY_DIRECTORY:
        case SMB2_CHANGE_NOTIFY:
        case SMB2_QUERY_INFO:
        case SMB2_SET_INFO:
                return -EAGAIN;
        }
        unload_nls(nls_codepage);
        return rc;
}

/*
 * Allocate and return pointer to an SMB request hdr, and set basic
 * SMB information in the SMB header. If the return code is zero, this
 * function must have filled in request_buf pointer.
 */
static int
small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon,
                void **request_buf)
{
        int rc = 0;

        rc = smb2_reconnect(smb2_command, tcon);
        if (rc)
                return rc;

        /* BB eventually switch this to SMB2 specific small buf size */
        *request_buf = cifs_small_buf_get();
        if (*request_buf == NULL) {
                /* BB should we add a retry in here if not a writepage? */
                return -ENOMEM;
        }

        smb2_hdr_assemble((struct smb2_hdr *) *request_buf, smb2_command, tcon);

        if (tcon != NULL) {
#ifdef CONFIG_CIFS_STATS2
                uint16_t com_code = le16_to_cpu(smb2_command);
                cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
#endif
                cifs_stats_inc(&tcon->num_smbs_sent);
        }

        return rc;
}

static void
free_rsp_buf(int resp_buftype, void *rsp)
{
        if (resp_buftype == CIFS_SMALL_BUFFER)
                cifs_small_buf_release(rsp);
        else if (resp_buftype == CIFS_LARGE_BUFFER)
                cifs_buf_release(rsp);
}

/*
 * SMB2 Worker functions follow:
 *
 * The general structure of the worker functions is:
 *   1) Call smb2_init (assembles SMB2 header)
 *   2) Initialize SMB2 command specific fields in fixed length area of SMB
 *   3) Call smb_sendrcv2 (sends request on socket and waits for response)
 *   4) Decode SMB2 command specific fields in the fixed length area
 *   5) Decode variable length data area (if any for this SMB2 command type)
 *   6) Call free smb buffer
 *   7) return
 */
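
/*
 * Illustrative sketch only (not part of the original file): a minimal worker
 * following the pattern above would look roughly like the skeleton below.
 * The names smb2_example_req and SMB2_EXAMPLE are hypothetical; the real
 * workers in this file (for example SMB2_tdis() or SMB2_close()) are the
 * authoritative versions of this shape.
 *
 *	int SMB2_example(const unsigned int xid, struct cifs_tcon *tcon)
 *	{
 *		struct smb2_example_req *req;
 *		struct kvec iov[1];
 *		int resp_buftype;
 *		int rc;
 *
 *		rc = small_smb2_init(SMB2_EXAMPLE, tcon, (void **) &req);
 *		if (rc)
 *			return rc;
 *		// step 2: fill in command specific fixed fields of *req
 *		iov[0].iov_base = (char *)req;
 *		iov[0].iov_len = get_rfc1002_length(req) + 4;
 *		rc = SendReceive2(xid, tcon->ses, iov, 1, &resp_buftype, 0);
 *		// steps 4/5: decode the response at iov[0].iov_base
 *		free_rsp_buf(resp_buftype, iov[0].iov_base);
 *		return rc;
 *	}
 */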

int
SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
{
        struct smb2_negotiate_req *req;
        struct smb2_negotiate_rsp *rsp;
        struct kvec iov[1];
        int rc = 0;
        int resp_buftype;
        struct TCP_Server_Info *server;
        unsigned int sec_flags;
        u16 temp = 0;
        int blob_offset, blob_length;
        char *security_blob;
        int flags = CIFS_NEG_OP;

        cFYI(1, "Negotiate protocol");

        if (ses->server)
                server = ses->server;
        else {
                rc = -EIO;
                return rc;
        }

        rc = small_smb2_init(SMB2_NEGOTIATE, NULL, (void **) &req);
        if (rc)
                return rc;

        /* if any of auth flags (i.e. not sign or seal) are overridden use them */
        if (ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL)))
                sec_flags = ses->overrideSecFlg;  /* BB FIXME fix sign flags? */
        else /* if override flags set only sign/seal OR them with global auth */
                sec_flags = global_secflags | ses->overrideSecFlg;

        cFYI(1, "sec_flags 0x%x", sec_flags);

        req->hdr.SessionId = 0;

        req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id);

        req->DialectCount = cpu_to_le16(1); /* One vers= at a time for now */
        inc_rfc1001_len(req, 2);

        /* only one of SMB2 signing flags may be set in SMB2 request */
        if ((sec_flags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN)
                temp = SMB2_NEGOTIATE_SIGNING_REQUIRED;
        else if (sec_flags & CIFSSEC_MAY_SIGN) /* MAY_SIGN is a single flag */
                temp = SMB2_NEGOTIATE_SIGNING_ENABLED;

        req->SecurityMode = cpu_to_le16(temp);

        req->Capabilities = cpu_to_le32(ses->server->vals->req_capabilities);

        memcpy(req->ClientGUID, cifs_client_guid, SMB2_CLIENT_GUID_SIZE);

        iov[0].iov_base = (char *)req;
        /* 4 for rfc1002 length field */
        iov[0].iov_len = get_rfc1002_length(req) + 4;

        rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags);

        rsp = (struct smb2_negotiate_rsp *)iov[0].iov_base;
        /*
         * No tcon so can't do
         * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
         */
        if (rc != 0)
                goto neg_exit;

        cFYI(1, "mode 0x%x", rsp->SecurityMode);

        /* BB we may eventually want to match the negotiated vs. requested
           dialect, even though we are only requesting one at a time */
        if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID))
                cFYI(1, "negotiated smb2.0 dialect");
        else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID))
                cFYI(1, "negotiated smb2.1 dialect");
        else if (rsp->DialectRevision == cpu_to_le16(SMB30_PROT_ID))
                cFYI(1, "negotiated smb3.0 dialect");
        else {
                cERROR(1, "Illegal dialect returned by server %d",
                           le16_to_cpu(rsp->DialectRevision));
                rc = -EIO;
                goto neg_exit;
        }
        server->dialect = le16_to_cpu(rsp->DialectRevision);

        server->maxBuf = le32_to_cpu(rsp->MaxTransactSize);
        server->max_read = le32_to_cpu(rsp->MaxReadSize);
        server->max_write = le32_to_cpu(rsp->MaxWriteSize);
        /* BB Do we need to validate the SecurityMode? */
        server->sec_mode = le16_to_cpu(rsp->SecurityMode);
        server->capabilities = le32_to_cpu(rsp->Capabilities);
        /* Internal types */
        server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;

        security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
                                               &rsp->hdr);
        if (blob_length == 0) {
                cERROR(1, "missing security blob on negprot");
                rc = -EIO;
                goto neg_exit;
        }

        cFYI(1, "sec_flags 0x%x", sec_flags);
        if ((sec_flags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) {
                cFYI(1, "Signing required");
                if (!(server->sec_mode & (SMB2_NEGOTIATE_SIGNING_REQUIRED |
                      SMB2_NEGOTIATE_SIGNING_ENABLED))) {
                        cERROR(1, "signing required but server lacks support");
                        rc = -EOPNOTSUPP;
                        goto neg_exit;
                }
                server->sec_mode |= SECMODE_SIGN_REQUIRED;
        } else if (sec_flags & CIFSSEC_MAY_SIGN) {
                cFYI(1, "Signing optional");
                if (server->sec_mode & SMB2_NEGOTIATE_SIGNING_REQUIRED) {
                        cFYI(1, "Server requires signing");
                        server->sec_mode |= SECMODE_SIGN_REQUIRED;
                } else {
                        server->sec_mode &=
                                ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
                }
        } else {
                cFYI(1, "Signing disabled");
                if (server->sec_mode & SMB2_NEGOTIATE_SIGNING_REQUIRED) {
                        cERROR(1, "Server requires packet signing to be enabled"
                                  " in /proc/fs/cifs/SecurityFlags.");
                        rc = -EOPNOTSUPP;
                        goto neg_exit;
                }
                server->sec_mode &=
                        ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
        }

#ifdef CONFIG_SMB2_ASN1  /* BB REMOVEME when updated asn1.c ready */
        rc = decode_neg_token_init(security_blob, blob_length,
                                   &server->sec_type);
        if (rc == 1)
                rc = 0;
        else if (rc == 0) {
                rc = -EIO;
                goto neg_exit;
        }
#endif

neg_exit:
        free_rsp_buf(resp_buftype, rsp);
        return rc;
}

int
SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
                const struct nls_table *nls_cp)
{
        struct smb2_sess_setup_req *req;
        struct smb2_sess_setup_rsp *rsp = NULL;
        struct kvec iov[2];
        int rc = 0;
        int resp_buftype;
        __le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
        struct TCP_Server_Info *server;
        unsigned int sec_flags;
        u8 temp = 0;
        u16 blob_length = 0;
        char *security_blob;
        char *ntlmssp_blob = NULL;
        bool use_spnego = false; /* else use raw ntlmssp */

        cFYI(1, "Session Setup");

        if (ses->server)
                server = ses->server;
        else {
                rc = -EIO;
                return rc;
        }

        /*
         * If memory allocation is successful, caller of this function
         * frees it.
         */
        ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
        if (!ses->ntlmssp)
                return -ENOMEM;

        ses->server->secType = RawNTLMSSP;

ssetup_ntlmssp_authenticate:
        if (phase == NtLmChallenge)
                phase = NtLmAuthenticate; /* if ntlmssp, now final phase */

        rc = small_smb2_init(SMB2_SESSION_SETUP, NULL, (void **) &req);
        if (rc)
                return rc;

        /* if any of auth flags (i.e. not sign or seal) are overridden use them */
        if (ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL)))
                sec_flags = ses->overrideSecFlg;  /* BB FIXME fix sign flags? */
        else /* if override flags set only sign/seal OR them with global auth */
                sec_flags = global_secflags | ses->overrideSecFlg;

        cFYI(1, "sec_flags 0x%x", sec_flags);

        req->hdr.SessionId = 0; /* First session, not a reauthenticate */
        req->VcNumber = 0; /* MBZ */
        /* to enable echos and oplocks */
        req->hdr.CreditRequest = cpu_to_le16(3);

        /* only one of SMB2 signing flags may be set in SMB2 request */
        if ((sec_flags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN)
                temp = SMB2_NEGOTIATE_SIGNING_REQUIRED;
        else if (ses->server->sec_mode & SMB2_NEGOTIATE_SIGNING_REQUIRED)
                temp = SMB2_NEGOTIATE_SIGNING_REQUIRED;
        else if (sec_flags & CIFSSEC_MAY_SIGN) /* MAY_SIGN is a single flag */
                temp = SMB2_NEGOTIATE_SIGNING_ENABLED;

        req->SecurityMode = temp;
        req->Capabilities = 0;
        req->Channel = 0; /* MBZ */

        iov[0].iov_base = (char *)req;
        /* 4 for rfc1002 length field and 1 for pad */
        iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
        if (phase == NtLmNegotiate) {
                ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
                                       GFP_KERNEL);
                if (ntlmssp_blob == NULL) {
                        rc = -ENOMEM;
                        goto ssetup_exit;
                }
                build_ntlmssp_negotiate_blob(ntlmssp_blob, ses);
                if (use_spnego) {
                        /* blob_length = build_spnego_ntlmssp_blob(
                                        &security_blob,
                                        sizeof(struct _NEGOTIATE_MESSAGE),
                                        ntlmssp_blob); */
                        /* BB eventually need to add this */
                        cERROR(1, "spnego not supported for SMB2 yet");
                        rc = -EOPNOTSUPP;
                        kfree(ntlmssp_blob);
                        goto ssetup_exit;
                } else {
                        blob_length = sizeof(struct _NEGOTIATE_MESSAGE);
                        /* with raw NTLMSSP we don't encapsulate in SPNEGO */
                        security_blob = ntlmssp_blob;
                }
        } else if (phase == NtLmAuthenticate) {
                req->hdr.SessionId = ses->Suid;
                ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
                                       GFP_KERNEL);
                if (ntlmssp_blob == NULL) {
                        cERROR(1, "failed to malloc ntlmssp blob");
                        rc = -ENOMEM;
                        goto ssetup_exit;
                }
                rc = build_ntlmssp_auth_blob(ntlmssp_blob, &blob_length, ses,
                                             nls_cp);
                if (rc) {
                        cFYI(1, "build_ntlmssp_auth_blob failed %d", rc);
                        goto ssetup_exit; /* BB double check error handling */
                }
                if (use_spnego) {
                        /* blob_length = build_spnego_ntlmssp_blob(
                                        &security_blob,
                                        blob_length,
                                        ntlmssp_blob); */
                        cERROR(1, "spnego not supported for SMB2 yet");
                        rc = -EOPNOTSUPP;
                        kfree(ntlmssp_blob);
                        goto ssetup_exit;
                } else {
                        security_blob = ntlmssp_blob;
                }
        } else {
                cERROR(1, "illegal ntlmssp phase");
                rc = -EIO;
                goto ssetup_exit;
        }

        /* Testing shows that buffer offset must be at location of Buffer[0] */
        req->SecurityBufferOffset =
                        cpu_to_le16(sizeof(struct smb2_sess_setup_req) -
                                    1 /* pad */ - 4 /* rfc1001 len */);
        req->SecurityBufferLength = cpu_to_le16(blob_length);
        iov[1].iov_base = security_blob;
        iov[1].iov_len = blob_length;

        inc_rfc1001_len(req, blob_length - 1 /* pad */);

        /* BB add code to build os and lm fields */

        rc = SendReceive2(xid, ses, iov, 2, &resp_buftype,
                          CIFS_LOG_ERROR | CIFS_NEG_OP);

        kfree(security_blob);
        rsp = (struct smb2_sess_setup_rsp *)iov[0].iov_base;
        if (resp_buftype != CIFS_NO_BUFFER &&
            rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) {
                if (phase != NtLmNegotiate) {
                        cERROR(1, "Unexpected more processing error");
                        goto ssetup_exit;
                }
                if (offsetof(struct smb2_sess_setup_rsp, Buffer) - 4 !=
                                le16_to_cpu(rsp->SecurityBufferOffset)) {
                        cERROR(1, "Invalid security buffer offset %d",
                                  le16_to_cpu(rsp->SecurityBufferOffset));
                        rc = -EIO;
                        goto ssetup_exit;
                }

                /* NTLMSSP Negotiate sent now processing challenge (response) */
                phase = NtLmChallenge; /* process ntlmssp challenge */
                rc = 0; /* MORE_PROCESSING is not an error here but expected */
                ses->Suid = rsp->hdr.SessionId;
                rc = decode_ntlmssp_challenge(rsp->Buffer,
                                le16_to_cpu(rsp->SecurityBufferLength), ses);
        }

        /*
         * BB eventually add code for SPNEGO decoding of NtlmChallenge blob,
         * but at least the raw NTLMSSP case works.
         */
        /*
         * No tcon so can't do
         * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
         */
        if (rc != 0)
                goto ssetup_exit;

        ses->session_flags = le16_to_cpu(rsp->SessionFlags);
ssetup_exit:
        free_rsp_buf(resp_buftype, rsp);

        /* if ntlmssp, and negotiate succeeded, proceed to authenticate phase */
        if ((phase == NtLmChallenge) && (rc == 0))
                goto ssetup_ntlmssp_authenticate;
        return rc;
}

int
SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
{
        struct smb2_logoff_req *req; /* response is also trivial struct */
        int rc = 0;
        struct TCP_Server_Info *server;

        cFYI(1, "disconnect session %p", ses);

        if (ses && (ses->server))
                server = ses->server;
        else
                return -EIO;

        rc = small_smb2_init(SMB2_LOGOFF, NULL, (void **) &req);
        if (rc)
                return rc;

        /* since no tcon, smb2_init can not do this, so do here */
        req->hdr.SessionId = ses->Suid;
        if (server->sec_mode & SECMODE_SIGN_REQUIRED)
                req->hdr.Flags |= SMB2_FLAGS_SIGNED;

        rc = SendReceiveNoRsp(xid, ses, (char *) &req->hdr, 0);
        /*
         * No tcon so can't do
         * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
         */
        return rc;
}

static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code)
{
        cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]);
}

#define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */)

int
SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
          struct cifs_tcon *tcon, const struct nls_table *cp)
{
        struct smb2_tree_connect_req *req;
        struct smb2_tree_connect_rsp *rsp = NULL;
        struct kvec iov[2];
        int rc = 0;
        int resp_buftype;
        int unc_path_len;
        struct TCP_Server_Info *server;
        __le16 *unc_path = NULL;

        cFYI(1, "TCON");

        if ((ses->server) && tree)
                server = ses->server;
        else
                return -EIO;

        if (tcon && tcon->bad_network_name)
                return -ENOENT;

        unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
        if (unc_path == NULL)
                return -ENOMEM;

        unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp) + 1;
        unc_path_len *= 2;
        if (unc_path_len < 2) {
                kfree(unc_path);
                return -EINVAL;
        }

        rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
        if (rc) {
                kfree(unc_path);
                return rc;
        }

        if (tcon == NULL) {
                /* since no tcon, smb2_init can not do this, so do here */
                req->hdr.SessionId = ses->Suid;
                /* if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED)
                        req->hdr.Flags |= SMB2_FLAGS_SIGNED; */
        }

        iov[0].iov_base = (char *)req;
        /* 4 for rfc1002 length field and 1 for pad */
        iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;

        /* Testing shows that buffer offset must be at location of Buffer[0] */
        req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)
                        - 1 /* pad */ - 4 /* do not count rfc1001 len field */);
        req->PathLength = cpu_to_le16(unc_path_len - 2);
        iov[1].iov_base = unc_path;
        iov[1].iov_len = unc_path_len;

        inc_rfc1001_len(req, unc_path_len - 1 /* pad */);

        rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
        rsp = (struct smb2_tree_connect_rsp *)iov[0].iov_base;

        if (rc != 0) {
                if (tcon) {
                        cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
                        tcon->need_reconnect = true;
                }
                goto tcon_error_exit;
        }

        if (tcon == NULL) {
                ses->ipc_tid = rsp->hdr.TreeId;
                goto tcon_exit;
        }

        if (rsp->ShareType & SMB2_SHARE_TYPE_DISK)
                cFYI(1, "connection to disk share");
        else if (rsp->ShareType & SMB2_SHARE_TYPE_PIPE) {
                tcon->ipc = true;
                cFYI(1, "connection to pipe share");
        } else if (rsp->ShareType & SMB2_SHARE_TYPE_PRINT) {
                tcon->print = true;
                cFYI(1, "connection to printer");
        } else {
                cERROR(1, "unknown share type %d", rsp->ShareType);
                rc = -EOPNOTSUPP;
                goto tcon_error_exit;
        }

        tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
        tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
        tcon->tidStatus = CifsGood;
        tcon->need_reconnect = false;
        tcon->tid = rsp->hdr.TreeId;
        strncpy(tcon->treeName, tree, MAX_TREE_SIZE);

        if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
            ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
                cERROR(1, "DFS capability contradicts DFS flag");

tcon_exit:
        free_rsp_buf(resp_buftype, rsp);
        kfree(unc_path);
        return rc;

tcon_error_exit:
        if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
                cERROR(1, "BAD_NETWORK_NAME: %s", tree);
                tcon->bad_network_name = true;
        }
        goto tcon_exit;
}

int
SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
{
        struct smb2_tree_disconnect_req *req; /* response is trivial */
        int rc = 0;
        struct TCP_Server_Info *server;
        struct cifs_ses *ses = tcon->ses;

        cFYI(1, "Tree Disconnect");

        if (ses && (ses->server))
                server = ses->server;
        else
                return -EIO;

        if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
                return 0;

        rc = small_smb2_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req);
        if (rc)
                return rc;

        rc = SendReceiveNoRsp(xid, ses, (char *)&req->hdr, 0);
        if (rc)
                cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);

        return rc;
}

static struct create_lease *
create_lease_buf(u8 *lease_key, u8 oplock)
{
        struct create_lease *buf;

        buf = kmalloc(sizeof(struct create_lease), GFP_KERNEL);
        if (!buf)
                return NULL;

        memset(buf, 0, sizeof(struct create_lease));

        buf->lcontext.LeaseKeyLow = cpu_to_le64(*((u64 *)lease_key));
        buf->lcontext.LeaseKeyHigh = cpu_to_le64(*((u64 *)(lease_key + 8)));
        if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
                buf->lcontext.LeaseState = SMB2_LEASE_WRITE_CACHING |
                                           SMB2_LEASE_READ_CACHING;
        else if (oplock == SMB2_OPLOCK_LEVEL_II)
                buf->lcontext.LeaseState = SMB2_LEASE_READ_CACHING;
        else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
                buf->lcontext.LeaseState = SMB2_LEASE_HANDLE_CACHING |
                                           SMB2_LEASE_READ_CACHING |
                                           SMB2_LEASE_WRITE_CACHING;

        buf->ccontext.DataOffset = cpu_to_le16(offsetof
                                        (struct create_lease, lcontext));
        buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
        buf->ccontext.NameOffset = cpu_to_le16(offsetof
                                        (struct create_lease, Name));
        buf->ccontext.NameLength = cpu_to_le16(4);
        buf->Name[0] = 'R';
        buf->Name[1] = 'q';
        buf->Name[2] = 'L';
        buf->Name[3] = 's';
        return buf;
}
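
/*
 * Illustrative sketch only (not part of the original file): the buffer built
 * by create_lease_buf() is a single SMB2 create context.  Very roughly, with
 * offsets taken from struct create_lease in smb2pdu.h, it looks like:
 *
 *	+------------------------+  <- buf->ccontext (generic context header)
 *	| Next, NameOffset,      |     NameOffset -> "RqLs", NameLength = 4
 *	| NameLength, DataOffset,|     DataOffset -> lcontext,
 *	| DataLength             |     DataLength = sizeof(struct lease_context)
 *	+------------------------+
 *	| lcontext: lease key,   |     key copied from lease_key, LeaseState
 *	| state, flags, duration |     derived from the requested oplock level
 *	+------------------------+
 *	| Name: 'R' 'q' 'L' 's'  |
 *	+------------------------+
 *
 * Exact field order and padding are whatever the structure definitions in
 * smb2pdu.h dictate; this is only a reading aid.
 */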

static __u8
parse_lease_state(struct smb2_create_rsp *rsp)
{
        char *data_offset;
        struct create_lease *lc;
        bool found = false;

        data_offset = (char *)rsp;
        data_offset += 4 + le32_to_cpu(rsp->CreateContextsOffset);
        lc = (struct create_lease *)data_offset;
        do {
                char *name = le16_to_cpu(lc->ccontext.NameOffset) + (char *)lc;
                if (le16_to_cpu(lc->ccontext.NameLength) != 4 ||
                    strncmp(name, "RqLs", 4)) {
                        lc = (struct create_lease *)((char *)lc
                                        + le32_to_cpu(lc->ccontext.Next));
                        continue;
                }
                if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
                        return SMB2_OPLOCK_LEVEL_NOCHANGE;
                found = true;
                break;
        } while (le32_to_cpu(lc->ccontext.Next) != 0);

        if (!found)
                return 0;

        return smb2_map_lease_to_oplock(lc->lcontext.LeaseState);
}

int
SMB2_open(const unsigned int xid, struct cifs_tcon *tcon, __le16 *path,
          u64 *persistent_fid, u64 *volatile_fid, __u32 desired_access,
          __u32 create_disposition, __u32 file_attributes, __u32 create_options,
          __u8 *oplock, struct smb2_file_all_info *buf)
{
        struct smb2_create_req *req;
        struct smb2_create_rsp *rsp;
        struct TCP_Server_Info *server;
        struct cifs_ses *ses = tcon->ses;
        struct kvec iov[3];
        int resp_buftype;
        int uni_path_len;
        __le16 *copy_path = NULL;
        int copy_size;
        int rc = 0;
        int num_iovecs = 2;

        cFYI(1, "create/open");

        if (ses && (ses->server))
                server = ses->server;
        else
                return -EIO;

        rc = small_smb2_init(SMB2_CREATE, tcon, (void **) &req);
        if (rc)
                return rc;

        req->ImpersonationLevel = IL_IMPERSONATION;
        req->DesiredAccess = cpu_to_le32(desired_access);
        /* File attributes ignored on open (used in create though) */
        req->FileAttributes = cpu_to_le32(file_attributes);
        req->ShareAccess = FILE_SHARE_ALL_LE;
        req->CreateDisposition = cpu_to_le32(create_disposition);
        req->CreateOptions = cpu_to_le32(create_options);
        uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
        req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req)
                        - 8 /* pad */ - 4 /* do not count rfc1001 len field */);

        iov[0].iov_base = (char *)req;
        /* 4 for rfc1002 length field */
        iov[0].iov_len = get_rfc1002_length(req) + 4;

        /* MUST set path len (NameLength) to 0 opening root of share */
        if (uni_path_len >= 4) {
                req->NameLength = cpu_to_le16(uni_path_len - 2);
                /* -1 since last byte is buf[0] which is sent below (path) */
                iov[0].iov_len--;
                if (uni_path_len % 8 != 0) {
                        copy_size = uni_path_len / 8 * 8;
                        if (copy_size < uni_path_len)
                                copy_size += 8;

                        copy_path = kzalloc(copy_size, GFP_KERNEL);
                        if (!copy_path)
                                return -ENOMEM;
                        memcpy((char *)copy_path, (const char *)path,
                               uni_path_len);
                        uni_path_len = copy_size;
                        path = copy_path;
                }

                iov[1].iov_len = uni_path_len;
                iov[1].iov_base = path;
                /*
                 * -1 since last byte is buf[0] which was counted in
                 * smb2_buf_len.
                 */
                inc_rfc1001_len(req, uni_path_len - 1);
        } else {
                iov[0].iov_len += 7;
                req->hdr.smb2_buf_length = cpu_to_be32(be32_to_cpu(
                                req->hdr.smb2_buf_length) + 8 - 1);
                num_iovecs = 1;
                req->NameLength = 0;
        }

        if (!server->oplocks)
                *oplock = SMB2_OPLOCK_LEVEL_NONE;

        if (!(tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
            *oplock == SMB2_OPLOCK_LEVEL_NONE)
                req->RequestedOplockLevel = *oplock;
        else {
                iov[num_iovecs].iov_base = create_lease_buf(oplock+1, *oplock);
                if (iov[num_iovecs].iov_base == NULL) {
                        cifs_small_buf_release(req);
                        kfree(copy_path);
                        return -ENOMEM;
                }
                iov[num_iovecs].iov_len = sizeof(struct create_lease);
                req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
                req->CreateContextsOffset = cpu_to_le32(
                        sizeof(struct smb2_create_req) - 4 - 8 +
                        iov[num_iovecs-1].iov_len);
                req->CreateContextsLength = cpu_to_le32(
                        sizeof(struct create_lease));
                inc_rfc1001_len(&req->hdr, sizeof(struct create_lease));
                num_iovecs++;
        }

        rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
        rsp = (struct smb2_create_rsp *)iov[0].iov_base;

        if (rc != 0) {
                cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
                goto creat_exit;
        }

        *persistent_fid = rsp->PersistentFileId;
        *volatile_fid = rsp->VolatileFileId;

        if (buf) {
                memcpy(buf, &rsp->CreationTime, 32);
                buf->AllocationSize = rsp->AllocationSize;
                buf->EndOfFile = rsp->EndofFile;
                buf->Attributes = rsp->FileAttributes;
                buf->NumberOfLinks = cpu_to_le32(1);
                buf->DeletePending = 0;
        }

        if (rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE)
                *oplock = parse_lease_state(rsp);
        else
                *oplock = rsp->OplockLevel;
creat_exit:
        kfree(copy_path);
        free_rsp_buf(resp_buftype, rsp);
        return rc;
}

int
SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
           u64 persistent_fid, u64 volatile_fid)
{
        struct smb2_close_req *req;
        struct smb2_close_rsp *rsp;
        struct TCP_Server_Info *server;
        struct cifs_ses *ses = tcon->ses;
        struct kvec iov[1];
        int resp_buftype;
        int rc = 0;

        cFYI(1, "Close");

        if (ses && (ses->server))
                server = ses->server;
        else
                return -EIO;

        rc = small_smb2_init(SMB2_CLOSE, tcon, (void **) &req);
        if (rc)
                return rc;

        req->PersistentFileId = persistent_fid;
        req->VolatileFileId = volatile_fid;

        iov[0].iov_base = (char *)req;
        /* 4 for rfc1002 length field */
        iov[0].iov_len = get_rfc1002_length(req) + 4;

        rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
        rsp = (struct smb2_close_rsp *)iov[0].iov_base;

        if (rc != 0) {
                if (tcon)
                        cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
                goto close_exit;
        }

        /* BB FIXME - decode close response, update inode for caching */

close_exit:
        free_rsp_buf(resp_buftype, rsp);
        return rc;
}

static int
validate_buf(unsigned int offset, unsigned int buffer_length,
             struct smb2_hdr *hdr, unsigned int min_buf_size)
{
        unsigned int smb_len = be32_to_cpu(hdr->smb2_buf_length);
        char *end_of_smb = smb_len + 4 /* RFC1001 length field */ + (char *)hdr;
        char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr;
        char *end_of_buf = begin_of_buf + buffer_length;

        if (buffer_length < min_buf_size) {
                cERROR(1, "buffer length %d smaller than minimum size %d",
                           buffer_length, min_buf_size);
                return -EINVAL;
        }

        /* check if beyond RFC1001 maximum length */
        if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) {
                cERROR(1, "buffer length %d or smb length %d too large",
                           buffer_length, smb_len);
                return -EINVAL;
        }

        if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) {
                cERROR(1, "illegal server response, bad offset to data");
                return -EINVAL;
        }

        return 0;
}

/*
 * If SMB buffer fields are valid, copy into temporary buffer to hold result.
 * Caller must free buffer.
 */
static int
validate_and_copy_buf(unsigned int offset, unsigned int buffer_length,
                      struct smb2_hdr *hdr, unsigned int minbufsize,
                      char *data)
{
        char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr;
        int rc;

        if (!data)
                return -EINVAL;

        rc = validate_buf(offset, buffer_length, hdr, minbufsize);
        if (rc)
                return rc;

        memcpy(data, begin_of_buf, buffer_length);

        return 0;
}

static int
query_info(const unsigned int xid, struct cifs_tcon *tcon,
           u64 persistent_fid, u64 volatile_fid, u8 info_class,
           size_t output_len, size_t min_len, void *data)
{
        struct smb2_query_info_req *req;
        struct smb2_query_info_rsp *rsp = NULL;
        struct kvec iov[2];
        int rc = 0;
        int resp_buftype;
        struct TCP_Server_Info *server;
        struct cifs_ses *ses = tcon->ses;

        cFYI(1, "Query Info");

        if (ses && (ses->server))
                server = ses->server;
        else
                return -EIO;

        rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
        if (rc)
                return rc;

        req->InfoType = SMB2_O_INFO_FILE;
        req->FileInfoClass = info_class;
        req->PersistentFileId = persistent_fid;
        req->VolatileFileId = volatile_fid;
        /* 4 for rfc1002 length field and 1 for Buffer */
        req->InputBufferOffset =
                cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
        req->OutputBufferLength = cpu_to_le32(output_len);

        iov[0].iov_base = (char *)req;
        /* 4 for rfc1002 length field */
        iov[0].iov_len = get_rfc1002_length(req) + 4;

        rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
        rsp = (struct smb2_query_info_rsp *)iov[0].iov_base;

        if (rc) {
                cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
                goto qinf_exit;
        }

        rc = validate_and_copy_buf(le16_to_cpu(rsp->OutputBufferOffset),
                                   le32_to_cpu(rsp->OutputBufferLength),
                                   &rsp->hdr, min_len, data);

qinf_exit:
        free_rsp_buf(resp_buftype, rsp);
        return rc;
}

int
SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
                u64 persistent_fid, u64 volatile_fid,
                struct smb2_file_all_info *data)
{
        return query_info(xid, tcon, persistent_fid, volatile_fid,
                          FILE_ALL_INFORMATION,
                          sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
                          sizeof(struct smb2_file_all_info), data);
}

int
SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
                 u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid)
{
        return query_info(xid, tcon, persistent_fid, volatile_fid,
                          FILE_INTERNAL_INFORMATION,
                          sizeof(struct smb2_file_internal_info),
                          sizeof(struct smb2_file_internal_info), uniqueid);
}

/*
 * This is a no-op for now. We're not really interested in the reply, but
 * rather in the fact that the server sent one and that server->lstrp
 * gets updated.
 *
 * FIXME: maybe we should consider checking that the reply matches request?
 */
static void
smb2_echo_callback(struct mid_q_entry *mid)
{
        struct TCP_Server_Info *server = mid->callback_data;
        struct smb2_echo_rsp *smb2 = (struct smb2_echo_rsp *)mid->resp_buf;
        unsigned int credits_received = 1;

        if (mid->mid_state == MID_RESPONSE_RECEIVED)
                credits_received = le16_to_cpu(smb2->hdr.CreditRequest);

        DeleteMidQEntry(mid);
        add_credits(server, credits_received, CIFS_ECHO_OP);
}

int
SMB2_echo(struct TCP_Server_Info *server)
{
        struct smb2_echo_req *req;
        int rc = 0;
        struct kvec iov;
        struct smb_rqst rqst = { .rq_iov = &iov,
                                 .rq_nvec = 1 };

        cFYI(1, "In echo request");

        rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
        if (rc)
                return rc;

        req->hdr.CreditRequest = cpu_to_le16(1);

        iov.iov_base = (char *)req;
        /* 4 for rfc1002 length field */
        iov.iov_len = get_rfc1002_length(req) + 4;

        rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, server,
                             CIFS_ECHO_OP);
        if (rc)
                cFYI(1, "Echo request failed: %d", rc);

        cifs_small_buf_release(req);
        return rc;
}

int
SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
           u64 volatile_fid)
{
        struct smb2_flush_req *req;
        struct TCP_Server_Info *server;
        struct cifs_ses *ses = tcon->ses;
        struct kvec iov[1];
        int resp_buftype;
        int rc = 0;

        cFYI(1, "Flush");

        if (ses && (ses->server))
                server = ses->server;
        else
                return -EIO;

        rc = small_smb2_init(SMB2_FLUSH, tcon, (void **) &req);
        if (rc)
                return rc;

        req->PersistentFileId = persistent_fid;
        req->VolatileFileId = volatile_fid;

        iov[0].iov_base = (char *)req;
        /* 4 for rfc1002 length field */
        iov[0].iov_len = get_rfc1002_length(req) + 4;

        rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);

        if ((rc != 0) && tcon)
                cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);

        free_rsp_buf(resp_buftype, iov[0].iov_base);
        return rc;
}

/*
 * To form a chain of read requests, any read requests after the first should
 * have the end_of_chain boolean set to true.
 */
static int
smb2_new_read_req(struct kvec *iov, struct cifs_io_parms *io_parms,
                  unsigned int remaining_bytes, int request_type)
{
        int rc = -EACCES;
        struct smb2_read_req *req = NULL;

        rc = small_smb2_init(SMB2_READ, io_parms->tcon, (void **) &req);
        if (rc)
                return rc;
        if (io_parms->tcon->ses->server == NULL)
                return -ECONNABORTED;

        req->hdr.ProcessId = cpu_to_le32(io_parms->pid);

        req->PersistentFileId = io_parms->persistent_fid;
        req->VolatileFileId = io_parms->volatile_fid;
        req->ReadChannelInfoOffset = 0; /* reserved */
        req->ReadChannelInfoLength = 0; /* reserved */
        req->Channel = 0; /* reserved */
        req->MinimumCount = 0;
        req->Length = cpu_to_le32(io_parms->length);
        req->Offset = cpu_to_le64(io_parms->offset);

        if (request_type & CHAINED_REQUEST) {
                if (!(request_type & END_OF_CHAIN)) {
                        /* 4 for rfc1002 length field */
                        req->hdr.NextCommand =
                                cpu_to_le32(get_rfc1002_length(req) + 4);
                } else /* END_OF_CHAIN */
                        req->hdr.NextCommand = 0;
                if (request_type & RELATED_REQUEST) {
                        req->hdr.Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
                        /*
                         * Related requests use info from previous read request
                         * in chain.
                         */
                        req->hdr.SessionId = 0xFFFFFFFF;
                        req->hdr.TreeId = 0xFFFFFFFF;
                        req->PersistentFileId = 0xFFFFFFFF;
                        req->VolatileFileId = 0xFFFFFFFF;
                }
        }
        if (remaining_bytes > io_parms->length)
                req->RemainingBytes = cpu_to_le32(remaining_bytes);
        else
                req->RemainingBytes = 0;

        iov[0].iov_base = (char *)req;
        /* 4 for rfc1002 length field */
        iov[0].iov_len = get_rfc1002_length(req) + 4;
        return rc;
}
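
/*
 * Illustrative sketch only (not part of the original file): for a chain of
 * three reads built with smb2_new_read_req(), the request_type flags would
 * combine roughly as follows (flag names as used in the function above; the
 * iovN/parmsN names are hypothetical):
 *
 *	smb2_new_read_req(iov0, parms0, total - len0, CHAINED_REQUEST);
 *	smb2_new_read_req(iov1, parms1, total - len0 - len1,
 *			  CHAINED_REQUEST | RELATED_REQUEST);
 *	smb2_new_read_req(iov2, parms2, 0,
 *			  CHAINED_REQUEST | RELATED_REQUEST | END_OF_CHAIN);
 *
 * i.e. every element is CHAINED_REQUEST, elements after the first reuse the
 * session/tree/file ids of the previous request via RELATED_REQUEST, and only
 * the last one sets END_OF_CHAIN so that NextCommand stays 0.
 */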

static void
smb2_readv_callback(struct mid_q_entry *mid)
{
        struct cifs_readdata *rdata = mid->callback_data;
        struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
        struct TCP_Server_Info *server = tcon->ses->server;
        struct smb2_hdr *buf = (struct smb2_hdr *)rdata->iov.iov_base;
        unsigned int credits_received = 1;
        struct smb_rqst rqst = { .rq_iov = &rdata->iov,
                                 .rq_nvec = 1,
                                 .rq_pages = rdata->pages,
                                 .rq_npages = rdata->nr_pages,
                                 .rq_pagesz = rdata->pagesz,
                                 .rq_tailsz = rdata->tailsz };

        cFYI(1, "%s: mid=%llu state=%d result=%d bytes=%u", __func__,
                mid->mid, mid->mid_state, rdata->result, rdata->bytes);

        switch (mid->mid_state) {
        case MID_RESPONSE_RECEIVED:
                credits_received = le16_to_cpu(buf->CreditRequest);
                /* result already set, check signature */
                if (server->sec_mode &
                    (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
                        int rc;

                        rc = smb2_verify_signature(&rqst, server);
                        if (rc)
                                cERROR(1, "SMB signature verification returned "
                                       "error = %d", rc);
                }
                /* FIXME: should this be counted toward the initiating task? */
                task_io_account_read(rdata->bytes);
                cifs_stats_bytes_read(tcon, rdata->bytes);
                break;
        case MID_REQUEST_SUBMITTED:
        case MID_RETRY_NEEDED:
                rdata->result = -EAGAIN;
                break;
        default:
                if (rdata->result != -ENODATA)
                        rdata->result = -EIO;
        }

        if (rdata->result)
                cifs_stats_fail_inc(tcon, SMB2_READ_HE);

        queue_work(cifsiod_wq, &rdata->work);
        DeleteMidQEntry(mid);
        add_credits(server, credits_received, 0);
}

/* smb2_async_readv - send an async read, and set up mid to handle result */
int
smb2_async_readv(struct cifs_readdata *rdata)
{
        int rc;
        struct smb2_hdr *buf;
        struct cifs_io_parms io_parms;
        struct smb_rqst rqst = { .rq_iov = &rdata->iov,
                                 .rq_nvec = 1 };

        cFYI(1, "%s: offset=%llu bytes=%u", __func__,
                rdata->offset, rdata->bytes);

        io_parms.tcon = tlink_tcon(rdata->cfile->tlink);
        io_parms.offset = rdata->offset;
        io_parms.length = rdata->bytes;
        io_parms.persistent_fid = rdata->cfile->fid.persistent_fid;
        io_parms.volatile_fid = rdata->cfile->fid.volatile_fid;
        io_parms.pid = rdata->pid;
        rc = smb2_new_read_req(&rdata->iov, &io_parms, 0, 0);
        if (rc)
                return rc;

        buf = (struct smb2_hdr *)rdata->iov.iov_base;
        /* 4 for rfc1002 length field */
        rdata->iov.iov_len = get_rfc1002_length(rdata->iov.iov_base) + 4;

        kref_get(&rdata->refcount);
        rc = cifs_call_async(io_parms.tcon->ses->server, &rqst,
                             cifs_readv_receive, smb2_readv_callback,
                             rdata, 0);
        if (rc) {
                kref_put(&rdata->refcount, cifs_readdata_release);
                cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
        }

        cifs_small_buf_release(buf);
        return rc;
}

int
SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
          unsigned int *nbytes, char **buf, int *buf_type)
{
        int resp_buftype, rc = -EACCES;
        struct smb2_read_rsp *rsp = NULL;
        struct kvec iov[1];

        *nbytes = 0;
        rc = smb2_new_read_req(iov, io_parms, 0, 0);
        if (rc)
                return rc;

        rc = SendReceive2(xid, io_parms->tcon->ses, iov, 1,
                          &resp_buftype, CIFS_LOG_ERROR);

        rsp = (struct smb2_read_rsp *)iov[0].iov_base;

        if (rsp->hdr.Status == STATUS_END_OF_FILE) {
                free_rsp_buf(resp_buftype, iov[0].iov_base);
                return 0;
        }

        if (rc) {
                cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
                cERROR(1, "Send error in read = %d", rc);
        } else {
                *nbytes = le32_to_cpu(rsp->DataLength);
                if ((*nbytes > CIFS_MAX_MSGSIZE) ||
                    (*nbytes > io_parms->length)) {
                        cFYI(1, "bad length %d for count %d", *nbytes,
                                io_parms->length);
                        rc = -EIO;
                        *nbytes = 0;
                }
        }

        if (*buf) {
                memcpy(*buf, (char *)rsp->hdr.ProtocolId + rsp->DataOffset,
                       *nbytes);
                free_rsp_buf(resp_buftype, iov[0].iov_base);
        } else if (resp_buftype != CIFS_NO_BUFFER) {
                *buf = iov[0].iov_base;
                if (resp_buftype == CIFS_SMALL_BUFFER)
                        *buf_type = CIFS_SMALL_BUFFER;
                else if (resp_buftype == CIFS_LARGE_BUFFER)
                        *buf_type = CIFS_LARGE_BUFFER;
        }
        return rc;
}

/*
 * Check the mid_state and signature on received buffer (if any), and queue the
 * workqueue completion task.
 */
static void
smb2_writev_callback(struct mid_q_entry *mid)
{
        struct cifs_writedata *wdata = mid->callback_data;
        struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
        unsigned int written;
        struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
        unsigned int credits_received = 1;

        switch (mid->mid_state) {
        case MID_RESPONSE_RECEIVED:
                credits_received = le16_to_cpu(rsp->hdr.CreditRequest);
                wdata->result = smb2_check_receive(mid, tcon->ses->server, 0);
                if (wdata->result != 0)
                        break;

                written = le32_to_cpu(rsp->DataLength);
                /*
                 * Mask off high 16 bits when bytes written as returned
                 * by the server is greater than bytes requested by the
                 * client. OS/2 servers are known to set incorrect
                 * CountHigh values.
                 */
                if (written > wdata->bytes)
                        written &= 0xFFFF;

                if (written < wdata->bytes)
                        wdata->result = -ENOSPC;
                else
                        wdata->bytes = written;
                break;
        case MID_REQUEST_SUBMITTED:
        case MID_RETRY_NEEDED:
                wdata->result = -EAGAIN;
                break;
        default:
                wdata->result = -EIO;
                break;
        }

        if (wdata->result)
                cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);

        queue_work(cifsiod_wq, &wdata->work);
        DeleteMidQEntry(mid);
        add_credits(tcon->ses->server, credits_received, 0);
}

/* smb2_async_writev - send an async write, and set up mid to handle result */
int
smb2_async_writev(struct cifs_writedata *wdata)
{
	int rc = -EACCES;
	struct smb2_write_req *req = NULL;
	struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
	struct kvec iov;
	struct smb_rqst rqst;

	rc = small_smb2_init(SMB2_WRITE, tcon, (void **) &req);
	if (rc)
		goto async_writev_out;

	req->hdr.ProcessId = cpu_to_le32(wdata->cfile->pid);

	req->PersistentFileId = wdata->cfile->fid.persistent_fid;
	req->VolatileFileId = wdata->cfile->fid.volatile_fid;
	req->WriteChannelInfoOffset = 0;
	req->WriteChannelInfoLength = 0;
	req->Channel = 0;
	req->Offset = cpu_to_le64(wdata->offset);
	/* 4 for rfc1002 length field */
	req->DataOffset = cpu_to_le16(
				offsetof(struct smb2_write_req, Buffer) - 4);
	req->RemainingBytes = 0;

	/* 4 for rfc1002 length field and 1 for Buffer */
	iov.iov_len = get_rfc1002_length(req) + 4 - 1;
	iov.iov_base = req;

	rqst.rq_iov = &iov;
	rqst.rq_nvec = 1;
	rqst.rq_pages = wdata->pages;
	rqst.rq_npages = wdata->nr_pages;
	rqst.rq_pagesz = wdata->pagesz;
	rqst.rq_tailsz = wdata->tailsz;

	cFYI(1, "async write at %llu %u bytes", wdata->offset, wdata->bytes);

	req->Length = cpu_to_le32(wdata->bytes);

	inc_rfc1001_len(&req->hdr, wdata->bytes - 1 /* Buffer */);

	kref_get(&wdata->refcount);
	rc = cifs_call_async(tcon->ses->server, &rqst, NULL,
			     smb2_writev_callback, wdata, 0);
	if (rc) {
		kref_put(&wdata->refcount, cifs_writedata_release);
		cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
	}

async_writev_out:
	cifs_small_buf_release(req);
	return rc;
}
/*
 * SMB2_write is given an iov pointer to a kvec array and n_vec data elements.
 * n_vec must be at least 1 and is the number of elements with data to write,
 * beginning at position 1 in the iov array (iov[0] is filled in here with the
 * request itself). The total data length is given by io_parms->length.
 */
int
SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
	   unsigned int *nbytes, struct kvec *iov, int n_vec)
{
	int rc = 0;
	struct smb2_write_req *req = NULL;
	struct smb2_write_rsp *rsp = NULL;
	int resp_buftype;

	*nbytes = 0;

	if (n_vec < 1)
		return rc;

	/* check the connection before allocating the request buffer */
	if (io_parms->tcon->ses->server == NULL)
		return -ECONNABORTED;

	rc = small_smb2_init(SMB2_WRITE, io_parms->tcon, (void **) &req);
	if (rc)
		return rc;

	req->hdr.ProcessId = cpu_to_le32(io_parms->pid);

	req->PersistentFileId = io_parms->persistent_fid;
	req->VolatileFileId = io_parms->volatile_fid;
	req->WriteChannelInfoOffset = 0;
	req->WriteChannelInfoLength = 0;
	req->Channel = 0;
	req->Length = cpu_to_le32(io_parms->length);
	req->Offset = cpu_to_le64(io_parms->offset);
	/* 4 for rfc1002 length field */
	req->DataOffset = cpu_to_le16(
				offsetof(struct smb2_write_req, Buffer) - 4);
	req->RemainingBytes = 0;

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field and 1 for Buffer */
	iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;

	/* length of entire message including data to be written */
	inc_rfc1001_len(req, io_parms->length - 1 /* Buffer */);

	rc = SendReceive2(xid, io_parms->tcon->ses, iov, n_vec + 1,
			  &resp_buftype, 0);
	rsp = (struct smb2_write_rsp *)iov[0].iov_base;

	if (rc) {
		cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE);
		cERROR(1, "Send error in write = %d", rc);
	} else
		*nbytes = le32_to_cpu(rsp->DataLength);

	free_rsp_buf(resp_buftype, rsp);
	return rc;
}
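/*
 * num_entries - count the directory entries in a query directory response.
 * Walks the entries from bufstart toward end_of_buf, stops at the first entry
 * that would overflow the buffer, and leaves *lastentry pointing at the last
 * valid entry. size is the fixed (non-name) size of a single entry.
 */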
static unsigned int
num_entries(char *bufstart, char *end_of_buf, char **lastentry, size_t size)
{
	int len;
	unsigned int entrycount = 0;
	unsigned int next_offset = 0;
	FILE_DIRECTORY_INFO *entryptr;

	if (bufstart == NULL)
		return 0;

	entryptr = (FILE_DIRECTORY_INFO *)bufstart;

	while (1) {
		entryptr = (FILE_DIRECTORY_INFO *)
					((char *)entryptr + next_offset);

		if ((char *)entryptr + size > end_of_buf) {
			cERROR(1, "malformed search entry would overflow");
			break;
		}

		len = le32_to_cpu(entryptr->FileNameLength);
		if ((char *)entryptr + len + size > end_of_buf) {
			cERROR(1, "directory entry name would overflow frame "
				  "end of buf %p", end_of_buf);
			break;
		}

		*lastentry = (char *)entryptr;
		entrycount++;

		next_offset = le32_to_cpu(entryptr->NextEntryOffset);
		if (!next_offset)
			break;
	}

	return entrycount;
}
/*
 * Readdir/FindFirst
 */
int
SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
		     u64 persistent_fid, u64 volatile_fid, int index,
		     struct cifs_search_info *srch_inf)
{
	struct smb2_query_directory_req *req;
	struct smb2_query_directory_rsp *rsp = NULL;
	struct kvec iov[2];
	int rc = 0;
	int len;
	int resp_buftype = CIFS_NO_BUFFER;
	unsigned char *bufptr;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;
	__le16 asterisk = cpu_to_le16('*');
	char *end_of_smb;
	unsigned int output_size = CIFSMaxBufSize;
	size_t info_buf_size;

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req);
	if (rc)
		return rc;

	switch (srch_inf->info_level) {
	case SMB_FIND_FILE_DIRECTORY_INFO:
		req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
		info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
		break;
	case SMB_FIND_FILE_ID_FULL_DIR_INFO:
		req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
		info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
		break;
	default:
		cERROR(1, "info level %u isn't supported",
		       srch_inf->info_level);
		rc = -EINVAL;
		goto qdir_exit;
	}

	req->FileIndex = cpu_to_le32(index);
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	len = 0x2;
	bufptr = req->Buffer;
	memcpy(bufptr, &asterisk, len);

	req->FileNameOffset =
		cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1 - 4);
	req->FileNameLength = cpu_to_le16(len);
	/*
	 * BB could be 30 bytes or so longer if we used SMB2 specific
	 * buffer lengths, but this is safe and close enough.
	 */
	output_size = min_t(unsigned int, output_size, server->maxBuf);
	output_size = min_t(unsigned int, output_size, 2 << 15);
	req->OutputBufferLength = cpu_to_le32(output_size);

	iov[0].iov_base = (char *)req;
	/* 4 for RFC1001 length and 1 for Buffer */
	iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;

	iov[1].iov_base = (char *)(req->Buffer);
	iov[1].iov_len = len;

	inc_rfc1001_len(req, len - 1 /* Buffer */);

	rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
	rsp = (struct smb2_query_directory_rsp *)iov[0].iov_base;

	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
		goto qdir_exit;
	}

	rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset),
			  le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr,
			  info_buf_size);
	if (rc)
		goto qdir_exit;

	srch_inf->unicode = true;

	if (srch_inf->ntwrk_buf_start) {
		if (srch_inf->smallBuf)
			cifs_small_buf_release(srch_inf->ntwrk_buf_start);
		else
			cifs_buf_release(srch_inf->ntwrk_buf_start);
	}
	srch_inf->ntwrk_buf_start = (char *)rsp;
	srch_inf->srch_entries_start = srch_inf->last_entry = 4 /* rfclen */ +
		(char *)&rsp->hdr + le16_to_cpu(rsp->OutputBufferOffset);
	/* 4 for rfc1002 length field */
	end_of_smb = get_rfc1002_length(rsp) + 4 + (char *)&rsp->hdr;
	srch_inf->entries_in_buffer =
			num_entries(srch_inf->srch_entries_start, end_of_smb,
				    &srch_inf->last_entry, info_buf_size);
	srch_inf->index_of_last_entry += srch_inf->entries_in_buffer;
	cFYI(1, "num entries %d last_index %lld srch start %p srch end %p",
		srch_inf->entries_in_buffer, srch_inf->index_of_last_entry,
		srch_inf->srch_entries_start, srch_inf->last_entry);
	if (resp_buftype == CIFS_LARGE_BUFFER)
		srch_inf->smallBuf = false;
	else if (resp_buftype == CIFS_SMALL_BUFFER)
		srch_inf->smallBuf = true;
	else
		cERROR(1, "illegal search buffer type");

	if (rsp->hdr.Status == STATUS_NO_MORE_FILES)
		srch_inf->endOfSearch = 1;
	else
		srch_inf->endOfSearch = 0;

	return rc;

qdir_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}
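/*
 * send_set_info - build and send an SMB2 SET_INFO request for an open file.
 * The first data/size pair is copied into the request's Buffer; any further
 * pairs are appended as additional iovecs and added to BufferLength.
 */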
static int
send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, u32 pid, int info_class,
	      unsigned int num, void **data, unsigned int *size)
{
	struct smb2_set_info_req *req;
	struct smb2_set_info_rsp *rsp = NULL;
	struct kvec *iov;
	int rc = 0;
	int resp_buftype;
	unsigned int i;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	if (!num)
		return -EINVAL;

	iov = kmalloc(sizeof(struct kvec) * num, GFP_KERNEL);
	if (!iov)
		return -ENOMEM;

	rc = small_smb2_init(SMB2_SET_INFO, tcon, (void **) &req);
	if (rc) {
		kfree(iov);
		return rc;
	}

	req->hdr.ProcessId = cpu_to_le32(pid);

	req->InfoType = SMB2_O_INFO_FILE;
	req->FileInfoClass = info_class;
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	/* 4 for RFC1001 length and 1 for Buffer */
	req->BufferOffset =
			cpu_to_le16(sizeof(struct smb2_set_info_req) - 1 - 4);
	req->BufferLength = cpu_to_le32(*size);

	inc_rfc1001_len(req, *size - 1 /* Buffer */);

	memcpy(req->Buffer, *data, *size);

	iov[0].iov_base = (char *)req;
	/* 4 for RFC1001 length */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	for (i = 1; i < num; i++) {
		inc_rfc1001_len(req, size[i]);
		le32_add_cpu(&req->BufferLength, size[i]);
		iov[i].iov_base = (char *)data[i];
		iov[i].iov_len = size[i];
	}

	rc = SendReceive2(xid, ses, iov, num, &resp_buftype, 0);
	rsp = (struct smb2_set_info_rsp *)iov[0].iov_base;

	if (rc != 0) {
		cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE);
		goto out;
	}

out:
	free_rsp_buf(resp_buftype, rsp);
	kfree(iov);
	return rc;
}
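/*
 * SMB2_rename - rename the file referenced by the open handle to target_file
 * using FILE_RENAME_INFORMATION, replacing an existing target if present.
 */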
int
SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
	    u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
{
	struct smb2_file_rename_info info;
	void **data;
	unsigned int size[2];
	int rc;
	int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));

	data = kmalloc(sizeof(void *) * 2, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	info.ReplaceIfExists = 1; /* 1 = replace existing target with new */
				  /* 0 = fail if target already exists */
	info.RootDirectory = 0;	  /* must be zero ("MBZ") for network operations */
	info.FileNameLength = cpu_to_le32(len);

	data[0] = &info;
	size[0] = sizeof(struct smb2_file_rename_info);

	data[1] = target_file;
	size[1] = len + 2 /* null */;

	rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
			   current->tgid, FILE_RENAME_INFORMATION, 2, data,
			   size);
	kfree(data);
	return rc;
}
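/*
 * SMB2_set_hardlink - create a hard link named target_file to the file
 * referenced by the open handle using FILE_LINK_INFORMATION. Fails if the
 * link name already exists.
 */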
int
SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
		  u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
{
	struct smb2_file_link_info info;
	void **data;
	unsigned int size[2];
	int rc;
	int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));

	data = kmalloc(sizeof(void *) * 2, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	info.ReplaceIfExists = 0; /* 1 = replace existing link with new */
				  /* 0 = fail if link already exists */
	info.RootDirectory = 0;	  /* must be zero ("MBZ") for network operations */
	info.FileNameLength = cpu_to_le32(len);

	data[0] = &info;
	size[0] = sizeof(struct smb2_file_link_info);

	data[1] = target_file;
	size[1] = len + 2 /* null */;

	rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
			   current->tgid, FILE_LINK_INFORMATION, 2, data, size);
	kfree(data);
	return rc;
}
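/*
 * SMB2_set_eof - set the end-of-file position of an open handle using
 * FILE_END_OF_FILE_INFORMATION.
 */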
int
SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
	     u64 volatile_fid, u32 pid, __le64 *eof)
{
	struct smb2_file_eof_info info;
	void *data;
	unsigned int size;

	info.EndOfFile = *eof;

	data = &info;
	size = sizeof(struct smb2_file_eof_info);

	return send_set_info(xid, tcon, persistent_fid, volatile_fid, pid,
			     FILE_END_OF_FILE_INFORMATION, 1, &data, &size);
}
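/*
 * SMB2_set_info - set basic attributes and timestamps of an open handle
 * using FILE_BASIC_INFORMATION.
 */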
int
SMB2_set_info(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, FILE_BASIC_INFO *buf)
{
	unsigned int size;
	size = sizeof(FILE_BASIC_INFO);
	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
			     current->tgid, FILE_BASIC_INFORMATION, 1,
			     (void **)&buf, &size);
}
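/*
 * SMB2_oplock_break - acknowledge an oplock break by sending the new oplock
 * level for the given file handle; no response is waited for.
 */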
int
SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
		  const u64 persistent_fid, const u64 volatile_fid,
		  __u8 oplock_level)
{
	int rc;
	struct smb2_oplock_break *req = NULL;

	cFYI(1, "SMB2_oplock_break");
	rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);
	if (rc)
		return rc;

	req->VolatileFid = volatile_fid;
	req->PersistentFid = persistent_fid;
	req->OplockLevel = oplock_level;
	req->hdr.CreditRequest = cpu_to_le16(1);

	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, CIFS_OBREAK_OP);
	/* SMB2 buffer freed by function above */

	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
		cFYI(1, "Send error in Oplock Break = %d", rc);
	}

	return rc;
}
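/*
 * copy_fs_info_to_kstatfs - translate an SMB2 FS_FULL_SIZE_INFORMATION
 * response into kstatfs fields (block size and total/free/available counts).
 */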
static void
copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
			struct kstatfs *kst)
{
	kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
			  le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
	kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
	kst->f_bfree  = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits);
	kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
	return;
}
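/*
 * build_qfs_info_req - build an SMB2 QUERY_INFO request for filesystem
 * information at the given info level and fill in the supplied kvec.
 */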
static int
build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level,
		   int outbuf_len, u64 persistent_fid, u64 volatile_fid)
{
	int rc;
	struct smb2_query_info_req *req;

	cFYI(1, "Query FSInfo level %d", level);

	if ((tcon->ses == NULL) || (tcon->ses->server == NULL))
		return -EIO;

	rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
	if (rc)
		return rc;

	req->InfoType = SMB2_O_INFO_FILESYSTEM;
	req->FileInfoClass = level;
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;
	/* 4 for rfc1002 length field and 1 for pad */
	req->InputBufferOffset =
			cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
	req->OutputBufferLength = cpu_to_le32(
		outbuf_len + sizeof(struct smb2_query_info_rsp) - 1 - 4);

	iov->iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov->iov_len = get_rfc1002_length(req) + 4;
	return 0;
}
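/*
 * SMB2_QFS_info - query FS_FULL_SIZE_INFORMATION for the share referenced by
 * the open handle and fill in fsdata.
 */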
int
SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
{
	struct smb2_query_info_rsp *rsp = NULL;
	struct kvec iov;
	int rc = 0;
	int resp_buftype;
	struct cifs_ses *ses = tcon->ses;
	struct smb2_fs_full_size_info *info = NULL;

	rc = build_qfs_info_req(&iov, tcon, FS_FULL_SIZE_INFORMATION,
				sizeof(struct smb2_fs_full_size_info),
				persistent_fid, volatile_fid);
	if (rc)
		return rc;

	rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, 0);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto qinf_exit;
	}
	rsp = (struct smb2_query_info_rsp *)iov.iov_base;

	info = (struct smb2_fs_full_size_info *)(4 /* RFC1001 len */ +
		le16_to_cpu(rsp->OutputBufferOffset) + (char *)&rsp->hdr);
	rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset),
			  le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr,
			  sizeof(struct smb2_fs_full_size_info));
	if (!rc)
		copy_fs_info_to_kstatfs(info, fsdata);

qinf_exit:
	free_rsp_buf(resp_buftype, iov.iov_base);
	return rc;
}
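/*
 * smb2_lockv - send an SMB2 LOCK request containing num_lock lock elements
 * for the given file handle.
 */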
int
smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
	   const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
	   const __u32 num_lock, struct smb2_lock_element *buf)
{
	int rc = 0;
	struct smb2_lock_req *req = NULL;
	struct kvec iov[2];
	int resp_buf_type;
	unsigned int count;

	cFYI(1, "smb2_lockv num lock %d", num_lock);

	rc = small_smb2_init(SMB2_LOCK, tcon, (void **) &req);
	if (rc)
		return rc;

	req->hdr.ProcessId = cpu_to_le32(pid);
	req->LockCount = cpu_to_le16(num_lock);

	req->PersistentFileId = persist_fid;
	req->VolatileFileId = volatile_fid;

	count = num_lock * sizeof(struct smb2_lock_element);
	inc_rfc1001_len(req, count - sizeof(struct smb2_lock_element));

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field and count for all locks */
	iov[0].iov_len = get_rfc1002_length(req) + 4 - count;
	iov[1].iov_base = (char *)buf;
	iov[1].iov_len = count;

	cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
	rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type, CIFS_NO_RESP);
	if (rc) {
		cFYI(1, "Send error in smb2_lockv = %d", rc);
		cifs_stats_fail_inc(tcon, SMB2_LOCK_HE);
	}

	return rc;
}
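/*
 * SMB2_lock - lock or unlock a single byte range; wraps smb2_lockv with one
 * lock element. For non-blocking lock requests FAIL_IMMEDIATELY is set.
 */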
int
SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
	  const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
	  const __u64 length, const __u64 offset, const __u32 lock_flags,
	  const bool wait)
{
	struct smb2_lock_element lock;

	lock.Offset = cpu_to_le64(offset);
	lock.Length = cpu_to_le64(length);
	lock.Flags = cpu_to_le32(lock_flags);
	if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK)
		lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY);

	return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock);
}
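/*
 * SMB2_lease_break - acknowledge a lease break by sending the lease key and
 * new lease state; no response is waited for.
 */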
int
SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
		 __u8 *lease_key, const __le32 lease_state)
{
	int rc;
	struct smb2_lease_ack *req = NULL;

	cFYI(1, "SMB2_lease_break");
	rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);
	if (rc)
		return rc;

	req->hdr.CreditRequest = cpu_to_le16(1);
	req->StructureSize = cpu_to_le16(36);
	inc_rfc1001_len(req, 12);

	memcpy(req->LeaseKey, lease_key, 16);
	req->LeaseState = lease_state;

	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, CIFS_OBREAK_OP);
	/* SMB2 buffer freed by function above */

	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
		cFYI(1, "Send error in Lease Break = %d", rc);
	}

	return rc;
}