/*
 *   fs/cifs/smb2pdu.c
 *
 *   Copyright (C) International Business Machines Corp., 2009, 2012
 *                 Etersoft, 2012
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Pavel Shilovsky (pshilovsky@samba.org) 2012
 *
 *   Contains the routines for constructing the SMB2 PDUs themselves
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

 /* SMB2 PDU handling routines here - except for leftovers (eg session setup) */
 /* Note that there are handle based routines which must be */
 /* treated slightly differently for reconnection purposes since we never */
 /* want to reuse a stale file handle and only the caller knows the file info */

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/vfs.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <linux/xattr.h>
#include "smb2pdu.h"
#include "cifsglob.h"
#include "cifsacl.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "ntlmssp.h"
#include "smb2status.h"
#include "smb2glob.h"
#include "cifspdu.h"

/*
 *  The following table defines the expected "StructureSize" of SMB2 requests
 *  in order by SMB2 command.  This is similar to "wct" in SMB/CIFS requests.
 *
 *  Note that commands are defined in smb2pdu.h in le16 but the array below is
 *  indexed by command in host byte order.
 */
static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
	/* SMB2_NEGOTIATE */ 36,
	/* SMB2_SESSION_SETUP */ 25,
	/* SMB2_LOGOFF */ 4,
	/* SMB2_TREE_CONNECT */ 9,
	/* SMB2_TREE_DISCONNECT */ 4,
	/* SMB2_CREATE */ 57,
	/* SMB2_CLOSE */ 24,
	/* SMB2_FLUSH */ 24,
	/* SMB2_READ */ 49,
	/* SMB2_WRITE */ 49,
	/* SMB2_LOCK */ 48,
	/* SMB2_IOCTL */ 57,
	/* SMB2_CANCEL */ 4,
	/* SMB2_ECHO */ 4,
	/* SMB2_QUERY_DIRECTORY */ 33,
	/* SMB2_CHANGE_NOTIFY */ 32,
	/* SMB2_QUERY_INFO */ 41,
	/* SMB2_SET_INFO */ 33,
	/* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
};
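
/*
 * smb2_hdr_assemble - zero the request buffer and fill in the common SMB2
 * header fields (protocol id, StructureSize, command, credits, pid, tree id,
 * session id and signing/DFS flags) for the given command and tcon.
 */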
static void
smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ ,
		  const struct cifs_tcon *tcon)
{
	struct smb2_pdu *pdu = (struct smb2_pdu *)hdr;
	char *temp = (char *)hdr;
	/* lookup word count ie StructureSize from table */
	__u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_cmd)];

	/*
	 * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of
	 * largest operations (Create)
	 */
	memset(temp, 0, 256);

	/* Note this is only network field converted to big endian */
	hdr->smb2_buf_length = cpu_to_be32(parmsize + sizeof(struct smb2_hdr)
			- 4 /* RFC 1001 length field itself not counted */);

	hdr->ProtocolId[0] = 0xFE;
	hdr->ProtocolId[1] = 'S';
	hdr->ProtocolId[2] = 'M';
	hdr->ProtocolId[3] = 'B';
	hdr->StructureSize = cpu_to_le16(64);
	hdr->Command = smb2_cmd;
	hdr->CreditRequest = cpu_to_le16(2); /* BB make this dynamic */
	hdr->ProcessId = cpu_to_le32((__u16)current->tgid);

	if (!tcon)
		goto out;

	hdr->TreeId = tcon->tid;
	/* Uid is not converted */
	if (tcon->ses)
		hdr->SessionId = tcon->ses->Suid;
	/* BB check following DFS flags BB */
	/* BB do we have to add check for SHI1005_FLAGS_DFS_ROOT too? */
	if (tcon->share_flags & SHI1005_FLAGS_DFS)
		hdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS;
	/* BB how does SMB2 do case sensitive? */
	/*	if (tcon->nocase)
			hdr->Flags |= SMBFLG_CASELESS; */
	if (tcon->ses && tcon->ses->server &&
	    (tcon->ses->server->sec_mode & SECMODE_SIGN_REQUIRED))
		hdr->Flags |= SMB2_FLAGS_SIGNED;
out:
	pdu->StructureSize2 = cpu_to_le16(parmsize);
	return;
}
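
/*
 * smb2_reconnect - before sending a request, make sure the transport and the
 * SMB session/tree connection behind @tcon are still usable, waiting for and
 * driving reconnection when needed.  Returns -EAGAIN for handle based
 * commands so the caller can reopen the file and retry.
 */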
static int
smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
{
	int rc = 0;
	struct nls_table *nls_codepage;
	struct cifs_ses *ses;
	struct TCP_Server_Info *server;

	/*
	 * SMB2s NegProt, SessSetup, Logoff do not have tcon yet so
	 * check for tcp and smb session status done differently
	 * for those three - in the calling routine.
	 */
	if (tcon == NULL)
		return rc;

	if (smb2_command == SMB2_TREE_CONNECT)
		return rc;

	if (tcon->tidStatus == CifsExiting) {
		/*
		 * only tree disconnect, open, and write,
		 * (and ulogoff which does not have tcon)
		 * are allowed as we start force umount.
		 */
		if ((smb2_command != SMB2_WRITE) &&
		    (smb2_command != SMB2_CREATE) &&
		    (smb2_command != SMB2_TREE_DISCONNECT)) {
			cFYI(1, "can not send cmd %d while umounting",
				smb2_command);
			return -ENODEV;
		}
	}
	if ((!tcon->ses) || (tcon->ses->status == CifsExiting) ||
	    (!tcon->ses->server))
		return -EIO;

	ses = tcon->ses;
	server = ses->server;

	/*
	 * Give demultiplex thread up to 10 seconds to reconnect, should be
	 * greater than cifs socket timeout which is 7 seconds
	 */
	while (server->tcpStatus == CifsNeedReconnect) {
		/*
		 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
		 * here since they are implicitly done when session drops.
		 */
		switch (smb2_command) {
		/*
		 * BB Should we keep oplock break and add flush to exceptions?
		 */
		case SMB2_TREE_DISCONNECT:
		case SMB2_CANCEL:
		case SMB2_CLOSE:
		case SMB2_OPLOCK_BREAK:
			return -EAGAIN;
		}

		wait_event_interruptible_timeout(server->response_q,
			(server->tcpStatus != CifsNeedReconnect), 10 * HZ);

		/* are we still trying to reconnect? */
		if (server->tcpStatus != CifsNeedReconnect)
			break;

		/*
		 * on "soft" mounts we wait once. Hard mounts keep
		 * retrying until process is killed or server comes
		 * back on-line
		 */
		if (!tcon->retry) {
			cFYI(1, "gave up waiting on reconnect in smb_init");
			return -EHOSTDOWN;
		}
	}

	if (!tcon->ses->need_reconnect && !tcon->need_reconnect)
		return rc;

	nls_codepage = load_nls_default();

	/*
	 * need to prevent multiple threads trying to simultaneously reconnect
	 * the same SMB session
	 */
	mutex_lock(&tcon->ses->session_mutex);
	rc = cifs_negotiate_protocol(0, tcon->ses);
	if (!rc && tcon->ses->need_reconnect)
		rc = cifs_setup_session(0, tcon->ses, nls_codepage);
	if (rc || !tcon->need_reconnect) {
		mutex_unlock(&tcon->ses->session_mutex);
		goto out;
	}

	cifs_mark_open_files_invalid(tcon);
	rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nls_codepage);
	mutex_unlock(&tcon->ses->session_mutex);
	cFYI(1, "reconnect tcon rc = %d", rc);
	if (rc)
		goto out;
	atomic_inc(&tconInfoReconnectCount);
	/*
	 * BB FIXME add code to check if wsize needs update due to negotiated
	 * smb buffer size shrinking.
	 */
out:
	/*
	 * Check if handle based operation so we know whether we can continue
	 * or not without returning to caller to reset file handle.
	 */
	/*
	 * BB Is flush done by server on drop of tcp session? Should we special
	 * case it and skip above?
	 */
	switch (smb2_command) {
	case SMB2_FLUSH:
	case SMB2_READ:
	case SMB2_WRITE:
	case SMB2_LOCK:
	case SMB2_IOCTL:
	case SMB2_QUERY_DIRECTORY:
	case SMB2_CHANGE_NOTIFY:
	case SMB2_QUERY_INFO:
	case SMB2_SET_INFO:
		return -EAGAIN;
	}
	unload_nls(nls_codepage);
	return rc;
}

/*
 * Allocate and return pointer to an SMB request hdr, and set basic
 * SMB information in the SMB header. If the return code is zero, this
 * function must have filled in request_buf pointer.
 */
static int
small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon,
		void **request_buf)
{
	int rc = 0;

	rc = smb2_reconnect(smb2_command, tcon);
	if (rc)
		return rc;

	/* BB eventually switch this to SMB2 specific small buf size */
	*request_buf = cifs_small_buf_get();
	if (*request_buf == NULL) {
		/* BB should we add a retry in here if not a writepage? */
		return -ENOMEM;
	}

	smb2_hdr_assemble((struct smb2_hdr *) *request_buf, smb2_command, tcon);

	if (tcon != NULL) {
#ifdef CONFIG_CIFS_STATS2
		uint16_t com_code = le16_to_cpu(smb2_command);
		cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
#endif
		cifs_stats_inc(&tcon->num_smbs_sent);
	}

	return rc;
}
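
/* free_rsp_buf - release a response buffer according to its buffer type */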
static void
free_rsp_buf(int resp_buftype, void *rsp)
{
	if (resp_buftype == CIFS_SMALL_BUFFER)
		cifs_small_buf_release(rsp);
	else if (resp_buftype == CIFS_LARGE_BUFFER)
		cifs_buf_release(rsp);
}

/*
 *
 *	SMB2 Worker functions follow:
 *
 *	The general structure of the worker functions is:
 *	1) Call smb2_init (assembles SMB2 header)
 *	2) Initialize SMB2 command specific fields in fixed length area of SMB
 *	3) Call smb_sendrcv2 (sends request on socket and waits for response)
 *	4) Decode SMB2 command specific fields in the fixed length area
 *	5) Decode variable length data area (if any for this SMB2 command type)
 *	6) Call free smb buffer
 *	7) return
 *
 */
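
/*
 * A minimal sketch of that pattern (illustrative only; SMB2_SOME_CMD and
 * struct smb2_xxx_rsp are placeholders, and real workers such as
 * SMB2_negotiate() below add command specific fields, extra iovecs and
 * error handling):
 *
 *	rc = small_smb2_init(SMB2_SOME_CMD, tcon, (void **) &req);
 *	if (rc)
 *		return rc;
 *	... fill in fixed length, command specific fields of *req ...
 *	iov[0].iov_base = (char *)req;
 *	iov[0].iov_len = get_rfc1002_length(req) + 4;
 *	rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags);
 *	rsp = (struct smb2_xxx_rsp *)iov[0].iov_base;
 *	... decode fixed area, then any variable length data area ...
 *	free_rsp_buf(resp_buftype, rsp);
 *	return rc;
 */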
int
SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
{
	struct smb2_negotiate_req *req;
	struct smb2_negotiate_rsp *rsp;
	struct kvec iov[1];
	int rc = 0;
	int resp_buftype;
	struct TCP_Server_Info *server;
	unsigned int sec_flags;
	u16 temp = 0;
	int blob_offset, blob_length;
	char *security_blob;
	int flags = CIFS_NEG_OP;

	cFYI(1, "Negotiate protocol");

	if (ses->server)
		server = ses->server;
	else {
		rc = -EIO;
		return rc;
	}

	rc = small_smb2_init(SMB2_NEGOTIATE, NULL, (void **) &req);
	if (rc)
		return rc;
	/* if any of auth flags (ie not sign or seal) are overridden use them */
	if (ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL)))
		sec_flags = ses->overrideSecFlg;  /* BB FIXME fix sign flags?*/
	else /* if override flags set only sign/seal OR them with global auth */
		sec_flags = global_secflags | ses->overrideSecFlg;

	cFYI(1, "sec_flags 0x%x", sec_flags);

	req->hdr.SessionId = 0;

	req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id);

	req->DialectCount = cpu_to_le16(1); /* One vers= at a time for now */
	inc_rfc1001_len(req, 2);

	/* only one of SMB2 signing flags may be set in SMB2 request */
	if ((sec_flags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN)
		temp = SMB2_NEGOTIATE_SIGNING_REQUIRED;
	else if (sec_flags & CIFSSEC_MAY_SIGN) /* MAY_SIGN is a single flag */
		temp = SMB2_NEGOTIATE_SIGNING_ENABLED;

	req->SecurityMode = cpu_to_le16(temp);

	req->Capabilities = cpu_to_le32(ses->server->vals->req_capabilities);

	memcpy(req->ClientGUID, cifs_client_guid, SMB2_CLIENT_GUID_SIZE);

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags);

	rsp = (struct smb2_negotiate_rsp *)iov[0].iov_base;
	/*
	 * No tcon so can't do
	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
	 */
	if (rc != 0)
		goto neg_exit;

	cFYI(1, "mode 0x%x", rsp->SecurityMode);

	/* BB we may eventually want to match the negotiated vs. requested
	   dialect, even though we are only requesting one at a time */
	if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID))
		cFYI(1, "negotiated smb2.0 dialect");
	else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID))
		cFYI(1, "negotiated smb2.1 dialect");
	else if (rsp->DialectRevision == cpu_to_le16(SMB30_PROT_ID))
		cFYI(1, "negotiated smb3.0 dialect");
	else {
		cERROR(1, "Illegal dialect returned by server %d",
			   le16_to_cpu(rsp->DialectRevision));
		rc = -EIO;
		goto neg_exit;
	}
	server->dialect = le16_to_cpu(rsp->DialectRevision);

	server->maxBuf = le32_to_cpu(rsp->MaxTransactSize);
	server->max_read = le32_to_cpu(rsp->MaxReadSize);
	server->max_write = le32_to_cpu(rsp->MaxWriteSize);
	/* BB Do we need to validate the SecurityMode? */
	server->sec_mode = le16_to_cpu(rsp->SecurityMode);
	server->capabilities = le32_to_cpu(rsp->Capabilities);
	/* Internal types */
	server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;

	security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
					       &rsp->hdr);
	if (blob_length == 0) {
		cERROR(1, "missing security blob on negprot");
		rc = -EIO;
		goto neg_exit;
	}

	cFYI(1, "sec_flags 0x%x", sec_flags);
	if (sec_flags & CIFSSEC_MUST_SIGN) {
		cFYI(1, "Signing required");
		if (!(server->sec_mode & (SMB2_NEGOTIATE_SIGNING_REQUIRED |
		      SMB2_NEGOTIATE_SIGNING_ENABLED))) {
			cERROR(1, "signing required but server lacks support");
			rc = -EOPNOTSUPP;
			goto neg_exit;
		}
		server->sec_mode |= SECMODE_SIGN_REQUIRED;
	} else if (sec_flags & CIFSSEC_MAY_SIGN) {
		cFYI(1, "Signing optional");
		if (server->sec_mode & SMB2_NEGOTIATE_SIGNING_REQUIRED) {
			cFYI(1, "Server requires signing");
			server->sec_mode |= SECMODE_SIGN_REQUIRED;
		} else {
			server->sec_mode &=
				~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
		}
	} else {
		cFYI(1, "Signing disabled");
		if (server->sec_mode & SMB2_NEGOTIATE_SIGNING_REQUIRED) {
			cERROR(1, "Server requires packet signing to be enabled"
				  " in /proc/fs/cifs/SecurityFlags.");
			rc = -EOPNOTSUPP;
			goto neg_exit;
		}
		server->sec_mode &=
			~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
	}

#ifdef CONFIG_SMB2_ASN1 /* BB REMOVEME when updated asn1.c ready */
	rc = decode_neg_token_init(security_blob, blob_length,
				   &server->sec_type);
	if (rc == 1)
		rc = 0;
	else if (rc == 0) {
		rc = -EIO;
		goto neg_exit;
	}
#endif

neg_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}
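
/*
 * SMB2_sess_setup - authenticate an SMB2 session using raw NTLMSSP: send the
 * NTLMSSP NEGOTIATE blob, process the server CHALLENGE carried in the
 * STATUS_MORE_PROCESSING_REQUIRED response, then send the AUTHENTICATE blob.
 * ses->ntlmssp is allocated here and freed by the caller.
 */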
int
SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
		const struct nls_table *nls_cp)
{
	struct smb2_sess_setup_req *req;
	struct smb2_sess_setup_rsp *rsp = NULL;
	struct kvec iov[2];
	int rc = 0;
	int resp_buftype;
	__le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
	struct TCP_Server_Info *server;
	unsigned int sec_flags;
	u8 temp = 0;
	u16 blob_length = 0;
	char *security_blob;
	char *ntlmssp_blob = NULL;
	bool use_spnego = false; /* else use raw ntlmssp */

	cFYI(1, "Session Setup");

	if (ses->server)
		server = ses->server;
	else {
		rc = -EIO;
		return rc;
	}

	/*
	 * If memory allocation is successful, caller of this function
	 * frees it.
	 */
	ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
	if (!ses->ntlmssp)
		return -ENOMEM;

	ses->server->secType = RawNTLMSSP;

ssetup_ntlmssp_authenticate:
	if (phase == NtLmChallenge)
		phase = NtLmAuthenticate; /* if ntlmssp, now final phase */

	rc = small_smb2_init(SMB2_SESSION_SETUP, NULL, (void **) &req);
	if (rc)
		return rc;
	/* if any of auth flags (ie not sign or seal) are overridden use them */
	if (ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL)))
		sec_flags = ses->overrideSecFlg;  /* BB FIXME fix sign flags?*/
	else /* if override flags set only sign/seal OR them with global auth */
		sec_flags = global_secflags | ses->overrideSecFlg;

	cFYI(1, "sec_flags 0x%x", sec_flags);

	req->hdr.SessionId = 0; /* First session, not a reauthenticate */
	req->VcNumber = 0; /* MBZ */
	/* to enable echos and oplocks */
	req->hdr.CreditRequest = cpu_to_le16(3);

	/* only one of SMB2 signing flags may be set in SMB2 request */
	if ((sec_flags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN)
		temp = SMB2_NEGOTIATE_SIGNING_REQUIRED;
	else if (ses->server->sec_mode & SMB2_NEGOTIATE_SIGNING_REQUIRED)
		temp = SMB2_NEGOTIATE_SIGNING_REQUIRED;
	else if (sec_flags & CIFSSEC_MAY_SIGN) /* MAY_SIGN is a single flag */
		temp = SMB2_NEGOTIATE_SIGNING_ENABLED;

	req->SecurityMode = temp;
	req->Capabilities = 0;
	req->Channel = 0; /* MBZ */

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field and 1 for pad */
	iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
	if (phase == NtLmNegotiate) {
		ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
				       GFP_KERNEL);
		if (ntlmssp_blob == NULL) {
			rc = -ENOMEM;
			goto ssetup_exit;
		}
		build_ntlmssp_negotiate_blob(ntlmssp_blob, ses);
		if (use_spnego) {
			/* blob_length = build_spnego_ntlmssp_blob(
					&security_blob,
					sizeof(struct _NEGOTIATE_MESSAGE),
					ntlmssp_blob); */
			/* BB eventually need to add this */
			cERROR(1, "spnego not supported for SMB2 yet");
			rc = -EOPNOTSUPP;
			kfree(ntlmssp_blob);
			goto ssetup_exit;
		} else {
			blob_length = sizeof(struct _NEGOTIATE_MESSAGE);
			/* with raw NTLMSSP we don't encapsulate in SPNEGO */
			security_blob = ntlmssp_blob;
		}
	} else if (phase == NtLmAuthenticate) {
		req->hdr.SessionId = ses->Suid;
		ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
				       GFP_KERNEL);
		if (ntlmssp_blob == NULL) {
			cERROR(1, "failed to malloc ntlmssp blob");
			rc = -ENOMEM;
			goto ssetup_exit;
		}
		rc = build_ntlmssp_auth_blob(ntlmssp_blob, &blob_length, ses,
					     nls_cp);
		if (rc) {
			cFYI(1, "build_ntlmssp_auth_blob failed %d", rc);
			goto ssetup_exit; /* BB double check error handling */
		}
		if (use_spnego) {
			/* blob_length = build_spnego_ntlmssp_blob(
					&security_blob,
					blob_length,
					ntlmssp_blob); */
			cERROR(1, "spnego not supported for SMB2 yet");
			rc = -EOPNOTSUPP;
			kfree(ntlmssp_blob);
			goto ssetup_exit;
		} else {
			security_blob = ntlmssp_blob;
		}
	} else {
		cERROR(1, "illegal ntlmssp phase");
		rc = -EIO;
		goto ssetup_exit;
	}

	/* Testing shows that buffer offset must be at location of Buffer[0] */
	req->SecurityBufferOffset =
		cpu_to_le16(sizeof(struct smb2_sess_setup_req) -
			    1 /* pad */ - 4 /* rfc1001 len */);
	req->SecurityBufferLength = cpu_to_le16(blob_length);

	iov[1].iov_base = security_blob;
	iov[1].iov_len = blob_length;

	inc_rfc1001_len(req, blob_length - 1 /* pad */);

	/* BB add code to build os and lm fields */

	rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, CIFS_LOG_ERROR);
	kfree(security_blob);
	rsp = (struct smb2_sess_setup_rsp *)iov[0].iov_base;
	if (resp_buftype != CIFS_NO_BUFFER &&
	    rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) {
		if (phase != NtLmNegotiate) {
			cERROR(1, "Unexpected more processing error");
			goto ssetup_exit;
		}
		if (offsetof(struct smb2_sess_setup_rsp, Buffer) - 4 !=
				le16_to_cpu(rsp->SecurityBufferOffset)) {
			cERROR(1, "Invalid security buffer offset %d",
				  le16_to_cpu(rsp->SecurityBufferOffset));
			rc = -EIO;
			goto ssetup_exit;
		}

		/* NTLMSSP Negotiate sent now processing challenge (response) */
		phase = NtLmChallenge; /* process ntlmssp challenge */
		rc = 0; /* MORE_PROCESSING is not an error here but expected */
		ses->Suid = rsp->hdr.SessionId;
		rc = decode_ntlmssp_challenge(rsp->Buffer,
				le16_to_cpu(rsp->SecurityBufferLength), ses);
	}

	/*
	 * BB eventually add code for SPNEGO decoding of NtlmChallenge blob,
	 * but at least the raw NTLMSSP case works.
	 */

	/*
	 * No tcon so can't do
	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
	 */
	if (rc != 0)
		goto ssetup_exit;

	ses->session_flags = le16_to_cpu(rsp->SessionFlags);
ssetup_exit:
	free_rsp_buf(resp_buftype, rsp);

	/* if ntlmssp, and negotiate succeeded, proceed to authenticate phase */
	if ((phase == NtLmChallenge) && (rc == 0))
		goto ssetup_ntlmssp_authenticate;
	return rc;
}
int
SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
{
	struct smb2_logoff_req *req; /* response is also trivial struct */
	int rc = 0;
	struct TCP_Server_Info *server;

	cFYI(1, "disconnect session %p", ses);

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_LOGOFF, NULL, (void **) &req);
	if (rc)
		return rc;

	/* since no tcon, smb2_init can not do this, so do here */
	req->hdr.SessionId = ses->Suid;
	if (server->sec_mode & SECMODE_SIGN_REQUIRED)
		req->hdr.Flags |= SMB2_FLAGS_SIGNED;

	rc = SendReceiveNoRsp(xid, ses, (char *) &req->hdr, 0);
	/*
	 * No tcon so can't do
	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
	 */
	return rc;
}

static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code)
{
	cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]);
}

#define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */)
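
/*
 * SMB2_tcon - connect to a share ("tree") on the server.  On success fills in
 * tcon->tid, share flags and capabilities from the response; a NULL tcon is
 * used for the IPC$ connection, whose TreeId is stored in ses->ipc_tid.
 */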
int
SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
	  struct cifs_tcon *tcon, const struct nls_table *cp)
{
	struct smb2_tree_connect_req *req;
	struct smb2_tree_connect_rsp *rsp = NULL;
	struct kvec iov[2];
	int rc = 0;
	int resp_buftype;
	int unc_path_len;
	struct TCP_Server_Info *server;
	__le16 *unc_path = NULL;

	cFYI(1, "TCON");

	if ((ses->server) && tree)
		server = ses->server;
	else
		return -EIO;

	if (tcon && tcon->bad_network_name)
		return -ENOENT;

	unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
	if (unc_path == NULL)
		return -ENOMEM;

	unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp) + 1;
	unc_path_len *= 2;
	if (unc_path_len < 2) {
		kfree(unc_path);
		return -EINVAL;
	}

	rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
	if (rc) {
		kfree(unc_path);
		return rc;
	}

	if (tcon == NULL) {
		/* since no tcon, smb2_init can not do this, so do here */
		req->hdr.SessionId = ses->Suid;
		/* if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED)
			req->hdr.Flags |= SMB2_FLAGS_SIGNED; */
	}

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field and 1 for pad */
	iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;

	/* Testing shows that buffer offset must be at location of Buffer[0] */
	req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)
			- 1 /* pad */ - 4 /* do not count rfc1001 len field */);
	req->PathLength = cpu_to_le16(unc_path_len - 2);
	iov[1].iov_base = unc_path;
	iov[1].iov_len = unc_path_len;

	inc_rfc1001_len(req, unc_path_len - 1 /* pad */);

	rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
	rsp = (struct smb2_tree_connect_rsp *)iov[0].iov_base;

	if (rc != 0) {
		if (tcon) {
			cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
			tcon->need_reconnect = true;
		}
		goto tcon_error_exit;
	}

	if (tcon == NULL) {
		ses->ipc_tid = rsp->hdr.TreeId;
		goto tcon_exit;
	}

	if (rsp->ShareType & SMB2_SHARE_TYPE_DISK)
		cFYI(1, "connection to disk share");
	else if (rsp->ShareType & SMB2_SHARE_TYPE_PIPE) {
		tcon->ipc = true;
		cFYI(1, "connection to pipe share");
	} else if (rsp->ShareType & SMB2_SHARE_TYPE_PRINT) {
		tcon->print = true;
		cFYI(1, "connection to printer");
	} else {
		cERROR(1, "unknown share type %d", rsp->ShareType);
		rc = -EOPNOTSUPP;
		goto tcon_error_exit;
	}

	tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
	tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
	tcon->tidStatus = CifsGood;
	tcon->need_reconnect = false;
	tcon->tid = rsp->hdr.TreeId;
	strncpy(tcon->treeName, tree, MAX_TREE_SIZE);

	if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
	    ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
		cERROR(1, "DFS capability contradicts DFS flag");

tcon_exit:
	free_rsp_buf(resp_buftype, rsp);
	kfree(unc_path);
	return rc;

tcon_error_exit:
	if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
		cERROR(1, "BAD_NETWORK_NAME: %s", tree);
		tcon->bad_network_name = true;
	}
	goto tcon_exit;
}

int
SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
{
	struct smb2_tree_disconnect_req *req; /* response is trivial */
	int rc = 0;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;

	cFYI(1, "Tree Disconnect");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
		return 0;

	rc = small_smb2_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req);
	if (rc)
		return rc;

	rc = SendReceiveNoRsp(xid, ses, (char *)&req->hdr, 0);
	if (rc)
		cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);

	return rc;
}
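
/*
 * create_lease_buf - allocate an "RqLs" create context requesting a lease
 * that corresponds to the desired oplock level.  Returns NULL on allocation
 * failure.
 */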
static struct create_lease *
create_lease_buf(u8 *lease_key, u8 oplock)
{
	struct create_lease *buf;

	buf = kmalloc(sizeof(struct create_lease), GFP_KERNEL);
	if (!buf)
		return NULL;

	memset(buf, 0, sizeof(struct create_lease));

	buf->lcontext.LeaseKeyLow = cpu_to_le64(*((u64 *)lease_key));
	buf->lcontext.LeaseKeyHigh = cpu_to_le64(*((u64 *)(lease_key + 8)));
	if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
		buf->lcontext.LeaseState = SMB2_LEASE_WRITE_CACHING |
					   SMB2_LEASE_READ_CACHING;
	else if (oplock == SMB2_OPLOCK_LEVEL_II)
		buf->lcontext.LeaseState = SMB2_LEASE_READ_CACHING;
	else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
		buf->lcontext.LeaseState = SMB2_LEASE_HANDLE_CACHING |
					   SMB2_LEASE_READ_CACHING |
					   SMB2_LEASE_WRITE_CACHING;

	buf->ccontext.DataOffset = cpu_to_le16(offsetof
					(struct create_lease, lcontext));
	buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
	buf->ccontext.NameOffset = cpu_to_le16(offsetof
					(struct create_lease, Name));
	buf->ccontext.NameLength = cpu_to_le16(4);
	buf->Name[0] = 'R';
	buf->Name[1] = 'q';
	buf->Name[2] = 'L';
	buf->Name[3] = 's';
	return buf;
}
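
/*
 * parse_lease_state - walk the create contexts in a create response looking
 * for the "RqLs" lease context, and map the granted lease state back to an
 * oplock level (SMB2_OPLOCK_LEVEL_NOCHANGE if a lease break is in progress,
 * 0 if no lease context was found).
 */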
static __u8
parse_lease_state(struct smb2_create_rsp *rsp)
{
	char *data_offset;
	struct create_lease *lc;
	bool found = false;

	data_offset = (char *)rsp;
	data_offset += 4 + le32_to_cpu(rsp->CreateContextsOffset);
	lc = (struct create_lease *)data_offset;
	do {
		char *name = le16_to_cpu(lc->ccontext.NameOffset) + (char *)lc;
		if (le16_to_cpu(lc->ccontext.NameLength) != 4 ||
		    strncmp(name, "RqLs", 4)) {
			lc = (struct create_lease *)((char *)lc
					+ le32_to_cpu(lc->ccontext.Next));
			continue;
		}
		if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
			return SMB2_OPLOCK_LEVEL_NOCHANGE;
		found = true;
		break;
	} while (le32_to_cpu(lc->ccontext.Next) != 0);

	if (!found)
		return 0;

	return smb2_map_lease_to_oplock(lc->lcontext.LeaseState);
}
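
/*
 * SMB2_open - issue an SMB2 CREATE for @path.  On success returns the
 * persistent and volatile file ids, the granted oplock/lease level via
 * *oplock, and (optionally) basic file attributes in @buf.
 */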
int
SMB2_open(const unsigned int xid, struct cifs_tcon *tcon, __le16 *path,
	  u64 *persistent_fid, u64 *volatile_fid, __u32 desired_access,
	  __u32 create_disposition, __u32 file_attributes, __u32 create_options,
	  __u8 *oplock, struct smb2_file_all_info *buf)
{
	struct smb2_create_req *req;
	struct smb2_create_rsp *rsp;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;
	struct kvec iov[3];
	int resp_buftype;
	int uni_path_len;
	__le16 *copy_path = NULL;
	int copy_size;
	int rc = 0;
	int num_iovecs = 2;

	cFYI(1, "create/open");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_CREATE, tcon, (void **) &req);
	if (rc)
		return rc;

	req->ImpersonationLevel = IL_IMPERSONATION;
	req->DesiredAccess = cpu_to_le32(desired_access);
	/* File attributes ignored on open (used in create though) */
	req->FileAttributes = cpu_to_le32(file_attributes);
	req->ShareAccess = FILE_SHARE_ALL_LE;
	req->CreateDisposition = cpu_to_le32(create_disposition);
	req->CreateOptions = cpu_to_le32(create_options);
	uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
	req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req)
			- 8 /* pad */ - 4 /* do not count rfc1001 len field */);

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	/* MUST set path len (NameLength) to 0 opening root of share */
	if (uni_path_len >= 4) {
		req->NameLength = cpu_to_le16(uni_path_len - 2);
		/* -1 since last byte is buf[0] which is sent below (path) */
		iov[0].iov_len--;
		if (uni_path_len % 8 != 0) {
			copy_size = uni_path_len / 8 * 8;
			if (copy_size < uni_path_len)
				copy_size += 8;

			copy_path = kzalloc(copy_size, GFP_KERNEL);
			if (!copy_path)
				return -ENOMEM;
			memcpy((char *)copy_path, (const char *)path,
				uni_path_len);
			uni_path_len = copy_size;
			path = copy_path;
		}

		iov[1].iov_len = uni_path_len;
		iov[1].iov_base = path;
		/*
		 * -1 since last byte is buf[0] which was counted in
		 * smb2_buf_len.
		 */
		inc_rfc1001_len(req, uni_path_len - 1);
	} else {
		iov[0].iov_len += 7;
		req->hdr.smb2_buf_length = cpu_to_be32(be32_to_cpu(
				req->hdr.smb2_buf_length) + 8 - 1);
		num_iovecs = 1;
		req->NameLength = 0;
	}

	if (!server->oplocks)
		*oplock = SMB2_OPLOCK_LEVEL_NONE;

	if (!(tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
	    *oplock == SMB2_OPLOCK_LEVEL_NONE)
		req->RequestedOplockLevel = *oplock;
	else {
		iov[num_iovecs].iov_base = create_lease_buf(oplock+1, *oplock);
		if (iov[num_iovecs].iov_base == NULL) {
			cifs_small_buf_release(req);
			kfree(copy_path);
			return -ENOMEM;
		}
		iov[num_iovecs].iov_len = sizeof(struct create_lease);
		req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
		req->CreateContextsOffset = cpu_to_le32(
			sizeof(struct smb2_create_req) - 4 - 8 +
			iov[num_iovecs-1].iov_len);
		req->CreateContextsLength = cpu_to_le32(
			sizeof(struct create_lease));
		inc_rfc1001_len(&req->hdr, sizeof(struct create_lease));
		num_iovecs++;
	}

	rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
	rsp = (struct smb2_create_rsp *)iov[0].iov_base;

	if (rc != 0) {
		cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
		goto creat_exit;
	}

	*persistent_fid = rsp->PersistentFileId;
	*volatile_fid = rsp->VolatileFileId;

	if (buf) {
		memcpy(buf, &rsp->CreationTime, 32);
		buf->AllocationSize = rsp->AllocationSize;
		buf->EndOfFile = rsp->EndofFile;
		buf->Attributes = rsp->FileAttributes;
		buf->NumberOfLinks = cpu_to_le32(1);
		buf->DeletePending = 0;
	}

	if (rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE)
		*oplock = parse_lease_state(rsp);
	else
		*oplock = rsp->OplockLevel;
creat_exit:
	kfree(copy_path);
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

int
SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
	   u64 persistent_fid, u64 volatile_fid)
{
	struct smb2_close_req *req;
	struct smb2_close_rsp *rsp;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;
	struct kvec iov[1];
	int resp_buftype;
	int rc = 0;

	cFYI(1, "Close");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_CLOSE, tcon, (void **) &req);
	if (rc)
		return rc;

	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
	rsp = (struct smb2_close_rsp *)iov[0].iov_base;

	if (rc != 0) {
		if (tcon)
			cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
		goto close_exit;
	}

	/* BB FIXME - decode close response, update inode for caching */

close_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}
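
/*
 * validate_buf - sanity check a variable length response buffer: it must be
 * at least @min_buf_size bytes, must not exceed the RFC1001 maximum, and must
 * lie entirely within the received SMB.
 */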
static int
validate_buf(unsigned int offset, unsigned int buffer_length,
	     struct smb2_hdr *hdr, unsigned int min_buf_size)
{
	unsigned int smb_len = be32_to_cpu(hdr->smb2_buf_length);
	char *end_of_smb = smb_len + 4 /* RFC1001 length field */ + (char *)hdr;
	char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr;
	char *end_of_buf = begin_of_buf + buffer_length;

	if (buffer_length < min_buf_size) {
		cERROR(1, "buffer length %d smaller than minimum size %d",
			   buffer_length, min_buf_size);
		return -EINVAL;
	}

	/* check if beyond RFC1001 maximum length */
	if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) {
		cERROR(1, "buffer length %d or smb length %d too large",
			   buffer_length, smb_len);
		return -EINVAL;
	}

	if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) {
		cERROR(1, "illegal server response, bad offset to data");
		return -EINVAL;
	}

	return 0;
}

/*
 * If SMB buffer fields are valid, copy into temporary buffer to hold result.
 * Caller must free buffer.
 */
static int
validate_and_copy_buf(unsigned int offset, unsigned int buffer_length,
		      struct smb2_hdr *hdr, unsigned int minbufsize,
		      char *data)
{
	char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr;
	int rc;

	if (!data)
		return -EINVAL;

	rc = validate_buf(offset, buffer_length, hdr, minbufsize);
	if (rc)
		return rc;

	memcpy(data, begin_of_buf, buffer_length);

	return 0;
}
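
/*
 * query_info - common helper for the SMB2 QUERY_INFO wrappers below: sends a
 * file class query for @info_class and copies the validated response buffer
 * (at least @min_len bytes) into @data.
 */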
static int
query_info(const unsigned int xid, struct cifs_tcon *tcon,
	   u64 persistent_fid, u64 volatile_fid, u8 info_class,
	   size_t output_len, size_t min_len, void *data)
{
	struct smb2_query_info_req *req;
	struct smb2_query_info_rsp *rsp = NULL;
	struct kvec iov[2];
	int rc = 0;
	int resp_buftype;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;

	cFYI(1, "Query Info");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
	if (rc)
		return rc;

	req->InfoType = SMB2_O_INFO_FILE;
	req->FileInfoClass = info_class;
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;
	/* 4 for rfc1002 length field and 1 for Buffer */
	req->InputBufferOffset =
		cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
	req->OutputBufferLength = cpu_to_le32(output_len);

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
	rsp = (struct smb2_query_info_rsp *)iov[0].iov_base;

	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto qinf_exit;
	}

	rc = validate_and_copy_buf(le16_to_cpu(rsp->OutputBufferOffset),
				   le32_to_cpu(rsp->OutputBufferLength),
				   &rsp->hdr, min_len, data);

qinf_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

int
SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
		u64 persistent_fid, u64 volatile_fid,
		struct smb2_file_all_info *data)
{
	return query_info(xid, tcon, persistent_fid, volatile_fid,
			  FILE_ALL_INFORMATION,
			  sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
			  sizeof(struct smb2_file_all_info), data);
}

int
SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
		 u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid)
{
	return query_info(xid, tcon, persistent_fid, volatile_fid,
			  FILE_INTERNAL_INFORMATION,
			  sizeof(struct smb2_file_internal_info),
			  sizeof(struct smb2_file_internal_info), uniqueid);
}

/*
 * This is a no-op for now. We're not really interested in the reply, but
 * rather in the fact that the server sent one and that server->lstrp
 * gets updated.
 *
 * FIXME: maybe we should consider checking that the reply matches request?
 */
static void
smb2_echo_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->callback_data;
	struct smb2_echo_rsp *smb2 = (struct smb2_echo_rsp *)mid->resp_buf;
	unsigned int credits_received = 1;

	if (mid->mid_state == MID_RESPONSE_RECEIVED)
		credits_received = le16_to_cpu(smb2->hdr.CreditRequest);

	DeleteMidQEntry(mid);
	add_credits(server, credits_received, CIFS_ECHO_OP);
}

int
SMB2_echo(struct TCP_Server_Info *server)
{
	struct smb2_echo_req *req;
	int rc = 0;
	struct kvec iov;
	struct smb_rqst rqst = { .rq_iov = &iov,
				 .rq_nvec = 1 };

	cFYI(1, "In echo request");

	rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
	if (rc)
		return rc;

	req->hdr.CreditRequest = cpu_to_le16(1);

	iov.iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov.iov_len = get_rfc1002_length(req) + 4;

	rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, server,
			     CIFS_ECHO_OP);
	if (rc)
		cFYI(1, "Echo request failed: %d", rc);

	cifs_small_buf_release(req);
	return rc;
}

int
SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
	   u64 volatile_fid)
{
	struct smb2_flush_req *req;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;
	struct kvec iov[1];
	int resp_buftype;
	int rc = 0;

	cFYI(1, "Flush");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_FLUSH, tcon, (void **) &req);
	if (rc)
		return rc;

	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);

	if ((rc != 0) && tcon)
		cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);

	free_rsp_buf(resp_buftype, iov[0].iov_base);
	return rc;
}

/*
 * To form a chain of read requests, any read requests after the first should
 * have the end_of_chain boolean set to true.
 */
static int
smb2_new_read_req(struct kvec *iov, struct cifs_io_parms *io_parms,
		  unsigned int remaining_bytes, int request_type)
{
	int rc = -EACCES;
	struct smb2_read_req *req = NULL;

	rc = small_smb2_init(SMB2_READ, io_parms->tcon, (void **) &req);
	if (rc)
		return rc;
	if (io_parms->tcon->ses->server == NULL)
		return -ECONNABORTED;

	req->hdr.ProcessId = cpu_to_le32(io_parms->pid);

	req->PersistentFileId = io_parms->persistent_fid;
	req->VolatileFileId = io_parms->volatile_fid;
	req->ReadChannelInfoOffset = 0; /* reserved */
	req->ReadChannelInfoLength = 0; /* reserved */
	req->Channel = 0; /* reserved */
	req->MinimumCount = 0;
	req->Length = cpu_to_le32(io_parms->length);
	req->Offset = cpu_to_le64(io_parms->offset);

	if (request_type & CHAINED_REQUEST) {
		if (!(request_type & END_OF_CHAIN)) {
			/* 4 for rfc1002 length field */
			req->hdr.NextCommand =
				cpu_to_le32(get_rfc1002_length(req) + 4);
		} else /* END_OF_CHAIN */
			req->hdr.NextCommand = 0;
		if (request_type & RELATED_REQUEST) {
			req->hdr.Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
			/*
			 * Related requests use info from previous read request
			 * in chain.
			 */
			req->hdr.SessionId = 0xFFFFFFFF;
			req->hdr.TreeId = 0xFFFFFFFF;
			req->PersistentFileId = 0xFFFFFFFF;
			req->VolatileFileId = 0xFFFFFFFF;
		}
	}
	if (remaining_bytes > io_parms->length)
		req->RemainingBytes = cpu_to_le32(remaining_bytes);
	else
		req->RemainingBytes = 0;

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;
	return rc;
}
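
/*
 * smb2_readv_callback - completion handler for async reads: verifies the
 * signature when signing is active, updates read statistics, hands the result
 * to the readdata work item and returns the granted credits.
 */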
static void
smb2_readv_callback(struct mid_q_entry *mid)
{
	struct cifs_readdata *rdata = mid->callback_data;
	struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct smb2_hdr *buf = (struct smb2_hdr *)rdata->iov.iov_base;
	unsigned int credits_received = 1;
	struct smb_rqst rqst = { .rq_iov = &rdata->iov,
				 .rq_nvec = 1,
				 .rq_pages = rdata->pages,
				 .rq_npages = rdata->nr_pages,
				 .rq_pagesz = rdata->pagesz,
				 .rq_tailsz = rdata->tailsz };

	cFYI(1, "%s: mid=%llu state=%d result=%d bytes=%u", __func__,
		mid->mid, mid->mid_state, rdata->result, rdata->bytes);

	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		credits_received = le16_to_cpu(buf->CreditRequest);
		/* result already set, check signature */
		if (server->sec_mode &
		    (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
			int rc;

			rc = smb2_verify_signature(&rqst, server);
			if (rc)
				cERROR(1, "SMB signature verification returned "
					  "error = %d", rc);
		}
		/* FIXME: should this be counted toward the initiating task? */
		task_io_account_read(rdata->bytes);
		cifs_stats_bytes_read(tcon, rdata->bytes);
		break;
	case MID_REQUEST_SUBMITTED:
	case MID_RETRY_NEEDED:
		rdata->result = -EAGAIN;
		break;
	default:
		if (rdata->result != -ENODATA)
			rdata->result = -EIO;
	}

	if (rdata->result)
		cifs_stats_fail_inc(tcon, SMB2_READ_HE);

	queue_work(cifsiod_wq, &rdata->work);
	DeleteMidQEntry(mid);
	add_credits(server, credits_received, 0);
}

/* smb2_async_readv - send an async read, and set up mid to handle result */
int
smb2_async_readv(struct cifs_readdata *rdata)
{
	int rc;
	struct smb2_hdr *buf;
	struct cifs_io_parms io_parms;
	struct smb_rqst rqst = { .rq_iov = &rdata->iov,
				 .rq_nvec = 1 };

	cFYI(1, "%s: offset=%llu bytes=%u", __func__,
		rdata->offset, rdata->bytes);

	io_parms.tcon = tlink_tcon(rdata->cfile->tlink);
	io_parms.offset = rdata->offset;
	io_parms.length = rdata->bytes;
	io_parms.persistent_fid = rdata->cfile->fid.persistent_fid;
	io_parms.volatile_fid = rdata->cfile->fid.volatile_fid;
	io_parms.pid = rdata->pid;
	rc = smb2_new_read_req(&rdata->iov, &io_parms, 0, 0);
	if (rc)
		return rc;

	buf = (struct smb2_hdr *)rdata->iov.iov_base;
	/* 4 for rfc1002 length field */
	rdata->iov.iov_len = get_rfc1002_length(rdata->iov.iov_base) + 4;

	kref_get(&rdata->refcount);
	rc = cifs_call_async(io_parms.tcon->ses->server, &rqst,
			     cifs_readv_receive, smb2_readv_callback,
			     rdata, 0);
	if (rc) {
		kref_put(&rdata->refcount, cifs_readdata_release);
		cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
	}

	cifs_small_buf_release(buf);
	return rc;
}
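
/*
 * SMB2_read - synchronous read.  On success *nbytes is the number of bytes
 * returned by the server; if *buf is NULL the response buffer is handed back
 * to the caller via *buf and *buf_type and must be freed by the caller.
 */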
int
SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
	  unsigned int *nbytes, char **buf, int *buf_type)
{
	int resp_buftype, rc = -EACCES;
	struct smb2_read_rsp *rsp = NULL;
	struct kvec iov[1];

	*nbytes = 0;
	rc = smb2_new_read_req(iov, io_parms, 0, 0);
	if (rc)
		return rc;

	rc = SendReceive2(xid, io_parms->tcon->ses, iov, 1,
			  &resp_buftype, CIFS_LOG_ERROR);

	rsp = (struct smb2_read_rsp *)iov[0].iov_base;

	if (rsp->hdr.Status == STATUS_END_OF_FILE) {
		free_rsp_buf(resp_buftype, iov[0].iov_base);
		return 0;
	}

	if (rc) {
		cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
		cERROR(1, "Send error in read = %d", rc);
	} else {
		*nbytes = le32_to_cpu(rsp->DataLength);
		if ((*nbytes > CIFS_MAX_MSGSIZE) ||
		    (*nbytes > io_parms->length)) {
			cFYI(1, "bad length %d for count %d", *nbytes,
				io_parms->length);
			rc = -EIO;
			*nbytes = 0;
		}
	}

	if (*buf) {
		memcpy(*buf, (char *)rsp->hdr.ProtocolId + rsp->DataOffset,
		       *nbytes);
		free_rsp_buf(resp_buftype, iov[0].iov_base);
	} else if (resp_buftype != CIFS_NO_BUFFER) {
		*buf = iov[0].iov_base;
		if (resp_buftype == CIFS_SMALL_BUFFER)
			*buf_type = CIFS_SMALL_BUFFER;
		else if (resp_buftype == CIFS_LARGE_BUFFER)
			*buf_type = CIFS_LARGE_BUFFER;
	}
	return rc;
}
/*
 * Check the mid_state and signature on received buffer (if any), and queue the
 * workqueue completion task.
 */
static void
smb2_writev_callback(struct mid_q_entry *mid)
{
        struct cifs_writedata *wdata = mid->callback_data;
        struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
        unsigned int written;
        struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
        unsigned int credits_received = 1;

        switch (mid->mid_state) {
        case MID_RESPONSE_RECEIVED:
                credits_received = le16_to_cpu(rsp->hdr.CreditRequest);
                wdata->result = smb2_check_receive(mid, tcon->ses->server, 0);
                if (wdata->result != 0)
                        break;

                written = le32_to_cpu(rsp->DataLength);
                /*
                 * Mask off high 16 bits when bytes written as returned
                 * by the server is greater than bytes requested by the
                 * client. OS/2 servers are known to set incorrect
                 * CountHigh values.
                 */
                if (written > wdata->bytes)
                        written &= 0xFFFF;

                if (written < wdata->bytes)
                        wdata->result = -ENOSPC;
                else
                        wdata->bytes = written;
                break;
        case MID_REQUEST_SUBMITTED:
        case MID_RETRY_NEEDED:
                wdata->result = -EAGAIN;
                break;
        default:
                wdata->result = -EIO;
                break;
        }

        if (wdata->result)
                cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);

        queue_work(cifsiod_wq, &wdata->work);
        DeleteMidQEntry(mid);
        add_credits(tcon->ses->server, credits_received, 0);
}

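/*
 * Note: credits_received above defaults to 1, so even on the paths where no
 * response arrived (MID_REQUEST_SUBMITTED, MID_RETRY_NEEDED, or an error)
 * the credit charged for this WRITE is handed back to the server accounting
 * via add_credits(); only a real response overrides it with the response's
 * CreditRequest field, which this code treats as the server's credit grant.
 */
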
/* smb2_async_writev - send an async write, and set up mid to handle result */
int
smb2_async_writev(struct cifs_writedata *wdata)
{
        int rc = -EACCES;
        struct smb2_write_req *req = NULL;
        struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
        struct kvec iov;
        struct smb_rqst rqst;

        rc = small_smb2_init(SMB2_WRITE, tcon, (void **) &req);
        if (rc)
                goto async_writev_out;

        req->hdr.ProcessId = cpu_to_le32(wdata->cfile->pid);

        req->PersistentFileId = wdata->cfile->fid.persistent_fid;
        req->VolatileFileId = wdata->cfile->fid.volatile_fid;
        req->WriteChannelInfoOffset = 0;
        req->WriteChannelInfoLength = 0;
        req->Channel = 0;
        req->Offset = cpu_to_le64(wdata->offset);
        /* 4 for rfc1002 length field */
        req->DataOffset = cpu_to_le16(
                                offsetof(struct smb2_write_req, Buffer) - 4);
        req->RemainingBytes = 0;

        /* 4 for rfc1002 length field and 1 for Buffer */
        iov.iov_len = get_rfc1002_length(req) + 4 - 1;
        iov.iov_base = req;

        rqst.rq_iov = &iov;
        rqst.rq_nvec = 1;
        rqst.rq_pages = wdata->pages;
        rqst.rq_npages = wdata->nr_pages;
        rqst.rq_pagesz = wdata->pagesz;
        rqst.rq_tailsz = wdata->tailsz;

        cFYI(1, "async write at %llu %u bytes", wdata->offset, wdata->bytes);

        req->Length = cpu_to_le32(wdata->bytes);

        inc_rfc1001_len(&req->hdr, wdata->bytes - 1 /* Buffer */);

        kref_get(&wdata->refcount);
        rc = cifs_call_async(tcon->ses->server, &rqst, NULL,
                             smb2_writev_callback, wdata, 0);

        if (rc) {
                kref_put(&wdata->refcount, cifs_writedata_release);
                cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
        }

async_writev_out:
        cifs_small_buf_release(req);
        return rc;
}

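/*
 * Note on the length bookkeeping above: the single kvec covers only the
 * fixed WRITE request header, minus the one placeholder byte declared as
 * Buffer in the request structure; the actual payload travels separately as
 * the page array attached through rq_pages/rq_npages/rq_pagesz/rq_tailsz.
 * inc_rfc1001_len() then grows the RFC1001 frame length by
 * wdata->bytes - 1 so that the placeholder byte is not counted twice.
 */
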
/*
 * SMB2_write is given a pointer to a kvec array together with its length,
 * n_vec.  n_vec must be at least 1 and is the number of elements carrying
 * data to write, starting at position 1 in the iov array (iov[0] is
 * reserved for the request header built here).  The total amount of data
 * to write is given by io_parms->length.
 */
int
SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
           unsigned int *nbytes, struct kvec *iov, int n_vec)
{
        int rc = 0;
        struct smb2_write_req *req = NULL;
        struct smb2_write_rsp *rsp = NULL;
        int resp_buftype;
        *nbytes = 0;

        if (n_vec < 1)
                return rc;

        rc = small_smb2_init(SMB2_WRITE, io_parms->tcon, (void **) &req);
        if (rc)
                return rc;

        if (io_parms->tcon->ses->server == NULL)
                return -ECONNABORTED;

        req->hdr.ProcessId = cpu_to_le32(io_parms->pid);

        req->PersistentFileId = io_parms->persistent_fid;
        req->VolatileFileId = io_parms->volatile_fid;
        req->WriteChannelInfoOffset = 0;
        req->WriteChannelInfoLength = 0;
        req->Channel = 0;
        req->Length = cpu_to_le32(io_parms->length);
        req->Offset = cpu_to_le64(io_parms->offset);
        /* 4 for rfc1002 length field */
        req->DataOffset = cpu_to_le16(
                                offsetof(struct smb2_write_req, Buffer) - 4);
        req->RemainingBytes = 0;

        iov[0].iov_base = (char *)req;
        /* 4 for rfc1002 length field and 1 for Buffer */
        iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;

        /* length of entire message including data to be written */
        inc_rfc1001_len(req, io_parms->length - 1 /* Buffer */);

        rc = SendReceive2(xid, io_parms->tcon->ses, iov, n_vec + 1,
                          &resp_buftype, 0);
        rsp = (struct smb2_write_rsp *)iov[0].iov_base;

        if (rc) {
                cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE);
                cERROR(1, "Send error in write = %d", rc);
        } else
                *nbytes = le32_to_cpu(rsp->DataLength);

        free_rsp_buf(resp_buftype, rsp);
        return rc;
}

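/*
 * Count the directory entries in a QUERY_DIRECTORY response buffer by
 * walking the NextEntryOffset chain.  Each candidate entry is bounds-checked
 * against end_of_buf (size being the fixed part of the per-level info
 * structure), a pointer to the last valid entry is returned through
 * lastentry, and the walk stops at the first entry whose NextEntryOffset
 * is zero or at the first entry that would overflow the buffer.
 */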
static unsigned int
num_entries(char *bufstart, char *end_of_buf, char **lastentry, size_t size)
{
        int len;
        unsigned int entrycount = 0;
        unsigned int next_offset = 0;
        FILE_DIRECTORY_INFO *entryptr;

        if (bufstart == NULL)
                return 0;

        entryptr = (FILE_DIRECTORY_INFO *)bufstart;

        while (1) {
                entryptr = (FILE_DIRECTORY_INFO *)
                                        ((char *)entryptr + next_offset);

                if ((char *)entryptr + size > end_of_buf) {
                        cERROR(1, "malformed search entry would overflow");
                        break;
                }

                len = le32_to_cpu(entryptr->FileNameLength);
                if ((char *)entryptr + len + size > end_of_buf) {
                        cERROR(1, "directory entry name would overflow frame "
                                  "end of buf %p", end_of_buf);
                        break;
                }

                *lastentry = (char *)entryptr;
                entrycount++;

                next_offset = le32_to_cpu(entryptr->NextEntryOffset);
                if (!next_offset)
                        break;
        }

        return entrycount;
}

/*
 * Readdir/FindFirst
 */
int
SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
                     u64 persistent_fid, u64 volatile_fid, int index,
                     struct cifs_search_info *srch_inf)
{
        struct smb2_query_directory_req *req;
        struct smb2_query_directory_rsp *rsp = NULL;
        struct kvec iov[2];
        int rc = 0;
        int len;
        int resp_buftype;
        unsigned char *bufptr;
        struct TCP_Server_Info *server;
        struct cifs_ses *ses = tcon->ses;
        __le16 asterisk = cpu_to_le16('*');
        char *end_of_smb;
        unsigned int output_size = CIFSMaxBufSize;
        size_t info_buf_size;

        if (ses && (ses->server))
                server = ses->server;
        else
                return -EIO;

        rc = small_smb2_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req);
        if (rc)
                return rc;

        switch (srch_inf->info_level) {
        case SMB_FIND_FILE_DIRECTORY_INFO:
                req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
                info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
                break;
        case SMB_FIND_FILE_ID_FULL_DIR_INFO:
                req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
                info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
                break;
        default:
                cERROR(1, "info level %u isn't supported",
                       srch_inf->info_level);
                rc = -EINVAL;
                goto qdir_exit;
        }

        req->FileIndex = cpu_to_le32(index);
        req->PersistentFileId = persistent_fid;
        req->VolatileFileId = volatile_fid;

        len = 0x2;
        bufptr = req->Buffer;
        memcpy(bufptr, &asterisk, len);
        req->FileNameOffset =
                cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1 - 4);
        req->FileNameLength = cpu_to_le16(len);
        /*
         * BB could be 30 bytes or so longer if we used SMB2 specific
         * buffer lengths, but this is safe and close enough.
         */
        output_size = min_t(unsigned int, output_size, server->maxBuf);
        output_size = min_t(unsigned int, output_size, 2 << 15);
        req->OutputBufferLength = cpu_to_le32(output_size);

        iov[0].iov_base = (char *)req;
        /* 4 for RFC1001 length and 1 for Buffer */
        iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;

        iov[1].iov_base = (char *)(req->Buffer);
        iov[1].iov_len = len;

        inc_rfc1001_len(req, len - 1 /* Buffer */);

        rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
        rsp = (struct smb2_query_directory_rsp *)iov[0].iov_base;

        if (rc) {
                cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
                goto qdir_exit;
        }

        rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset),
                          le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr,
                          info_buf_size);
        if (rc)
                goto qdir_exit;

        srch_inf->unicode = true;

        if (srch_inf->ntwrk_buf_start) {
                if (srch_inf->smallBuf)
                        cifs_small_buf_release(srch_inf->ntwrk_buf_start);
                else
                        cifs_buf_release(srch_inf->ntwrk_buf_start);
        }
        srch_inf->ntwrk_buf_start = (char *)rsp;
        srch_inf->srch_entries_start = srch_inf->last_entry = 4 /* rfclen */ +
                (char *)&rsp->hdr + le16_to_cpu(rsp->OutputBufferOffset);
        /* 4 for rfc1002 length field */
        end_of_smb = get_rfc1002_length(rsp) + 4 + (char *)&rsp->hdr;
        srch_inf->entries_in_buffer =
                        num_entries(srch_inf->srch_entries_start, end_of_smb,
                                    &srch_inf->last_entry, info_buf_size);
        srch_inf->index_of_last_entry += srch_inf->entries_in_buffer;
        cFYI(1, "num entries %d last_index %lld srch start %p srch end %p",
             srch_inf->entries_in_buffer, srch_inf->index_of_last_entry,
             srch_inf->srch_entries_start, srch_inf->last_entry);
        if (resp_buftype == CIFS_LARGE_BUFFER)
                srch_inf->smallBuf = false;
        else if (resp_buftype == CIFS_SMALL_BUFFER)
                srch_inf->smallBuf = true;
        else
                cERROR(1, "illegal search buffer type");

        if (rsp->hdr.Status == STATUS_NO_MORE_FILES)
                srch_inf->endOfSearch = 1;
        else
                srch_inf->endOfSearch = 0;

        return rc;

qdir_exit:
        free_rsp_buf(resp_buftype, rsp);
        return rc;
}

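/*
 * Note: on success SMB2_query_directory deliberately does not free the
 * response buffer.  Ownership passes to srch_inf->ntwrk_buf_start
 * (replacing and freeing any buffer left over from a previous call), and
 * srch_entries_start/last_entry point into it so the caller can walk the
 * entries counted by num_entries().
 */
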
static int
send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
              u64 persistent_fid, u64 volatile_fid, u32 pid, int info_class,
              unsigned int num, void **data, unsigned int *size)
{
        struct smb2_set_info_req *req;
        struct smb2_set_info_rsp *rsp = NULL;
        struct kvec *iov;
        int rc = 0;
        int resp_buftype;
        unsigned int i;
        struct TCP_Server_Info *server;
        struct cifs_ses *ses = tcon->ses;

        if (ses && (ses->server))
                server = ses->server;
        else
                return -EIO;

        if (!num)
                return -EINVAL;

        iov = kmalloc(sizeof(struct kvec) * num, GFP_KERNEL);
        if (!iov)
                return -ENOMEM;

        rc = small_smb2_init(SMB2_SET_INFO, tcon, (void **) &req);
        if (rc) {
                kfree(iov);
                return rc;
        }

        req->hdr.ProcessId = cpu_to_le32(pid);

        req->InfoType = SMB2_O_INFO_FILE;
        req->FileInfoClass = info_class;
        req->PersistentFileId = persistent_fid;
        req->VolatileFileId = volatile_fid;

        /* 4 for RFC1001 length and 1 for Buffer */
        req->BufferOffset =
                cpu_to_le16(sizeof(struct smb2_set_info_req) - 1 - 4);
        req->BufferLength = cpu_to_le32(*size);

        inc_rfc1001_len(req, *size - 1 /* Buffer */);

        memcpy(req->Buffer, *data, *size);

        iov[0].iov_base = (char *)req;
        /* 4 for RFC1001 length */
        iov[0].iov_len = get_rfc1002_length(req) + 4;

        for (i = 1; i < num; i++) {
                inc_rfc1001_len(req, size[i]);
                le32_add_cpu(&req->BufferLength, size[i]);
                iov[i].iov_base = (char *)data[i];
                iov[i].iov_len = size[i];
        }

        rc = SendReceive2(xid, ses, iov, num, &resp_buftype, 0);
        rsp = (struct smb2_set_info_rsp *)iov[0].iov_base;

        if (rc != 0) {
                cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE);
                goto out;
        }

out:
        free_rsp_buf(resp_buftype, rsp);
        kfree(iov);
        return rc;
}

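/*
 * Callers of send_set_info() pass the SET_INFO payload as parallel
 * data[]/size[] arrays: data[0]/size[0] hold the fixed info structure,
 * which is copied into the request buffer, and any further elements (for
 * example the target file name used by the rename and hardlink helpers
 * below) are sent as additional kvecs appended to the same request.
 */
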
int
SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
            u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
{
        struct smb2_file_rename_info info;
        void **data;
        unsigned int size[2];
        int rc;
        int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));

        data = kmalloc(sizeof(void *) * 2, GFP_KERNEL);
        if (!data)
                return -ENOMEM;
        info.ReplaceIfExists = 1; /* 1 = replace existing target with new */
                                  /* 0 = fail if target already exists */
        info.RootDirectory = 0;   /* MBZ (must be zero) for network operations */
        info.FileNameLength = cpu_to_le32(len);

        data[0] = &info;
        size[0] = sizeof(struct smb2_file_rename_info);

        data[1] = target_file;
        size[1] = len + 2 /* null */;

        rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
                           current->tgid, FILE_RENAME_INFORMATION, 2, data,
                           size);
        kfree(data);
        return rc;
}

int
SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
                  u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
{
        struct smb2_file_link_info info;
        void **data;
        unsigned int size[2];
        int rc;
        int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));

        data = kmalloc(sizeof(void *) * 2, GFP_KERNEL);
        if (!data)
                return -ENOMEM;
        info.ReplaceIfExists = 0; /* 1 = replace existing link with new */
                                  /* 0 = fail if link already exists */
        info.RootDirectory = 0;   /* MBZ (must be zero) for network operations */
        info.FileNameLength = cpu_to_le32(len);

        data[0] = &info;
        size[0] = sizeof(struct smb2_file_link_info);

        data[1] = target_file;
        size[1] = len + 2 /* null */;

        rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
                           current->tgid, FILE_LINK_INFORMATION, 2, data, size);
        kfree(data);
        return rc;
}

int
SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
             u64 volatile_fid, u32 pid, __le64 *eof)
{
        struct smb2_file_eof_info info;
        void *data;
        unsigned int size;

        info.EndOfFile = *eof;

        data = &info;
        size = sizeof(struct smb2_file_eof_info);

        return send_set_info(xid, tcon, persistent_fid, volatile_fid, pid,
                             FILE_END_OF_FILE_INFORMATION, 1, &data, &size);
}

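/*
 * Example (illustrative sketch, not taken from a caller in this file):
 * extending or truncating an open file to new_size would look roughly like
 *
 *        __le64 eof = cpu_to_le64(new_size);
 *        rc = SMB2_set_eof(xid, tcon, fid->persistent_fid,
 *                          fid->volatile_fid, current->tgid, &eof);
 *
 * where fid refers to the open handle's fid, as used via cfile->fid
 * elsewhere in this file, and new_size is a placeholder variable.
 */
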
int
SMB2_set_info(const unsigned int xid, struct cifs_tcon *tcon,
              u64 persistent_fid, u64 volatile_fid, FILE_BASIC_INFO *buf)
{
        unsigned int size;
        size = sizeof(FILE_BASIC_INFO);

        return send_set_info(xid, tcon, persistent_fid, volatile_fid,
                             current->tgid, FILE_BASIC_INFORMATION, 1,
                             (void **)&buf, &size);
}

int
SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
                  const u64 persistent_fid, const u64 volatile_fid,
                  __u8 oplock_level)
{
        int rc;
        struct smb2_oplock_break *req = NULL;

        cFYI(1, "SMB2_oplock_break");
        rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);

        if (rc)
                return rc;

        req->VolatileFid = volatile_fid;
        req->PersistentFid = persistent_fid;
        req->OplockLevel = oplock_level;
        req->hdr.CreditRequest = cpu_to_le16(1);

        rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, CIFS_OBREAK_OP);
        /* SMB2 buffer freed by function above */

        if (rc) {
                cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
                cFYI(1, "Send error in Oplock Break = %d", rc);
        }

        return rc;
}

static void
copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
                        struct kstatfs *kst)
{
        kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
                          le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
        kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
        kst->f_bfree = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits);
        kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
        return;
}

static int
build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level,
                   int outbuf_len, u64 persistent_fid, u64 volatile_fid)
{
        int rc;
        struct smb2_query_info_req *req;

        cFYI(1, "Query FSInfo level %d", level);

        if ((tcon->ses == NULL) || (tcon->ses->server == NULL))
                return -EIO;

        rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
        if (rc)
                return rc;

        req->InfoType = SMB2_O_INFO_FILESYSTEM;
        req->FileInfoClass = level;
        req->PersistentFileId = persistent_fid;
        req->VolatileFileId = volatile_fid;
        /* 4 for rfc1002 length field and 1 for pad */
        req->InputBufferOffset =
                        cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);

        req->OutputBufferLength = cpu_to_le32(
                outbuf_len + sizeof(struct smb2_query_info_rsp) - 1 - 4);

        iov->iov_base = (char *)req;
        /* 4 for rfc1002 length field */
        iov->iov_len = get_rfc1002_length(req) + 4;
        return 0;
}

int
SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
              u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
{
        struct smb2_query_info_rsp *rsp = NULL;
        struct kvec iov;
        int rc = 0;
        int resp_buftype;
        struct cifs_ses *ses = tcon->ses;
        struct smb2_fs_full_size_info *info = NULL;

        rc = build_qfs_info_req(&iov, tcon, FS_FULL_SIZE_INFORMATION,
                                sizeof(struct smb2_fs_full_size_info),
                                persistent_fid, volatile_fid);
        if (rc)
                return rc;

        rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, 0);
        if (rc) {
                cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
                goto qinf_exit;
        }
        rsp = (struct smb2_query_info_rsp *)iov.iov_base;

        info = (struct smb2_fs_full_size_info *)(4 /* RFC1001 len */ +
                le16_to_cpu(rsp->OutputBufferOffset) + (char *)&rsp->hdr);
        rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset),
                          le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr,
                          sizeof(struct smb2_fs_full_size_info));
        if (!rc)
                copy_fs_info_to_kstatfs(info, fsdata);

qinf_exit:
        free_rsp_buf(resp_buftype, iov.iov_base);
        return rc;
}

int
smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
           const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
           const __u32 num_lock, struct smb2_lock_element *buf)
{
        int rc = 0;
        struct smb2_lock_req *req = NULL;
        struct kvec iov[2];
        int resp_buf_type;
        unsigned int count;

        cFYI(1, "smb2_lockv num lock %d", num_lock);

        rc = small_smb2_init(SMB2_LOCK, tcon, (void **) &req);
        if (rc)
                return rc;

        req->hdr.ProcessId = cpu_to_le32(pid);
        req->LockCount = cpu_to_le16(num_lock);

        req->PersistentFileId = persist_fid;
        req->VolatileFileId = volatile_fid;

        count = num_lock * sizeof(struct smb2_lock_element);
        inc_rfc1001_len(req, count - sizeof(struct smb2_lock_element));

        iov[0].iov_base = (char *)req;
        /* 4 for rfc1002 length field and count for all locks */
        iov[0].iov_len = get_rfc1002_length(req) + 4 - count;
        iov[1].iov_base = (char *)buf;
        iov[1].iov_len = count;

        cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
        rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type, CIFS_NO_RESP);
        if (rc) {
                cFYI(1, "Send error in smb2_lockv = %d", rc);
                cifs_stats_fail_inc(tcon, SMB2_LOCK_HE);
        }

        return rc;
}

int
SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
          const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
          const __u64 length, const __u64 offset, const __u32 lock_flags,
          const bool wait)
{
        struct smb2_lock_element lock;

        lock.Offset = cpu_to_le64(offset);
        lock.Length = cpu_to_le64(length);
        lock.Flags = cpu_to_le32(lock_flags);
        if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK)
                lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY);

        return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock);
}

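/*
 * Example (illustrative sketch, not from a caller in this file; it assumes
 * the usual SMB2_LOCKFLAG_EXCLUSIVE_LOCK definition from the SMB2 headers):
 * taking a non-blocking exclusive byte-range lock and later releasing it
 * might look like
 *
 *        rc = SMB2_lock(xid, tcon, fid->persistent_fid, fid->volatile_fid,
 *                       current->tgid, len, offset,
 *                       SMB2_LOCKFLAG_EXCLUSIVE_LOCK, false);
 *        ...
 *        rc = SMB2_lock(xid, tcon, fid->persistent_fid, fid->volatile_fid,
 *                       current->tgid, len, offset,
 *                       SMB2_LOCKFLAG_UNLOCK, false);
 *
 * With wait == false and a flag other than SMB2_LOCKFLAG_UNLOCK, the
 * FAIL_IMMEDIATELY flag is added so the server does not block the request.
 */
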
int
SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
                 __u8 *lease_key, const __le32 lease_state)
{
        int rc;
        struct smb2_lease_ack *req = NULL;

        cFYI(1, "SMB2_lease_break");
        rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);

        if (rc)
                return rc;

        req->hdr.CreditRequest = cpu_to_le16(1);
        req->StructureSize = cpu_to_le16(36);
        inc_rfc1001_len(req, 12);

        memcpy(req->LeaseKey, lease_key, 16);
        req->LeaseState = lease_state;

        rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, CIFS_OBREAK_OP);
        /* SMB2 buffer freed by function above */

        if (rc) {
                cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
                cFYI(1, "Send error in Lease Break = %d", rc);
        }

        return rc;
}