/*
 *   fs/cifs/misc.c
 *
 *   Copyright (C) International Business Machines Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smberr.h"
#include "nterr.h"
#include "cifs_unicode.h"

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
  33. /* The xid serves as a useful identifier for each incoming vfs request,
  34. in a similar way to the mid which is useful to track each sent smb,
  35. and CurrentXid can also provide a running counter (although it
  36. will eventually wrap past zero) of the total vfs operations handled
  37. since the cifs fs was mounted */
  38. unsigned int
  39. _GetXid(void)
  40. {
  41. unsigned int xid;
  42. spin_lock(&GlobalMid_Lock);
  43. GlobalTotalActiveXid++;
  44. /* keep high water mark for number of simultaneous ops in filesystem */
  45. if (GlobalTotalActiveXid > GlobalMaxActiveXid)
  46. GlobalMaxActiveXid = GlobalTotalActiveXid;
  47. if (GlobalTotalActiveXid > 65000)
  48. cFYI(1, "warning: more than 65000 requests active");
  49. xid = GlobalCurrentXid++;
  50. spin_unlock(&GlobalMid_Lock);
  51. return xid;
  52. }
  53. void
  54. _FreeXid(unsigned int xid)
  55. {
  56. spin_lock(&GlobalMid_Lock);
  57. /* if (GlobalTotalActiveXid == 0)
  58. BUG(); */
  59. GlobalTotalActiveXid--;
  60. spin_unlock(&GlobalMid_Lock);
  61. }
  62. struct cifs_ses *
  63. sesInfoAlloc(void)
  64. {
  65. struct cifs_ses *ret_buf;
  66. ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
  67. if (ret_buf) {
  68. atomic_inc(&sesInfoAllocCount);
  69. ret_buf->status = CifsNew;
  70. ++ret_buf->ses_count;
  71. INIT_LIST_HEAD(&ret_buf->smb_ses_list);
  72. INIT_LIST_HEAD(&ret_buf->tcon_list);
  73. mutex_init(&ret_buf->session_mutex);
  74. }
  75. return ret_buf;
  76. }
  77. void
  78. sesInfoFree(struct cifs_ses *buf_to_free)
  79. {
  80. if (buf_to_free == NULL) {
  81. cFYI(1, "Null buffer passed to sesInfoFree");
  82. return;
  83. }
  84. atomic_dec(&sesInfoAllocCount);
  85. kfree(buf_to_free->serverOS);
  86. kfree(buf_to_free->serverDomain);
  87. kfree(buf_to_free->serverNOS);
  88. if (buf_to_free->password) {
  89. memset(buf_to_free->password, 0, strlen(buf_to_free->password));
  90. kfree(buf_to_free->password);
  91. }
  92. kfree(buf_to_free->user_name);
  93. kfree(buf_to_free->domainName);
  94. kfree(buf_to_free);
  95. }
  96. struct cifs_tcon *
  97. tconInfoAlloc(void)
  98. {
  99. struct cifs_tcon *ret_buf;
  100. ret_buf = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL);
  101. if (ret_buf) {
  102. atomic_inc(&tconInfoAllocCount);
  103. ret_buf->tidStatus = CifsNew;
  104. ++ret_buf->tc_count;
  105. INIT_LIST_HEAD(&ret_buf->openFileList);
  106. INIT_LIST_HEAD(&ret_buf->tcon_list);
  107. #ifdef CONFIG_CIFS_STATS
  108. spin_lock_init(&ret_buf->stat_lock);
  109. #endif
  110. }
  111. return ret_buf;
  112. }
  113. void
  114. tconInfoFree(struct cifs_tcon *buf_to_free)
  115. {
  116. if (buf_to_free == NULL) {
  117. cFYI(1, "Null buffer passed to tconInfoFree");
  118. return;
  119. }
  120. atomic_dec(&tconInfoAllocCount);
  121. kfree(buf_to_free->nativeFileSystem);
  122. if (buf_to_free->password) {
  123. memset(buf_to_free->password, 0, strlen(buf_to_free->password));
  124. kfree(buf_to_free->password);
  125. }
  126. kfree(buf_to_free);
  127. }
  128. struct smb_hdr *
  129. cifs_buf_get(void)
  130. {
  131. struct smb_hdr *ret_buf = NULL;
  132. /* We could use negotiated size instead of max_msgsize -
  133. but it may be more efficient to always alloc same size
  134. albeit slightly larger than necessary and maxbuffersize
  135. defaults to this and can not be bigger */
  136. ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);
  137. /* clear the first few header bytes */
  138. /* for most paths, more is cleared in header_assemble */
  139. if (ret_buf) {
  140. memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
  141. atomic_inc(&bufAllocCount);
  142. #ifdef CONFIG_CIFS_STATS2
  143. atomic_inc(&totBufAllocCount);
  144. #endif /* CONFIG_CIFS_STATS2 */
  145. }
  146. return ret_buf;
  147. }
  148. void
  149. cifs_buf_release(void *buf_to_free)
  150. {
  151. if (buf_to_free == NULL) {
  152. /* cFYI(1, "Null buffer passed to cifs_buf_release");*/
  153. return;
  154. }
  155. mempool_free(buf_to_free, cifs_req_poolp);
  156. atomic_dec(&bufAllocCount);
  157. return;
  158. }
  159. struct smb_hdr *
  160. cifs_small_buf_get(void)
  161. {
  162. struct smb_hdr *ret_buf = NULL;
  163. /* We could use negotiated size instead of max_msgsize -
  164. but it may be more efficient to always alloc same size
  165. albeit slightly larger than necessary and maxbuffersize
  166. defaults to this and can not be bigger */
  167. ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
  168. if (ret_buf) {
  169. /* No need to clear memory here, cleared in header assemble */
  170. /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
  171. atomic_inc(&smBufAllocCount);
  172. #ifdef CONFIG_CIFS_STATS2
  173. atomic_inc(&totSmBufAllocCount);
  174. #endif /* CONFIG_CIFS_STATS2 */
  175. }
  176. return ret_buf;
  177. }
  178. void
  179. cifs_small_buf_release(void *buf_to_free)
  180. {
  181. if (buf_to_free == NULL) {
  182. cFYI(1, "Null buffer passed to cifs_small_buf_release");
  183. return;
  184. }
  185. mempool_free(buf_to_free, cifs_sm_req_poolp);
  186. atomic_dec(&smBufAllocCount);
  187. return;
  188. }
/*
 * Find a free multiplex id (SMB mid). Otherwise there could be
 * mid collisions which might cause problems, demultiplexing the
 * wrong response to this request. Multiplex ids could collide if
 * one of a series requests takes much longer than the others, or
 * if a very large number of long lived requests (byte range
 * locks or FindNotify requests) are pending. No more than
 * 64K-1 requests can be outstanding at one time. If no
 * mids are available, return zero. A future optimization
 * could make the combination of mids and uid the key we use
 * to demultiplex on (rather than mid alone).
 * In addition to the above check, the cifs demultiplex
 * code already used the command code as a secondary
 * check of the frame and if signing is negotiated the
 * response would be discarded if the mid were the same
 * but the signature was wrong. Since the mid is not put in the
 * pending queue until later (when it is about to be dispatched)
 * we do have to limit the number of outstanding requests
 * to somewhat less than 64K-1 although it is hard to imagine
 * so many threads being in the vfs at one time.
 *
 * Returns the assigned mid, or 0 if all 64K-1 mids are in use.
 * Runs entirely under GlobalMid_Lock.
 */
__u64 GetNextMid(struct TCP_Server_Info *server)
{
	__u64 mid = 0;
	__u16 last_mid, cur_mid;
	bool collision;

	spin_lock(&GlobalMid_Lock);

	/* mid is 16 bit only for CIFS/SMB */
	cur_mid = (__u16)((server->CurrentMid) & 0xffff);

	/* we do not want to loop forever */
	last_mid = cur_mid;
	cur_mid++;

	/*
	 * This nested loop looks more expensive than it is.
	 * In practice the list of pending requests is short,
	 * fewer than 50, and the mids are likely to be unique
	 * on the first pass through the loop unless some request
	 * takes longer than the 64 thousand requests before it
	 * (and it would also have to have been a request that
	 * did not time out).
	 */
	while (cur_mid != last_mid) {
		struct mid_q_entry *mid_entry;
		unsigned int num_mids;

		collision = false;
		/* mid 0 is reserved; skip it when the counter wraps */
		if (cur_mid == 0)
			cur_mid++;

		num_mids = 0;
		list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
			++num_mids;
			/* only entries still awaiting a response can collide */
			if (mid_entry->mid == cur_mid &&
			    mid_entry->mid_state == MID_REQUEST_SUBMITTED) {
				/* This mid is in use, try a different one */
				collision = true;
				break;
			}
		}

		/*
		 * if we have more than 32k mids in the list, then something
		 * is very wrong. Possibly a local user is trying to DoS the
		 * box by issuing long-running calls and SIGKILL'ing them. If
		 * we get to 2^16 mids then we're in big trouble as this
		 * function could loop forever.
		 *
		 * Go ahead and assign out the mid in this situation, but force
		 * an eventual reconnect to clean out the pending_mid_q.
		 */
		if (num_mids > 32768)
			server->tcpStatus = CifsNeedReconnect;

		if (!collision) {
			mid = (__u64)cur_mid;
			server->CurrentMid = mid;
			break;
		}
		cur_mid++;
	}
	spin_unlock(&GlobalMid_Lock);
	return mid;
}
  268. /* NB: MID can not be set if treeCon not passed in, in that
  269. case it is responsbility of caller to set the mid */
  270. void
  271. header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
  272. const struct cifs_tcon *treeCon, int word_count
  273. /* length of fixed section (word count) in two byte units */)
  274. {
  275. char *temp = (char *) buffer;
  276. memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */
  277. buffer->smb_buf_length = cpu_to_be32(
  278. (2 * word_count) + sizeof(struct smb_hdr) -
  279. 4 /* RFC 1001 length field does not count */ +
  280. 2 /* for bcc field itself */) ;
  281. buffer->Protocol[0] = 0xFF;
  282. buffer->Protocol[1] = 'S';
  283. buffer->Protocol[2] = 'M';
  284. buffer->Protocol[3] = 'B';
  285. buffer->Command = smb_command;
  286. buffer->Flags = 0x00; /* case sensitive */
  287. buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
  288. buffer->Pid = cpu_to_le16((__u16)current->tgid);
  289. buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
  290. if (treeCon) {
  291. buffer->Tid = treeCon->tid;
  292. if (treeCon->ses) {
  293. if (treeCon->ses->capabilities & CAP_UNICODE)
  294. buffer->Flags2 |= SMBFLG2_UNICODE;
  295. if (treeCon->ses->capabilities & CAP_STATUS32)
  296. buffer->Flags2 |= SMBFLG2_ERR_STATUS;
  297. /* Uid is not converted */
  298. buffer->Uid = treeCon->ses->Suid;
  299. buffer->Mid = GetNextMid(treeCon->ses->server);
  300. }
  301. if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
  302. buffer->Flags2 |= SMBFLG2_DFS;
  303. if (treeCon->nocase)
  304. buffer->Flags |= SMBFLG_CASELESS;
  305. if ((treeCon->ses) && (treeCon->ses->server))
  306. if (treeCon->ses->server->sec_mode &
  307. (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
  308. buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
  309. }
  310. /* endian conversion of flags is now done just before sending */
  311. buffer->WordCount = (char) word_count;
  312. return;
  313. }
  314. static int
  315. check_smb_hdr(struct smb_hdr *smb, __u16 mid)
  316. {
  317. /* does it have the right SMB "signature" ? */
  318. if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
  319. cERROR(1, "Bad protocol string signature header 0x%x",
  320. *(unsigned int *)smb->Protocol);
  321. return 1;
  322. }
  323. /* Make sure that message ids match */
  324. if (mid != smb->Mid) {
  325. cERROR(1, "Mids do not match. received=%u expected=%u",
  326. smb->Mid, mid);
  327. return 1;
  328. }
  329. /* if it's a response then accept */
  330. if (smb->Flags & SMBFLG_RESPONSE)
  331. return 0;
  332. /* only one valid case where server sends us request */
  333. if (smb->Command == SMB_COM_LOCKING_ANDX)
  334. return 0;
  335. cERROR(1, "Server sent request, not response. mid=%u", smb->Mid);
  336. return 1;
  337. }
/*
 * Sanity-check a received SMB frame: verify that the number of bytes
 * read from the socket agrees with both the RFC1001 length prefix and
 * the length implied by the SMB's own wct/bcc fields, working around
 * several known server bugs along the way.  Returns 0 if the frame is
 * usable, -EIO otherwise.
 */
int
checkSMB(char *buf, unsigned int total_read)
{
	struct smb_hdr *smb = (struct smb_hdr *)buf;
	__u16 mid = smb->Mid;
	__u32 rfclen = be32_to_cpu(smb->smb_buf_length);
	__u32 clc_len;  /* calculated length */

	cFYI(0, "checkSMB Length: 0x%x, smb_buf_length: 0x%x",
	     total_read, rfclen);

	/* is this frame too small to even get to a BCC? */
	if (total_read < 2 + sizeof(struct smb_hdr)) {
		if ((total_read >= sizeof(struct smb_hdr) - 1)
			    && (smb->Status.CifsError != 0)) {
			/* it's an error return */
			smb->WordCount = 0;
			/* some error cases do not return wct and bcc */
			return 0;
		} else if ((total_read == sizeof(struct smb_hdr) + 1) &&
				(smb->WordCount == 0)) {
			char *tmp = (char *)smb;
			/* Need to work around a bug in two servers here */
			/* First, check if the part of bcc they sent was zero */
			if (tmp[sizeof(struct smb_hdr)] == 0) {
				/* some servers return only half of bcc
				 * on simple responses (wct, bcc both zero)
				 * in particular have seen this on
				 * ulogoffX and FindClose. This leaves
				 * one byte of bcc potentially unitialized
				 */
				/* zero rest of bcc */
				tmp[sizeof(struct smb_hdr)+1] = 0;
				return 0;
			}
			cERROR(1, "rcvd invalid byte count (bcc)");
		} else {
			cERROR(1, "Length less than smb header size");
		}
		return -EIO;
	}

	/* otherwise, there is enough to get to the BCC */
	if (check_smb_hdr(smb, mid))
		return -EIO;
	clc_len = smbCalcSize(smb);

	/* what we read must be exactly the RFC1001 frame plus the 4-byte
	   length field itself */
	if (4 + rfclen != total_read) {
		cERROR(1, "Length read does not match RFC1001 length %d",
		       rfclen);
		return -EIO;
	}

	if (4 + rfclen != clc_len) {
		/* check if bcc wrapped around for large read responses */
		if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
			/* check if lengths match mod 64K */
			if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
				return 0; /* bcc wrapped */
		}
		cFYI(1, "Calculated size %u vs length %u mismatch for mid=%u",
		     clc_len, 4 + rfclen, smb->Mid);

		if (4 + rfclen < clc_len) {
			/* frame shorter than its own fields claim: reading
			   the SMB would run past the data we have */
			cERROR(1, "RFC1001 size %u smaller than SMB for mid=%u",
			       rfclen, smb->Mid);
			return -EIO;
		} else if (rfclen > clc_len + 512) {
			/*
			 * Some servers (Windows XP in particular) send more
			 * data than the lengths in the SMB packet would
			 * indicate on certain calls (byte range locks and
			 * trans2 find first calls in particular). While the
			 * client can handle such a frame by ignoring the
			 * trailing data, we choose limit the amount of extra
			 * data to 512 bytes.
			 */
			cERROR(1, "RFC1001 size %u more than 512 bytes larger "
				  "than SMB for mid=%u", rfclen, smb->Mid);
			return -EIO;
		}
	}
	return 0;
}
/*
 * Examine an incoming frame that did not match a pending request and
 * decide whether it is a server-initiated oplock break (or a dnotify
 * response) that should be handled here.  Returns true if the frame was
 * consumed by this function, false if it should be treated as an
 * (invalid) unmatched response by the caller.
 */
bool
is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
{
	struct smb_hdr *buf = (struct smb_hdr *)buffer;
	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
	struct list_head *tmp, *tmp1, *tmp2;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct cifsFileInfo *netfile;

	cFYI(1, "Checking for oplock break or dnotify response");
	/* dnotify (change notify) responses arrive as NT_TRANSACT replies */
	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
	   (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
		struct smb_com_transaction_change_notify_rsp *pSMBr =
			(struct smb_com_transaction_change_notify_rsp *)buf;
		struct file_notify_information *pnotify;
		__u32 data_offset = 0;
		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
			data_offset = le32_to_cpu(pSMBr->DataOffset);

			pnotify = (struct file_notify_information *)
				((char *)&pSMBr->hdr.Protocol + data_offset);
			cFYI(1, "dnotify on %s Action: 0x%x",
				 pnotify->FileName, pnotify->Action);
			return true;
		}
		if (pSMBr->hdr.Status.CifsError) {
			cFYI(1, "notify err 0x%d",
				pSMBr->hdr.Status.CifsError);
			return true;
		}
		return false;
	}
	/* oplock breaks arrive as LOCKING_ANDX requests from the server */
	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
		return false;
	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
		/* no sense logging error on invalid handle on oplock
		   break - harmless race between close request and oplock
		   break response is expected from time to time writing out
		   large dirty files cached on the client */
		if ((NT_STATUS_INVALID_HANDLE) ==
		   le32_to_cpu(pSMB->hdr.Status.CifsError)) {
			cFYI(1, "invalid handle on oplock break");
			return true;
		} else if (ERRbadfid ==
		   le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
			return true;
		} else {
			return false; /* on valid oplock brk we get "request" */
		}
	}
	/* an oplock break request carries exactly 8 parameter words */
	if (pSMB->hdr.WordCount != 8)
		return false;

	cFYI(1, "oplock type 0x%d level 0x%d",
		 pSMB->LockType, pSMB->OplockLevel);
	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
		return false;

	/* look up tcon based on tid & uid */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each(tmp, &srv->smb_ses_list) {
		ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
		list_for_each(tmp1, &ses->tcon_list) {
			tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
			if (tcon->tid != buf->Tid)
				continue;
			cifs_stats_inc(&tcon->num_oplock_brks);
			/* file-list lock nests inside the tcp_ses lock */
			spin_lock(&cifs_file_list_lock);
			list_for_each(tmp2, &tcon->openFileList) {
				netfile = list_entry(tmp2, struct cifsFileInfo,
						     tlist);
				if (pSMB->Fid != netfile->netfid)
					continue;

				cFYI(1, "file id match, oplock break");
				pCifsInode = CIFS_I(netfile->dentry->d_inode);

				/* OplockLevel nonzero means demote to
				   level II (read) rather than none */
				cifs_set_oplock_level(pCifsInode,
					pSMB->OplockLevel ? OPLOCK_READ : 0);
				queue_work(cifsiod_wq,
					   &netfile->oplock_break);
				netfile->oplock_break_cancelled = false;

				spin_unlock(&cifs_file_list_lock);
				spin_unlock(&cifs_tcp_ses_lock);
				return true;
			}
			spin_unlock(&cifs_file_list_lock);
			spin_unlock(&cifs_tcp_ses_lock);
			cFYI(1, "No matching file for oplock break");
			return true;
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
	cFYI(1, "Can not process oplock break for non-existent connection");
	return true;
}
  510. void
  511. dump_smb(void *buf, int smb_buf_length)
  512. {
  513. int i, j;
  514. char debug_line[17];
  515. unsigned char *buffer = buf;
  516. if (traceSMB == 0)
  517. return;
  518. for (i = 0, j = 0; i < smb_buf_length; i++, j++) {
  519. if (i % 8 == 0) {
  520. /* have reached the beginning of line */
  521. printk(KERN_DEBUG "| ");
  522. j = 0;
  523. }
  524. printk("%0#4x ", buffer[i]);
  525. debug_line[2 * j] = ' ';
  526. if (isprint(buffer[i]))
  527. debug_line[1 + (2 * j)] = buffer[i];
  528. else
  529. debug_line[1 + (2 * j)] = '_';
  530. if (i % 8 == 7) {
  531. /* reached end of line, time to print ascii */
  532. debug_line[16] = 0;
  533. printk(" | %s\n", debug_line);
  534. }
  535. }
  536. for (; j < 8; j++) {
  537. printk(" ");
  538. debug_line[2 * j] = ' ';
  539. debug_line[1 + (2 * j)] = ' ';
  540. }
  541. printk(" | %s\n", debug_line);
  542. return;
  543. }
  544. void
  545. cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
  546. {
  547. if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
  548. cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
  549. cERROR(1, "Autodisabling the use of server inode numbers on "
  550. "%s. This server doesn't seem to support them "
  551. "properly. Hardlinks will not be recognized on this "
  552. "mount. Consider mounting with the \"noserverino\" "
  553. "option to silence this message.",
  554. cifs_sb_master_tcon(cifs_sb)->treeName);
  555. }
  556. }
  557. void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
  558. {
  559. oplock &= 0xF;
  560. if (oplock == OPLOCK_EXCLUSIVE) {
  561. cinode->clientCanCacheAll = true;
  562. cinode->clientCanCacheRead = true;
  563. cFYI(1, "Exclusive Oplock granted on inode %p",
  564. &cinode->vfs_inode);
  565. } else if (oplock == OPLOCK_READ) {
  566. cinode->clientCanCacheAll = false;
  567. cinode->clientCanCacheRead = true;
  568. cFYI(1, "Level II Oplock granted on inode %p",
  569. &cinode->vfs_inode);
  570. } else {
  571. cinode->clientCanCacheAll = false;
  572. cinode->clientCanCacheRead = false;
  573. }
  574. }
  575. bool
  576. backup_cred(struct cifs_sb_info *cifs_sb)
  577. {
  578. if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
  579. if (cifs_sb->mnt_backupuid == current_fsuid())
  580. return true;
  581. }
  582. if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
  583. if (in_group_p(cifs_sb->mnt_backupgid))
  584. return true;
  585. }
  586. return false;
  587. }