misc.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722
/*
 *   fs/cifs/misc.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
  21. #include <linux/slab.h>
  22. #include <linux/ctype.h>
  23. #include <linux/mempool.h>
  24. #include "cifspdu.h"
  25. #include "cifsglob.h"
  26. #include "cifsproto.h"
  27. #include "cifs_debug.h"
  28. #include "smberr.h"
  29. #include "nterr.h"
  30. #include "cifs_unicode.h"
  31. extern mempool_t *cifs_sm_req_poolp;
  32. extern mempool_t *cifs_req_poolp;
  33. /* The xid serves as a useful identifier for each incoming vfs request,
  34. in a similar way to the mid which is useful to track each sent smb,
  35. and CurrentXid can also provide a running counter (although it
  36. will eventually wrap past zero) of the total vfs operations handled
  37. since the cifs fs was mounted */
  38. unsigned int
  39. _GetXid(void)
  40. {
  41. unsigned int xid;
  42. spin_lock(&GlobalMid_Lock);
  43. GlobalTotalActiveXid++;
  44. /* keep high water mark for number of simultaneous ops in filesystem */
  45. if (GlobalTotalActiveXid > GlobalMaxActiveXid)
  46. GlobalMaxActiveXid = GlobalTotalActiveXid;
  47. if (GlobalTotalActiveXid > 65000)
  48. cFYI(1, "warning: more than 65000 requests active");
  49. xid = GlobalCurrentXid++;
  50. spin_unlock(&GlobalMid_Lock);
  51. return xid;
  52. }
  53. void
  54. _FreeXid(unsigned int xid)
  55. {
  56. spin_lock(&GlobalMid_Lock);
  57. /* if (GlobalTotalActiveXid == 0)
  58. BUG(); */
  59. GlobalTotalActiveXid--;
  60. spin_unlock(&GlobalMid_Lock);
  61. }
  62. struct cifs_ses *
  63. sesInfoAlloc(void)
  64. {
  65. struct cifs_ses *ret_buf;
  66. ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
  67. if (ret_buf) {
  68. atomic_inc(&sesInfoAllocCount);
  69. ret_buf->status = CifsNew;
  70. ++ret_buf->ses_count;
  71. INIT_LIST_HEAD(&ret_buf->smb_ses_list);
  72. INIT_LIST_HEAD(&ret_buf->tcon_list);
  73. mutex_init(&ret_buf->session_mutex);
  74. }
  75. return ret_buf;
  76. }
  77. void
  78. sesInfoFree(struct cifs_ses *buf_to_free)
  79. {
  80. if (buf_to_free == NULL) {
  81. cFYI(1, "Null buffer passed to sesInfoFree");
  82. return;
  83. }
  84. atomic_dec(&sesInfoAllocCount);
  85. kfree(buf_to_free->serverOS);
  86. kfree(buf_to_free->serverDomain);
  87. kfree(buf_to_free->serverNOS);
  88. if (buf_to_free->password) {
  89. memset(buf_to_free->password, 0, strlen(buf_to_free->password));
  90. kfree(buf_to_free->password);
  91. }
  92. kfree(buf_to_free->user_name);
  93. kfree(buf_to_free->domainName);
  94. kfree(buf_to_free);
  95. }
  96. struct cifs_tcon *
  97. tconInfoAlloc(void)
  98. {
  99. struct cifs_tcon *ret_buf;
  100. ret_buf = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL);
  101. if (ret_buf) {
  102. atomic_inc(&tconInfoAllocCount);
  103. ret_buf->tidStatus = CifsNew;
  104. ++ret_buf->tc_count;
  105. INIT_LIST_HEAD(&ret_buf->openFileList);
  106. INIT_LIST_HEAD(&ret_buf->tcon_list);
  107. #ifdef CONFIG_CIFS_STATS
  108. spin_lock_init(&ret_buf->stat_lock);
  109. #endif
  110. }
  111. return ret_buf;
  112. }
  113. void
  114. tconInfoFree(struct cifs_tcon *buf_to_free)
  115. {
  116. if (buf_to_free == NULL) {
  117. cFYI(1, "Null buffer passed to tconInfoFree");
  118. return;
  119. }
  120. atomic_dec(&tconInfoAllocCount);
  121. kfree(buf_to_free->nativeFileSystem);
  122. if (buf_to_free->password) {
  123. memset(buf_to_free->password, 0, strlen(buf_to_free->password));
  124. kfree(buf_to_free->password);
  125. }
  126. kfree(buf_to_free);
  127. }
  128. struct smb_hdr *
  129. cifs_buf_get(void)
  130. {
  131. struct smb_hdr *ret_buf = NULL;
  132. /* We could use negotiated size instead of max_msgsize -
  133. but it may be more efficient to always alloc same size
  134. albeit slightly larger than necessary and maxbuffersize
  135. defaults to this and can not be bigger */
  136. ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);
  137. /* clear the first few header bytes */
  138. /* for most paths, more is cleared in header_assemble */
  139. if (ret_buf) {
  140. memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
  141. atomic_inc(&bufAllocCount);
  142. #ifdef CONFIG_CIFS_STATS2
  143. atomic_inc(&totBufAllocCount);
  144. #endif /* CONFIG_CIFS_STATS2 */
  145. }
  146. return ret_buf;
  147. }
  148. void
  149. cifs_buf_release(void *buf_to_free)
  150. {
  151. if (buf_to_free == NULL) {
  152. /* cFYI(1, "Null buffer passed to cifs_buf_release");*/
  153. return;
  154. }
  155. mempool_free(buf_to_free, cifs_req_poolp);
  156. atomic_dec(&bufAllocCount);
  157. return;
  158. }
  159. struct smb_hdr *
  160. cifs_small_buf_get(void)
  161. {
  162. struct smb_hdr *ret_buf = NULL;
  163. /* We could use negotiated size instead of max_msgsize -
  164. but it may be more efficient to always alloc same size
  165. albeit slightly larger than necessary and maxbuffersize
  166. defaults to this and can not be bigger */
  167. ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
  168. if (ret_buf) {
  169. /* No need to clear memory here, cleared in header assemble */
  170. /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
  171. atomic_inc(&smBufAllocCount);
  172. #ifdef CONFIG_CIFS_STATS2
  173. atomic_inc(&totSmBufAllocCount);
  174. #endif /* CONFIG_CIFS_STATS2 */
  175. }
  176. return ret_buf;
  177. }
  178. void
  179. cifs_small_buf_release(void *buf_to_free)
  180. {
  181. if (buf_to_free == NULL) {
  182. cFYI(1, "Null buffer passed to cifs_small_buf_release");
  183. return;
  184. }
  185. mempool_free(buf_to_free, cifs_sm_req_poolp);
  186. atomic_dec(&smBufAllocCount);
  187. return;
  188. }
  189. /*
  190. * Find a free multiplex id (SMB mid). Otherwise there could be
  191. * mid collisions which might cause problems, demultiplexing the
  192. * wrong response to this request. Multiplex ids could collide if
  193. * one of a series requests takes much longer than the others, or
  194. * if a very large number of long lived requests (byte range
  195. * locks or FindNotify requests) are pending. No more than
  196. * 64K-1 requests can be outstanding at one time. If no
  197. * mids are available, return zero. A future optimization
  198. * could make the combination of mids and uid the key we use
  199. * to demultiplex on (rather than mid alone).
  200. * In addition to the above check, the cifs demultiplex
  201. * code already used the command code as a secondary
  202. * check of the frame and if signing is negotiated the
  203. * response would be discarded if the mid were the same
  204. * but the signature was wrong. Since the mid is not put in the
  205. * pending queue until later (when it is about to be dispatched)
  206. * we do have to limit the number of outstanding requests
  207. * to somewhat less than 64K-1 although it is hard to imagine
  208. * so many threads being in the vfs at one time.
  209. */
  210. __u64 GetNextMid(struct TCP_Server_Info *server)
  211. {
  212. __u64 mid = 0;
  213. __u16 last_mid, cur_mid;
  214. bool collision;
  215. spin_lock(&GlobalMid_Lock);
  216. /* mid is 16 bit only for CIFS/SMB */
  217. cur_mid = (__u16)((server->CurrentMid) & 0xffff);
  218. /* we do not want to loop forever */
  219. last_mid = cur_mid;
  220. cur_mid++;
  221. /*
  222. * This nested loop looks more expensive than it is.
  223. * In practice the list of pending requests is short,
  224. * fewer than 50, and the mids are likely to be unique
  225. * on the first pass through the loop unless some request
  226. * takes longer than the 64 thousand requests before it
  227. * (and it would also have to have been a request that
  228. * did not time out).
  229. */
  230. while (cur_mid != last_mid) {
  231. struct mid_q_entry *mid_entry;
  232. unsigned int num_mids;
  233. collision = false;
  234. if (cur_mid == 0)
  235. cur_mid++;
  236. num_mids = 0;
  237. list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
  238. ++num_mids;
  239. if (mid_entry->mid == cur_mid &&
  240. mid_entry->mid_state == MID_REQUEST_SUBMITTED) {
  241. /* This mid is in use, try a different one */
  242. collision = true;
  243. break;
  244. }
  245. }
  246. /*
  247. * if we have more than 32k mids in the list, then something
  248. * is very wrong. Possibly a local user is trying to DoS the
  249. * box by issuing long-running calls and SIGKILL'ing them. If
  250. * we get to 2^16 mids then we're in big trouble as this
  251. * function could loop forever.
  252. *
  253. * Go ahead and assign out the mid in this situation, but force
  254. * an eventual reconnect to clean out the pending_mid_q.
  255. */
  256. if (num_mids > 32768)
  257. server->tcpStatus = CifsNeedReconnect;
  258. if (!collision) {
  259. mid = (__u64)cur_mid;
  260. server->CurrentMid = mid;
  261. break;
  262. }
  263. cur_mid++;
  264. }
  265. spin_unlock(&GlobalMid_Lock);
  266. return mid;
  267. }
  268. /* NB: MID can not be set if treeCon not passed in, in that
  269. case it is responsbility of caller to set the mid */
  270. void
  271. header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
  272. const struct cifs_tcon *treeCon, int word_count
  273. /* length of fixed section (word count) in two byte units */)
  274. {
  275. struct list_head *temp_item;
  276. struct cifs_ses *ses;
  277. char *temp = (char *) buffer;
  278. memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */
  279. buffer->smb_buf_length = cpu_to_be32(
  280. (2 * word_count) + sizeof(struct smb_hdr) -
  281. 4 /* RFC 1001 length field does not count */ +
  282. 2 /* for bcc field itself */) ;
  283. buffer->Protocol[0] = 0xFF;
  284. buffer->Protocol[1] = 'S';
  285. buffer->Protocol[2] = 'M';
  286. buffer->Protocol[3] = 'B';
  287. buffer->Command = smb_command;
  288. buffer->Flags = 0x00; /* case sensitive */
  289. buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
  290. buffer->Pid = cpu_to_le16((__u16)current->tgid);
  291. buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
  292. if (treeCon) {
  293. buffer->Tid = treeCon->tid;
  294. if (treeCon->ses) {
  295. if (treeCon->ses->capabilities & CAP_UNICODE)
  296. buffer->Flags2 |= SMBFLG2_UNICODE;
  297. if (treeCon->ses->capabilities & CAP_STATUS32)
  298. buffer->Flags2 |= SMBFLG2_ERR_STATUS;
  299. /* Uid is not converted */
  300. buffer->Uid = treeCon->ses->Suid;
  301. buffer->Mid = GetNextMid(treeCon->ses->server);
  302. if (multiuser_mount != 0) {
  303. /* For the multiuser case, there are few obvious technically */
  304. /* possible mechanisms to match the local linux user (uid) */
  305. /* to a valid remote smb user (smb_uid): */
  306. /* 1) Query Winbind (or other local pam/nss daemon */
  307. /* for userid/password/logon_domain or credential */
  308. /* 2) Query Winbind for uid to sid to username mapping */
  309. /* and see if we have a matching password for existing*/
  310. /* session for that user perhas getting password by */
  311. /* adding a new pam_cifs module that stores passwords */
  312. /* so that the cifs vfs can get at that for all logged*/
  313. /* on users */
  314. /* 3) (Which is the mechanism we have chosen) */
  315. /* Search through sessions to the same server for a */
  316. /* a match on the uid that was passed in on mount */
  317. /* with the current processes uid (or euid?) and use */
  318. /* that smb uid. If no existing smb session for */
  319. /* that uid found, use the default smb session ie */
  320. /* the smb session for the volume mounted which is */
  321. /* the same as would be used if the multiuser mount */
  322. /* flag were disabled. */
  323. /* BB Add support for establishing new tCon and SMB Session */
  324. /* with userid/password pairs found on the smb session */
  325. /* for other target tcp/ip addresses BB */
  326. if (current_fsuid() != treeCon->ses->linux_uid) {
  327. cFYI(1, "Multiuser mode and UID "
  328. "did not match tcon uid");
  329. spin_lock(&cifs_tcp_ses_lock);
  330. list_for_each(temp_item, &treeCon->ses->server->smb_ses_list) {
  331. ses = list_entry(temp_item, struct cifs_ses, smb_ses_list);
  332. if (ses->linux_uid == current_fsuid()) {
  333. if (ses->server == treeCon->ses->server) {
  334. cFYI(1, "found matching uid substitute right smb_uid");
  335. buffer->Uid = ses->Suid;
  336. break;
  337. } else {
  338. /* BB eventually call cifs_setup_session here */
  339. cFYI(1, "local UID found but no smb sess with this server exists");
  340. }
  341. }
  342. }
  343. spin_unlock(&cifs_tcp_ses_lock);
  344. }
  345. }
  346. }
  347. if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
  348. buffer->Flags2 |= SMBFLG2_DFS;
  349. if (treeCon->nocase)
  350. buffer->Flags |= SMBFLG_CASELESS;
  351. if ((treeCon->ses) && (treeCon->ses->server))
  352. if (treeCon->ses->server->sec_mode &
  353. (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
  354. buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
  355. }
  356. /* endian conversion of flags is now done just before sending */
  357. buffer->WordCount = (char) word_count;
  358. return;
  359. }
  360. static int
  361. check_smb_hdr(struct smb_hdr *smb, __u16 mid)
  362. {
  363. /* does it have the right SMB "signature" ? */
  364. if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
  365. cERROR(1, "Bad protocol string signature header 0x%x",
  366. *(unsigned int *)smb->Protocol);
  367. return 1;
  368. }
  369. /* Make sure that message ids match */
  370. if (mid != smb->Mid) {
  371. cERROR(1, "Mids do not match. received=%u expected=%u",
  372. smb->Mid, mid);
  373. return 1;
  374. }
  375. /* if it's a response then accept */
  376. if (smb->Flags & SMBFLG_RESPONSE)
  377. return 0;
  378. /* only one valid case where server sends us request */
  379. if (smb->Command == SMB_COM_LOCKING_ANDX)
  380. return 0;
  381. cERROR(1, "Server sent request, not response. mid=%u", smb->Mid);
  382. return 1;
  383. }
  384. int
  385. checkSMB(char *buf, unsigned int total_read)
  386. {
  387. struct smb_hdr *smb = (struct smb_hdr *)buf;
  388. __u16 mid = smb->Mid;
  389. __u32 rfclen = be32_to_cpu(smb->smb_buf_length);
  390. __u32 clc_len; /* calculated length */
  391. cFYI(0, "checkSMB Length: 0x%x, smb_buf_length: 0x%x",
  392. total_read, rfclen);
  393. /* is this frame too small to even get to a BCC? */
  394. if (total_read < 2 + sizeof(struct smb_hdr)) {
  395. if ((total_read >= sizeof(struct smb_hdr) - 1)
  396. && (smb->Status.CifsError != 0)) {
  397. /* it's an error return */
  398. smb->WordCount = 0;
  399. /* some error cases do not return wct and bcc */
  400. return 0;
  401. } else if ((total_read == sizeof(struct smb_hdr) + 1) &&
  402. (smb->WordCount == 0)) {
  403. char *tmp = (char *)smb;
  404. /* Need to work around a bug in two servers here */
  405. /* First, check if the part of bcc they sent was zero */
  406. if (tmp[sizeof(struct smb_hdr)] == 0) {
  407. /* some servers return only half of bcc
  408. * on simple responses (wct, bcc both zero)
  409. * in particular have seen this on
  410. * ulogoffX and FindClose. This leaves
  411. * one byte of bcc potentially unitialized
  412. */
  413. /* zero rest of bcc */
  414. tmp[sizeof(struct smb_hdr)+1] = 0;
  415. return 0;
  416. }
  417. cERROR(1, "rcvd invalid byte count (bcc)");
  418. } else {
  419. cERROR(1, "Length less than smb header size");
  420. }
  421. return -EIO;
  422. }
  423. /* otherwise, there is enough to get to the BCC */
  424. if (check_smb_hdr(smb, mid))
  425. return -EIO;
  426. clc_len = smbCalcSize(smb);
  427. if (4 + rfclen != total_read) {
  428. cERROR(1, "Length read does not match RFC1001 length %d",
  429. rfclen);
  430. return -EIO;
  431. }
  432. if (4 + rfclen != clc_len) {
  433. /* check if bcc wrapped around for large read responses */
  434. if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
  435. /* check if lengths match mod 64K */
  436. if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
  437. return 0; /* bcc wrapped */
  438. }
  439. cFYI(1, "Calculated size %u vs length %u mismatch for mid=%u",
  440. clc_len, 4 + rfclen, smb->Mid);
  441. if (4 + rfclen < clc_len) {
  442. cERROR(1, "RFC1001 size %u smaller than SMB for mid=%u",
  443. rfclen, smb->Mid);
  444. return -EIO;
  445. } else if (rfclen > clc_len + 512) {
  446. /*
  447. * Some servers (Windows XP in particular) send more
  448. * data than the lengths in the SMB packet would
  449. * indicate on certain calls (byte range locks and
  450. * trans2 find first calls in particular). While the
  451. * client can handle such a frame by ignoring the
  452. * trailing data, we choose limit the amount of extra
  453. * data to 512 bytes.
  454. */
  455. cERROR(1, "RFC1001 size %u more than 512 bytes larger "
  456. "than SMB for mid=%u", rfclen, smb->Mid);
  457. return -EIO;
  458. }
  459. }
  460. return 0;
  461. }
  462. bool
  463. is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
  464. {
  465. struct smb_hdr *buf = (struct smb_hdr *)buffer;
  466. struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
  467. struct list_head *tmp, *tmp1, *tmp2;
  468. struct cifs_ses *ses;
  469. struct cifs_tcon *tcon;
  470. struct cifsInodeInfo *pCifsInode;
  471. struct cifsFileInfo *netfile;
  472. cFYI(1, "Checking for oplock break or dnotify response");
  473. if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
  474. (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
  475. struct smb_com_transaction_change_notify_rsp *pSMBr =
  476. (struct smb_com_transaction_change_notify_rsp *)buf;
  477. struct file_notify_information *pnotify;
  478. __u32 data_offset = 0;
  479. if (get_bcc(buf) > sizeof(struct file_notify_information)) {
  480. data_offset = le32_to_cpu(pSMBr->DataOffset);
  481. pnotify = (struct file_notify_information *)
  482. ((char *)&pSMBr->hdr.Protocol + data_offset);
  483. cFYI(1, "dnotify on %s Action: 0x%x",
  484. pnotify->FileName, pnotify->Action);
  485. /* cifs_dump_mem("Rcvd notify Data: ",buf,
  486. sizeof(struct smb_hdr)+60); */
  487. return true;
  488. }
  489. if (pSMBr->hdr.Status.CifsError) {
  490. cFYI(1, "notify err 0x%d",
  491. pSMBr->hdr.Status.CifsError);
  492. return true;
  493. }
  494. return false;
  495. }
  496. if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
  497. return false;
  498. if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
  499. /* no sense logging error on invalid handle on oplock
  500. break - harmless race between close request and oplock
  501. break response is expected from time to time writing out
  502. large dirty files cached on the client */
  503. if ((NT_STATUS_INVALID_HANDLE) ==
  504. le32_to_cpu(pSMB->hdr.Status.CifsError)) {
  505. cFYI(1, "invalid handle on oplock break");
  506. return true;
  507. } else if (ERRbadfid ==
  508. le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
  509. return true;
  510. } else {
  511. return false; /* on valid oplock brk we get "request" */
  512. }
  513. }
  514. if (pSMB->hdr.WordCount != 8)
  515. return false;
  516. cFYI(1, "oplock type 0x%d level 0x%d",
  517. pSMB->LockType, pSMB->OplockLevel);
  518. if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
  519. return false;
  520. /* look up tcon based on tid & uid */
  521. spin_lock(&cifs_tcp_ses_lock);
  522. list_for_each(tmp, &srv->smb_ses_list) {
  523. ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
  524. list_for_each(tmp1, &ses->tcon_list) {
  525. tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
  526. if (tcon->tid != buf->Tid)
  527. continue;
  528. cifs_stats_inc(&tcon->num_oplock_brks);
  529. spin_lock(&cifs_file_list_lock);
  530. list_for_each(tmp2, &tcon->openFileList) {
  531. netfile = list_entry(tmp2, struct cifsFileInfo,
  532. tlist);
  533. if (pSMB->Fid != netfile->netfid)
  534. continue;
  535. cFYI(1, "file id match, oplock break");
  536. pCifsInode = CIFS_I(netfile->dentry->d_inode);
  537. cifs_set_oplock_level(pCifsInode,
  538. pSMB->OplockLevel ? OPLOCK_READ : 0);
  539. queue_work(cifsiod_wq,
  540. &netfile->oplock_break);
  541. netfile->oplock_break_cancelled = false;
  542. spin_unlock(&cifs_file_list_lock);
  543. spin_unlock(&cifs_tcp_ses_lock);
  544. return true;
  545. }
  546. spin_unlock(&cifs_file_list_lock);
  547. spin_unlock(&cifs_tcp_ses_lock);
  548. cFYI(1, "No matching file for oplock break");
  549. return true;
  550. }
  551. }
  552. spin_unlock(&cifs_tcp_ses_lock);
  553. cFYI(1, "Can not process oplock break for non-existent connection");
  554. return true;
  555. }
  556. void
  557. dump_smb(void *buf, int smb_buf_length)
  558. {
  559. int i, j;
  560. char debug_line[17];
  561. unsigned char *buffer = buf;
  562. if (traceSMB == 0)
  563. return;
  564. for (i = 0, j = 0; i < smb_buf_length; i++, j++) {
  565. if (i % 8 == 0) {
  566. /* have reached the beginning of line */
  567. printk(KERN_DEBUG "| ");
  568. j = 0;
  569. }
  570. printk("%0#4x ", buffer[i]);
  571. debug_line[2 * j] = ' ';
  572. if (isprint(buffer[i]))
  573. debug_line[1 + (2 * j)] = buffer[i];
  574. else
  575. debug_line[1 + (2 * j)] = '_';
  576. if (i % 8 == 7) {
  577. /* reached end of line, time to print ascii */
  578. debug_line[16] = 0;
  579. printk(" | %s\n", debug_line);
  580. }
  581. }
  582. for (; j < 8; j++) {
  583. printk(" ");
  584. debug_line[2 * j] = ' ';
  585. debug_line[1 + (2 * j)] = ' ';
  586. }
  587. printk(" | %s\n", debug_line);
  588. return;
  589. }
  590. void
  591. cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
  592. {
  593. if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
  594. cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
  595. cERROR(1, "Autodisabling the use of server inode numbers on "
  596. "%s. This server doesn't seem to support them "
  597. "properly. Hardlinks will not be recognized on this "
  598. "mount. Consider mounting with the \"noserverino\" "
  599. "option to silence this message.",
  600. cifs_sb_master_tcon(cifs_sb)->treeName);
  601. }
  602. }
  603. void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
  604. {
  605. oplock &= 0xF;
  606. if (oplock == OPLOCK_EXCLUSIVE) {
  607. cinode->clientCanCacheAll = true;
  608. cinode->clientCanCacheRead = true;
  609. cFYI(1, "Exclusive Oplock granted on inode %p",
  610. &cinode->vfs_inode);
  611. } else if (oplock == OPLOCK_READ) {
  612. cinode->clientCanCacheAll = false;
  613. cinode->clientCanCacheRead = true;
  614. cFYI(1, "Level II Oplock granted on inode %p",
  615. &cinode->vfs_inode);
  616. } else {
  617. cinode->clientCanCacheAll = false;
  618. cinode->clientCanCacheRead = false;
  619. }
  620. }
  621. bool
  622. backup_cred(struct cifs_sb_info *cifs_sb)
  623. {
  624. if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
  625. if (cifs_sb->mnt_backupuid == current_fsuid())
  626. return true;
  627. }
  628. if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
  629. if (in_group_p(cifs_sb->mnt_backupgid))
  630. return true;
  631. }
  632. return false;
  633. }
  634. void
  635. cifs_add_credits(struct TCP_Server_Info *server, const unsigned int add)
  636. {
  637. spin_lock(&server->req_lock);
  638. server->credits += add;
  639. server->in_flight--;
  640. spin_unlock(&server->req_lock);
  641. wake_up(&server->request_q);
  642. }
  643. void
  644. cifs_set_credits(struct TCP_Server_Info *server, const int val)
  645. {
  646. spin_lock(&server->req_lock);
  647. server->credits = val;
  648. server->oplocks = val > 1 ? enable_oplocks : false;
  649. spin_unlock(&server->req_lock);
  650. }