@@ -36,7 +36,13 @@
 
 extern mempool_t *cifs_mid_poolp;
 
-static struct mid_q_entry *
+static void
+wake_up_task(struct mid_q_entry *mid)
+{
+	wake_up_process(mid->callback_data);
+}
+
+struct mid_q_entry *
 AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
 {
 	struct mid_q_entry *temp;
@@ -58,28 +64,28 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
 	/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
 		/* when mid allocated can be before when sent */
 		temp->when_alloc = jiffies;
-		temp->tsk = current;
+
+		/*
+		 * The default is for the mid to be synchronous, so the
+		 * default callback just wakes up the current task.
+		 */
+		temp->callback = wake_up_task;
+		temp->callback_data = current;
 	}
 
-	spin_lock(&GlobalMid_Lock);
-	list_add_tail(&temp->qhead, &server->pending_mid_q);
 	atomic_inc(&midCount);
 	temp->midState = MID_REQUEST_ALLOCATED;
-	spin_unlock(&GlobalMid_Lock);
 	return temp;
 }
 
-static void
+void
 DeleteMidQEntry(struct mid_q_entry *midEntry)
 {
 #ifdef CONFIG_CIFS_STATS2
 	unsigned long now;
 #endif
-	spin_lock(&GlobalMid_Lock);
 	midEntry->midState = MID_FREE;
-	list_del(&midEntry->qhead);
 	atomic_dec(&midCount);
-	spin_unlock(&GlobalMid_Lock);
 	if (midEntry->largeBuf)
 		cifs_buf_release(midEntry->resp_buf);
 	else
@@ -103,6 +109,16 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
 	mempool_free(midEntry, cifs_mid_poolp);
 }
 
+static void
+delete_mid(struct mid_q_entry *mid)
+{
+	spin_lock(&GlobalMid_Lock);
+	list_del(&mid->qhead);
+	spin_unlock(&GlobalMid_Lock);
+
+	DeleteMidQEntry(mid);
+}
+
 static int
 smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
 {
@@ -244,31 +260,31 @@ smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
 	return smb_sendv(server, &iov, 1);
 }
 
-static int wait_for_free_request(struct cifsSesInfo *ses, const int long_op)
+static int wait_for_free_request(struct TCP_Server_Info *server,
+				 const int long_op)
 {
 	if (long_op == CIFS_ASYNC_OP) {
 		/* oplock breaks must not be held up */
-		atomic_inc(&ses->server->inFlight);
+		atomic_inc(&server->inFlight);
 		return 0;
 	}
 
 	spin_lock(&GlobalMid_Lock);
 	while (1) {
-		if (atomic_read(&ses->server->inFlight) >=
-				cifs_max_pending){
+		if (atomic_read(&server->inFlight) >= cifs_max_pending) {
 			spin_unlock(&GlobalMid_Lock);
 #ifdef CONFIG_CIFS_STATS2
-			atomic_inc(&ses->server->num_waiters);
+			atomic_inc(&server->num_waiters);
 #endif
-			wait_event(ses->server->request_q,
-				atomic_read(&ses->server->inFlight)
+			wait_event(server->request_q,
+				   atomic_read(&server->inFlight)
 					 < cifs_max_pending);
 #ifdef CONFIG_CIFS_STATS2
-			atomic_dec(&ses->server->num_waiters);
+			atomic_dec(&server->num_waiters);
 #endif
 			spin_lock(&GlobalMid_Lock);
 		} else {
-			if (ses->server->tcpStatus == CifsExiting) {
+			if (server->tcpStatus == CifsExiting) {
 				spin_unlock(&GlobalMid_Lock);
 				return -ENOENT;
 			}
@@ -278,7 +294,7 @@ static int wait_for_free_request(struct cifsSesInfo *ses, const int long_op)
 
 			/* update # of requests on the wire to server */
 			if (long_op != CIFS_BLOCKING_OP)
-				atomic_inc(&ses->server->inFlight);
+				atomic_inc(&server->inFlight);
 			spin_unlock(&GlobalMid_Lock);
 			break;
 		}
@@ -308,53 +324,81 @@ static int allocate_mid(struct cifsSesInfo *ses, struct smb_hdr *in_buf,
 	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
 	if (*ppmidQ == NULL)
 		return -ENOMEM;
+	spin_lock(&GlobalMid_Lock);
+	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
+	spin_unlock(&GlobalMid_Lock);
 	return 0;
 }
 
-static int wait_for_response(struct cifsSesInfo *ses,
-			struct mid_q_entry *midQ,
-			unsigned long timeout,
-			unsigned long time_to_wait)
+static int
+wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
 {
-	unsigned long curr_timeout;
+	int error;
 
-	for (;;) {
-		curr_timeout = timeout + jiffies;
-		wait_event_timeout(ses->server->response_q,
-			midQ->midState != MID_REQUEST_SUBMITTED, timeout);
+	error = wait_event_killable(server->response_q,
+				    midQ->midState != MID_REQUEST_SUBMITTED);
+	if (error < 0)
+		return -ERESTARTSYS;
 
-		if (time_after(jiffies, curr_timeout) &&
-			(midQ->midState == MID_REQUEST_SUBMITTED) &&
-			((ses->server->tcpStatus == CifsGood) ||
-			    (ses->server->tcpStatus == CifsNew))) {
+	return 0;
+}
 
-			unsigned long lrt;
 
-			/* We timed out. Is the server still
-			   sending replies ? */
-			spin_lock(&GlobalMid_Lock);
-			lrt = ses->server->lstrp;
-			spin_unlock(&GlobalMid_Lock);
+/*
+ * Send a SMB request and set the callback function in the mid to handle
+ * the result. Caller is responsible for dealing with timeouts.
+ */
+int
+cifs_call_async(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
+		mid_callback_t *callback, void *cbdata)
+{
+	int rc;
+	struct mid_q_entry *mid;
 
-			/* Calculate time_to_wait past last receive time.
-			   Although we prefer not to time out if the
-			   server is still responding - we will time
-			   out if the server takes more than 15 (or 45
-			   or 180) seconds to respond to this request
-			   and has not responded to any request from
-			   other threads on the client within 10 seconds */
-			lrt += time_to_wait;
-			if (time_after(jiffies, lrt)) {
-				/* No replies for time_to_wait. */
-				cERROR(1, "server not responding");
-				return -1;
-			}
-		} else {
-			return 0;
-		}
+	rc = wait_for_free_request(server, CIFS_ASYNC_OP);
+	if (rc)
+		return rc;
+
+	mutex_lock(&server->srv_mutex);
+	mid = AllocMidQEntry(in_buf, server);
+	if (mid == NULL) {
+		mutex_unlock(&server->srv_mutex);
+		return -ENOMEM;
 	}
-}
 
+	/* put it on the pending_mid_q */
+	spin_lock(&GlobalMid_Lock);
+	list_add_tail(&mid->qhead, &server->pending_mid_q);
+	spin_unlock(&GlobalMid_Lock);
+
+	rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
+	if (rc) {
+		mutex_unlock(&server->srv_mutex);
+		goto out_err;
+	}
+
+	mid->callback = callback;
+	mid->callback_data = cbdata;
+	mid->midState = MID_REQUEST_SUBMITTED;
+#ifdef CONFIG_CIFS_STATS2
+	atomic_inc(&server->inSend);
+#endif
+	rc = smb_send(server, in_buf, in_buf->smb_buf_length);
+#ifdef CONFIG_CIFS_STATS2
+	atomic_dec(&server->inSend);
+	mid->when_sent = jiffies;
+#endif
+	mutex_unlock(&server->srv_mutex);
+	if (rc)
+		goto out_err;
+
+	return rc;
+out_err:
+	delete_mid(mid);
+	atomic_dec(&server->inFlight);
+	wake_up(&server->request_q);
+	return rc;
+}
 
 /*
  *
@@ -382,6 +426,81 @@ SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses,
 	return rc;
 }
 
+static int
+sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
+{
+	int rc = 0;
+
+	cFYI(1, "%s: cmd=%d mid=%d state=%d", __func__, mid->command,
+		mid->mid, mid->midState);
+
+	spin_lock(&GlobalMid_Lock);
+	/* ensure that it's no longer on the pending_mid_q */
+	list_del_init(&mid->qhead);
+
+	switch (mid->midState) {
+	case MID_RESPONSE_RECEIVED:
+		spin_unlock(&GlobalMid_Lock);
+		return rc;
+	case MID_REQUEST_SUBMITTED:
+		/* socket is going down, reject all calls */
+		if (server->tcpStatus == CifsExiting) {
+			cERROR(1, "%s: canceling mid=%d cmd=0x%x state=%d",
+				__func__, mid->mid, mid->command, mid->midState);
+			rc = -EHOSTDOWN;
+			break;
+		}
+	case MID_RETRY_NEEDED:
+		rc = -EAGAIN;
+		break;
+	default:
+		cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__,
+			mid->mid, mid->midState);
+		rc = -EIO;
+	}
+	spin_unlock(&GlobalMid_Lock);
+
+	DeleteMidQEntry(mid);
+	return rc;
+}
+
+/*
+ * An NT cancel request header looks just like the original request except:
+ *
+ * The Command is SMB_COM_NT_CANCEL
+ * The WordCount is zeroed out
+ * The ByteCount is zeroed out
+ *
+ * This function mangles an existing request buffer into a
+ * SMB_COM_NT_CANCEL request and then sends it.
+ */
+static int
+send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
+		struct mid_q_entry *mid)
+{
+	int rc = 0;
+
+	/* -4 for RFC1001 length and +2 for BCC field */
+	in_buf->smb_buf_length = sizeof(struct smb_hdr) - 4 + 2;
+	in_buf->Command = SMB_COM_NT_CANCEL;
+	in_buf->WordCount = 0;
+	BCC_LE(in_buf) = 0;
+
+	mutex_lock(&server->srv_mutex);
+	rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
+	if (rc) {
+		mutex_unlock(&server->srv_mutex);
+		return rc;
+	}
+	rc = smb_send(server, in_buf, in_buf->smb_buf_length);
+	mutex_unlock(&server->srv_mutex);
+
+	cFYI(1, "issued NT_CANCEL for mid %u, rc = %d",
+		in_buf->Mid, rc);
+
+	return rc;
+}
+
 int
 SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
 	     struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
@@ -390,7 +509,6 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
 	int rc = 0;
 	int long_op;
 	unsigned int receive_len;
-	unsigned long timeout;
 	struct mid_q_entry *midQ;
 	struct smb_hdr *in_buf = iov[0].iov_base;
 
@@ -413,7 +531,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
 	   to the same server. We may make this configurable later or
 	   use ses->maxReq */
 
-	rc = wait_for_free_request(ses, long_op);
+	rc = wait_for_free_request(ses->server, long_op);
 	if (rc) {
 		cifs_small_buf_release(in_buf);
 		return rc;
@@ -457,65 +575,20 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
 	if (rc < 0)
 		goto out;
 
-	if (long_op == CIFS_STD_OP)
-		timeout = 15 * HZ;
-	else if (long_op == CIFS_VLONG_OP) /* e.g. slow writes past EOF */
-		timeout = 180 * HZ;
-	else if (long_op == CIFS_LONG_OP)
-		timeout = 45 * HZ; /* should be greater than
-			servers oplock break timeout (about 43 seconds) */
-	else if (long_op == CIFS_ASYNC_OP)
+	if (long_op == CIFS_ASYNC_OP)
 		goto out;
-	else if (long_op == CIFS_BLOCKING_OP)
-		timeout = 0x7FFFFFFF; /* large, but not so large as to wrap */
-	else {
-		cERROR(1, "unknown timeout flag %d", long_op);
-		rc = -EIO;
-		goto out;
-	}
-
-	/* wait for 15 seconds or until woken up due to response arriving or
-	   due to last connection to this server being unmounted */
-	if (signal_pending(current)) {
-		/* if signal pending do not hold up user for full smb timeout
-		   but we still give response a chance to complete */
-		timeout = 2 * HZ;
-	}
-
-	/* No user interrupts in wait - wreaks havoc with performance */
-	wait_for_response(ses, midQ, timeout, 10 * HZ);
-
-	spin_lock(&GlobalMid_Lock);
 
-	if (midQ->resp_buf == NULL) {
-		cERROR(1, "No response to cmd %d mid %d",
-			midQ->command, midQ->mid);
-		if (midQ->midState == MID_REQUEST_SUBMITTED) {
-			if (ses->server->tcpStatus == CifsExiting)
-				rc = -EHOSTDOWN;
-			else {
-				ses->server->tcpStatus = CifsNeedReconnect;
-				midQ->midState = MID_RETRY_NEEDED;
-			}
-		}
+	rc = wait_for_response(ses->server, midQ);
+	if (rc != 0)
+		goto out;
 
-		if (rc != -EHOSTDOWN) {
-			if (midQ->midState == MID_RETRY_NEEDED) {
-				rc = -EAGAIN;
-				cFYI(1, "marking request for retry");
-			} else {
-				rc = -EIO;
-			}
-		}
-		spin_unlock(&GlobalMid_Lock);
-		DeleteMidQEntry(midQ);
-		/* Update # of requests on wire to server */
+	rc = sync_mid_result(midQ, ses->server);
+	if (rc != 0) {
 		atomic_dec(&ses->server->inFlight);
 		wake_up(&ses->server->request_q);
 		return rc;
 	}
 
-	spin_unlock(&GlobalMid_Lock);
 	receive_len = midQ->resp_buf->smb_buf_length;
 
 	if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
@@ -564,14 +637,14 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
 		if ((flags & CIFS_NO_RESP) == 0)
 			midQ->resp_buf = NULL; /* mark it so buf will
 						  not be freed by
-						  DeleteMidQEntry */
+						  delete_mid */
 	} else {
 		rc = -EIO;
 		cFYI(1, "Bad MID state?");
 	}
 
 out:
-	DeleteMidQEntry(midQ);
+	delete_mid(midQ);
 	atomic_dec(&ses->server->inFlight);
 	wake_up(&ses->server->request_q);
 
@@ -585,7 +658,6 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
 {
 	int rc = 0;
 	unsigned int receive_len;
-	unsigned long timeout;
 	struct mid_q_entry *midQ;
 
 	if (ses == NULL) {
@@ -610,7 +682,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
 		return -EIO;
 	}
 
-	rc = wait_for_free_request(ses, long_op);
+	rc = wait_for_free_request(ses->server, long_op);
 	if (rc)
 		return rc;
 
@@ -649,64 +721,20 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
 	if (rc < 0)
 		goto out;
 
-	if (long_op == CIFS_STD_OP)
-		timeout = 15 * HZ;
-	/* wait for 15 seconds or until woken up due to response arriving or
-	   due to last connection to this server being unmounted */
-	else if (long_op == CIFS_ASYNC_OP)
+	if (long_op == CIFS_ASYNC_OP)
 		goto out;
-	else if (long_op == CIFS_VLONG_OP) /* writes past EOF can be slow */
-		timeout = 180 * HZ;
-	else if (long_op == CIFS_LONG_OP)
-		timeout = 45 * HZ; /* should be greater than
-			servers oplock break timeout (about 43 seconds) */
-	else if (long_op == CIFS_BLOCKING_OP)
-		timeout = 0x7FFFFFFF; /* large but no so large as to wrap */
-	else {
-		cERROR(1, "unknown timeout flag %d", long_op);
-		rc = -EIO;
-		goto out;
-	}
 
-	if (signal_pending(current)) {
-		/* if signal pending do not hold up user for full smb timeout
-		   but we still give response a chance to complete */
-		timeout = 2 * HZ;
-	}
-
-	/* No user interrupts in wait - wreaks havoc with performance */
-	wait_for_response(ses, midQ, timeout, 10 * HZ);
-
-	spin_lock(&GlobalMid_Lock);
-	if (midQ->resp_buf == NULL) {
-		cERROR(1, "No response for cmd %d mid %d",
-			midQ->command, midQ->mid);
-		if (midQ->midState == MID_REQUEST_SUBMITTED) {
-			if (ses->server->tcpStatus == CifsExiting)
-				rc = -EHOSTDOWN;
-			else {
-				ses->server->tcpStatus = CifsNeedReconnect;
-				midQ->midState = MID_RETRY_NEEDED;
-			}
-		}
+	rc = wait_for_response(ses->server, midQ);
+	if (rc != 0)
+		goto out;
 
-		if (rc != -EHOSTDOWN) {
-			if (midQ->midState == MID_RETRY_NEEDED) {
-				rc = -EAGAIN;
-				cFYI(1, "marking request for retry");
-			} else {
-				rc = -EIO;
-			}
-		}
-		spin_unlock(&GlobalMid_Lock);
-		DeleteMidQEntry(midQ);
-		/* Update # of requests on wire to server */
+	rc = sync_mid_result(midQ, ses->server);
+	if (rc != 0) {
 		atomic_dec(&ses->server->inFlight);
 		wake_up(&ses->server->request_q);
 		return rc;
 	}
 
-	spin_unlock(&GlobalMid_Lock);
 	receive_len = midQ->resp_buf->smb_buf_length;
 
 	if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
@@ -755,36 +783,13 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
 	}
 
 out:
-	DeleteMidQEntry(midQ);
+	delete_mid(midQ);
 	atomic_dec(&ses->server->inFlight);
 	wake_up(&ses->server->request_q);
 
 	return rc;
 }
 
-/* Send an NT_CANCEL SMB to cause the POSIX blocking lock to return. */
-
-static int
-send_nt_cancel(struct cifsTconInfo *tcon, struct smb_hdr *in_buf,
-		struct mid_q_entry *midQ)
-{
-	int rc = 0;
-	struct cifsSesInfo *ses = tcon->ses;
-	__u16 mid = in_buf->Mid;
-
-	header_assemble(in_buf, SMB_COM_NT_CANCEL, tcon, 0);
-	in_buf->Mid = mid;
-	mutex_lock(&ses->server->srv_mutex);
-	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
-	if (rc) {
-		mutex_unlock(&ses->server->srv_mutex);
-		return rc;
-	}
-	rc = smb_send(ses->server, in_buf, in_buf->smb_buf_length);
-	mutex_unlock(&ses->server->srv_mutex);
-	return rc;
-}
-
 /* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
    blocking lock to return. */
 
@@ -807,7 +812,7 @@ send_lock_cancel(const unsigned int xid, struct cifsTconInfo *tcon,
 	pSMB->hdr.Mid = GetNextMid(ses->server);
 
 	return SendReceive(xid, ses, in_buf, out_buf,
-			&bytes_returned, CIFS_STD_OP);
+			&bytes_returned, 0);
 }
 
 int
@@ -845,7 +850,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
 		return -EIO;
 	}
 
-	rc = wait_for_free_request(ses, CIFS_BLOCKING_OP);
+	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP);
 	if (rc)
 		return rc;
 
@@ -863,7 +868,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
 
 	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
 	if (rc) {
-		DeleteMidQEntry(midQ);
+		delete_mid(midQ);
 		mutex_unlock(&ses->server->srv_mutex);
 		return rc;
 	}
@@ -880,7 +885,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
 	mutex_unlock(&ses->server->srv_mutex);
 
 	if (rc < 0) {
-		DeleteMidQEntry(midQ);
+		delete_mid(midQ);
 		return rc;
 	}
 
@@ -899,10 +904,9 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
 		if (in_buf->Command == SMB_COM_TRANSACTION2) {
 			/* POSIX lock. We send a NT_CANCEL SMB to cause the
 			   blocking lock to return. */
-
-			rc = send_nt_cancel(tcon, in_buf, midQ);
+			rc = send_nt_cancel(ses->server, in_buf, midQ);
 			if (rc) {
-				DeleteMidQEntry(midQ);
+				delete_mid(midQ);
 				return rc;
 			}
 		} else {
@@ -914,47 +918,22 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
 			/* If we get -ENOLCK back the lock may have
 			   already been removed. Don't exit in this case. */
 			if (rc && rc != -ENOLCK) {
-				DeleteMidQEntry(midQ);
+				delete_mid(midQ);
 				return rc;
 			}
 		}
 
-		/* Wait 5 seconds for the response. */
-		if (wait_for_response(ses, midQ, 5 * HZ, 5 * HZ) == 0) {
+		if (wait_for_response(ses->server, midQ) == 0) {
 			/* We got the response - restart system call. */
 			rstart = 1;
 		}
 	}
 
-	spin_lock(&GlobalMid_Lock);
-	if (midQ->resp_buf) {
-		spin_unlock(&GlobalMid_Lock);
-		receive_len = midQ->resp_buf->smb_buf_length;
-	} else {
-		cERROR(1, "No response for cmd %d mid %d",
-			midQ->command, midQ->mid);
-		if (midQ->midState == MID_REQUEST_SUBMITTED) {
-			if (ses->server->tcpStatus == CifsExiting)
-				rc = -EHOSTDOWN;
-			else {
-				ses->server->tcpStatus = CifsNeedReconnect;
-				midQ->midState = MID_RETRY_NEEDED;
-			}
-		}
-
-		if (rc != -EHOSTDOWN) {
-			if (midQ->midState == MID_RETRY_NEEDED) {
-				rc = -EAGAIN;
-				cFYI(1, "marking request for retry");
-			} else {
-				rc = -EIO;
-			}
-		}
-		spin_unlock(&GlobalMid_Lock);
-		DeleteMidQEntry(midQ);
+	rc = sync_mid_result(midQ, ses->server);
+	if (rc != 0)
 		return rc;
-	}
 
+	receive_len = midQ->resp_buf->smb_buf_length;
 	if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
 		cERROR(1, "Frame too large received. Length: %d Xid: %d",
 			receive_len, xid);
@@ -1001,7 +980,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
 	BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf));
 
 out:
-	DeleteMidQEntry(midQ);
+	delete_mid(midQ);
 	if (rstart && rc == -EACCES)
 		return -ERESTARTSYS;
 	return rc;
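
Not part of the patch above; an illustrative sketch only. It shows how a hypothetical caller might use the new cifs_call_async() interface. It assumes the companion header change that defines "typedef void (mid_callback_t)(struct mid_q_entry *mid);", and it assumes the demultiplex thread removes the mid from pending_mid_q before invoking the callback, so the callback only has to release resources (the same steps cifs_call_async's own error path performs). All example_* names are hypothetical.

/*
 * Illustrative sketch, not part of the patch. Assumes:
 *   typedef void (mid_callback_t)(struct mid_q_entry *mid);
 * from the companion cifsglob.h change, and <linux/completion.h>.
 */
struct example_ctx {
	struct TCP_Server_Info *server;	/* needed to release the inFlight slot */
	struct completion done;		/* lets a caller wait for the callback */
};

static void
example_callback(struct mid_q_entry *mid)
{
	struct example_ctx *ctx = mid->callback_data;

	/* mid->midState / mid->resp_buf could be examined here */
	DeleteMidQEntry(mid);		/* made non-static by this patch */
	atomic_dec(&ctx->server->inFlight);
	wake_up(&ctx->server->request_q);
	complete(&ctx->done);
}

static int
example_send(struct TCP_Server_Info *server, struct smb_hdr *in_buf)
{
	int rc;
	struct example_ctx ctx = { .server = server };

	init_completion(&ctx.done);

	/* in_buf is assumed to be a fully marshalled SMB request */
	rc = cifs_call_async(server, in_buf, example_callback, &ctx);
	if (rc)
		return rc;

	/* optional: block until the callback has run */
	wait_for_completion(&ctx.done);
	return 0;
}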