@@ -192,13 +192,13 @@ svc_sock_enqueue(struct svc_sock *svsk)
 	svsk->sk_pool = pool;

 	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
-	if (((atomic_read(&svsk->sk_reserved) + serv->sv_bufsz)*2
+	if (((atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg)*2
 	     > svc_sock_wspace(svsk))
 	    && !test_bit(SK_CLOSE, &svsk->sk_flags)
 	    && !test_bit(SK_CONN, &svsk->sk_flags)) {
 		/* Don't enqueue while not enough space for reply */
 		dprintk("svc: socket %p no space, %d*2 > %ld, not enqueued\n",
-			svsk->sk_sk, atomic_read(&svsk->sk_reserved)+serv->sv_bufsz,
+			svsk->sk_sk, atomic_read(&svsk->sk_reserved)+serv->sv_max_mesg,
 			svc_sock_wspace(svsk));
 		svsk->sk_pool = NULL;
 		clear_bit(SK_BUSY, &svsk->sk_flags);
@@ -220,7 +220,7 @@ svc_sock_enqueue(struct svc_sock *svsk)
 				rqstp, rqstp->rq_sock);
 		rqstp->rq_sock = svsk;
 		atomic_inc(&svsk->sk_inuse);
-		rqstp->rq_reserved = serv->sv_bufsz;
+		rqstp->rq_reserved = serv->sv_max_mesg;
 		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
 		BUG_ON(svsk->sk_pool != pool);
 		wake_up(&rqstp->rq_wait);
@@ -639,8 +639,8 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
 		 * which will access the socket.
 		 */
 		svc_sock_setbufsize(svsk->sk_sock,
-				    (serv->sv_nrthreads+3) * serv->sv_bufsz,
-				    (serv->sv_nrthreads+3) * serv->sv_bufsz);
+				    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
+				    (serv->sv_nrthreads+3) * serv->sv_max_mesg);

 	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
 		svc_sock_received(svsk);
@@ -749,8 +749,8 @@ svc_udp_init(struct svc_sock *svsk)
 	 * svc_udp_recvfrom will re-adjust if necessary
 	 */
 	svc_sock_setbufsize(svsk->sk_sock,
-			    3 * svsk->sk_server->sv_bufsz,
-			    3 * svsk->sk_server->sv_bufsz);
+			    3 * svsk->sk_server->sv_max_mesg,
+			    3 * svsk->sk_server->sv_max_mesg);

 	set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
 	set_bit(SK_CHNGBUF, &svsk->sk_flags);
@@ -993,8 +993,8 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
 		 * as soon a a complete request arrives.
 		 */
 		svc_sock_setbufsize(svsk->sk_sock,
-				    (serv->sv_nrthreads+3) * serv->sv_bufsz,
-				    3 * serv->sv_bufsz);
+				    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
+				    3 * serv->sv_max_mesg);

 	clear_bit(SK_DATA, &svsk->sk_flags);

@@ -1032,7 +1032,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
 		}
 		svsk->sk_reclen &= 0x7fffffff;
 		dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
-		if (svsk->sk_reclen > serv->sv_bufsz) {
+		if (svsk->sk_reclen > serv->sv_max_mesg) {
 			printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx (large)\n",
 			       (unsigned long) svsk->sk_reclen);
 			goto err_delete;
@@ -1171,8 +1171,8 @@ svc_tcp_init(struct svc_sock *svsk)
 	 * svc_tcp_recvfrom will re-adjust if necessary
 	 */
 	svc_sock_setbufsize(svsk->sk_sock,
-			    3 * svsk->sk_server->sv_bufsz,
-			    3 * svsk->sk_server->sv_bufsz);
+			    3 * svsk->sk_server->sv_max_mesg,
+			    3 * svsk->sk_server->sv_max_mesg);

 	set_bit(SK_CHNGBUF, &svsk->sk_flags);
 	set_bit(SK_DATA, &svsk->sk_flags);
@@ -1234,7 +1234,7 @@ svc_recv(struct svc_rqst *rqstp, long timeout)


 	/* now allocate needed pages. If we get a failure, sleep briefly */
-	pages = 2 + (serv->sv_bufsz + PAGE_SIZE -1) / PAGE_SIZE;
+	pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
 	for (i=0; i < pages ; i++)
 		while (rqstp->rq_pages[i] == NULL) {
 			struct page *p = alloc_page(GFP_KERNEL);
@@ -1263,7 +1263,7 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
 	if ((svsk = svc_sock_dequeue(pool)) != NULL) {
 		rqstp->rq_sock = svsk;
 		atomic_inc(&svsk->sk_inuse);
-		rqstp->rq_reserved = serv->sv_bufsz;
+		rqstp->rq_reserved = serv->sv_max_mesg;
 		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
 	} else {
 		/* No data pending. Go to sleep */