@@ -666,17 +666,6 @@ static ssize_t aio_run_iocb(struct kiocb *iocb)
 	ssize_t (*retry)(struct kiocb *);
 	ssize_t ret;
 
-	if (iocb->ki_retried++ > 1024*1024) {
-		printk("Maximal retry count. Bytes done %Zd\n",
-			iocb->ki_nbytes - iocb->ki_left);
-		return -EAGAIN;
-	}
-
-	if (!(iocb->ki_retried & 0xff)) {
-		pr_debug("%ld retry: %zd of %zd\n", iocb->ki_retried,
-			iocb->ki_nbytes - iocb->ki_left, iocb->ki_nbytes);
-	}
-
 	if (!(retry = iocb->ki_retry)) {
 		printk("aio_run_iocb: iocb->ki_retry = NULL\n");
 		return 0;
@@ -1005,9 +994,6 @@ int fastcall aio_complete(struct kiocb *iocb, long res, long res2)
 	kunmap_atomic(ring, KM_IRQ1);
 
 	pr_debug("added to ring %p at [%lu]\n", iocb, tail);
-
-	pr_debug("%ld retries: %zd of %zd\n", iocb->ki_retried,
-		iocb->ki_nbytes - iocb->ki_left, iocb->ki_nbytes);
 put_rq:
 	/* everything turned out well, dispose of the aiocb. */
 	ret = __aio_put_req(ctx, iocb);
@@ -1590,7 +1576,6 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 	req->ki_opcode = iocb->aio_lio_opcode;
 	init_waitqueue_func_entry(&req->ki_wait, aio_wake_function);
 	INIT_LIST_HEAD(&req->ki_wait.task_list);
-	req->ki_retried = 0;
 
 	ret = aio_setup_iocb(req);