@@ -38,40 +38,6 @@
 #include <linux/pagevec.h>
 #include <linux/writeback.h>
 
-
-/*
- * Prime number of hash buckets since address is used as the key.
- */
-#define NVSYNC		37
-#define to_ioend_wq(v)	(&xfs_ioend_wq[((unsigned long)v) % NVSYNC])
-static wait_queue_head_t xfs_ioend_wq[NVSYNC];
-
-void __init
-xfs_ioend_init(void)
-{
-	int i;
-
-	for (i = 0; i < NVSYNC; i++)
-		init_waitqueue_head(&xfs_ioend_wq[i]);
-}
-
-void
-xfs_ioend_wait(
-	xfs_inode_t	*ip)
-{
-	wait_queue_head_t *wq = to_ioend_wq(ip);
-
-	wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
-}
-
-STATIC void
-xfs_ioend_wake(
-	xfs_inode_t	*ip)
-{
-	if (atomic_dec_and_test(&ip->i_iocount))
-		wake_up(to_ioend_wq(ip));
-}
-
 void
 xfs_count_page_state(
 	struct page		*page,
@@ -115,7 +81,6 @@ xfs_destroy_ioend(
 	xfs_ioend_t		*ioend)
 {
 	struct buffer_head	*bh, *next;
-	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
 
 	for (bh = ioend->io_buffer_head; bh; bh = next) {
 		next = bh->b_private;
@@ -127,7 +92,7 @@ xfs_destroy_ioend(
 			aio_complete(ioend->io_iocb, ioend->io_result, 0);
 		inode_dio_done(ioend->io_inode);
 	}
-	xfs_ioend_wake(ip);
+
 	mempool_free(ioend, xfs_ioend_pool);
 }
 
@@ -298,7 +263,6 @@ xfs_alloc_ioend(
 	ioend->io_inode = inode;
 	ioend->io_buffer_head = NULL;
 	ioend->io_buffer_tail = NULL;
-	atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
 	ioend->io_offset = 0;
 	ioend->io_size = 0;
 	ioend->io_iocb = NULL;
@@ -558,7 +522,6 @@ xfs_cancel_ioend(
 			unlock_buffer(bh);
 		} while ((bh = next_bh) != NULL);
 
-		xfs_ioend_wake(XFS_I(ioend->io_inode));
 		mempool_free(ioend, xfs_ioend_pool);
 	} while ((ioend = next) != NULL);
 }
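
For readers unfamiliar with the pattern being deleted above: the removed code kept a prime-sized array of wait queues indexed by the inode's address, so xfs_ioend_wait() could sleep until the inode's i_iocount counter reached zero and xfs_ioend_wake() could wake the matching bucket on the final decrement. Below is a minimal userspace sketch of that hashed wait-bucket idiom using POSIX threads; it is an illustration only, not the kernel implementation, and every name in it (NBUCKETS, struct iowait_bucket, iowait_init, obj_iowait, obj_iodone) is invented for this example.

/*
 * Userspace sketch of the hashed wait-bucket pattern (illustration only).
 * A fixed, prime number of mutex/condvar pairs is indexed by the object's
 * address; waiters sleep on the bucket until the object's counter reaches
 * zero, and the final decrement broadcasts on the same bucket.
 * Build with: cc -c -pthread iowait_sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>

#define NBUCKETS	37	/* prime, since the pointer value is the hash key */

struct iowait_bucket {
	pthread_mutex_t	lock;
	pthread_cond_t	cond;
};

static struct iowait_bucket	iowait_buckets[NBUCKETS];

static struct iowait_bucket *to_bucket(const void *obj)
{
	return &iowait_buckets[(unsigned long)obj % NBUCKETS];
}

/* Call once at startup, analogous to initialising each wait queue head. */
static void iowait_init(void)
{
	int i;

	for (i = 0; i < NBUCKETS; i++) {
		pthread_mutex_init(&iowait_buckets[i].lock, NULL);
		pthread_cond_init(&iowait_buckets[i].cond, NULL);
	}
}

/* Sleep until the object's in-flight counter drops to zero. */
static void obj_iowait(const void *obj, atomic_int *count)
{
	struct iowait_bucket	*b = to_bucket(obj);

	pthread_mutex_lock(&b->lock);
	while (atomic_load(count) != 0)
		pthread_cond_wait(&b->cond, &b->lock);
	pthread_mutex_unlock(&b->lock);
}

/* Drop one reference; wake the bucket when the last one goes away. */
static void obj_iodone(const void *obj, atomic_int *count)
{
	struct iowait_bucket	*b = to_bucket(obj);

	if (atomic_fetch_sub(count, 1) == 1) {
		pthread_mutex_lock(&b->lock);
		pthread_cond_broadcast(&b->cond);
		pthread_mutex_unlock(&b->lock);
	}
}

Unrelated objects that hash to the same bucket may see spurious wakeups, which is harmless because each waiter re-checks its own counter; the prime bucket count merely spreads pointer values more evenly, which is the point of the "prime number of hash buckets" comment removed by the patch.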