@@ -160,7 +160,7 @@ static int aio_setup_ring(struct kioctx *ctx)
 
 	info->nr = nr_events;		/* trusted copy */
 
-	ring = kmap_atomic(info->ring_pages[0], KM_USER0);
+	ring = kmap_atomic(info->ring_pages[0]);
 	ring->nr = nr_events;	/* user copy */
 	ring->id = ctx->user_id;
 	ring->head = ring->tail = 0;
@@ -168,32 +168,32 @@ static int aio_setup_ring(struct kioctx *ctx)
 	ring->compat_features = AIO_RING_COMPAT_FEATURES;
 	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
 	ring->header_length = sizeof(struct aio_ring);
-	kunmap_atomic(ring, KM_USER0);
+	kunmap_atomic(ring);
 
 	return 0;
 }
 
 
 /* aio_ring_event: returns a pointer to the event at the given index from
- * kmap_atomic(, km). Release the pointer with put_aio_ring_event();
+ * kmap_atomic(). Release the pointer with put_aio_ring_event();
  */
 #define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
 #define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
 #define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
 
-#define aio_ring_event(info, nr, km) ({					\
+#define aio_ring_event(info, nr) ({					\
 	unsigned pos = (nr) + AIO_EVENTS_OFFSET;			\
 	struct io_event *__event;					\
 	__event = kmap_atomic(						\
-			(info)->ring_pages[pos / AIO_EVENTS_PER_PAGE], km); \
+			(info)->ring_pages[pos / AIO_EVENTS_PER_PAGE]); \
 	__event += pos % AIO_EVENTS_PER_PAGE;				\
 	__event;							\
 })
 
-#define put_aio_ring_event(event, km) do {	\
+#define put_aio_ring_event(event) do {		\
 	struct io_event *__event = (event);	\
 	(void)__event;				\
-	kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
+	kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK)); \
 } while(0)
 
 static void ctx_rcu_free(struct rcu_head *head)
@@ -1019,10 +1019,10 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
 	if (kiocbIsCancelled(iocb))
 		goto put_rq;
 
-	ring = kmap_atomic(info->ring_pages[0], KM_IRQ1);
+	ring = kmap_atomic(info->ring_pages[0]);
 
 	tail = info->tail;
-	event = aio_ring_event(info, tail, KM_IRQ0);
+	event = aio_ring_event(info, tail);
 	if (++tail >= info->nr)
 		tail = 0;
 
@@ -1043,8 +1043,8 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
 	info->tail = tail;
 	ring->tail = tail;
 
-	put_aio_ring_event(event, KM_IRQ0);
-	kunmap_atomic(ring, KM_IRQ1);
+	put_aio_ring_event(event);
+	kunmap_atomic(ring);
 
 	pr_debug("added to ring %p at [%lu]\n", iocb, tail);
 
@@ -1089,7 +1089,7 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
 	unsigned long head;
 	int ret = 0;
 
-	ring = kmap_atomic(info->ring_pages[0], KM_USER0);
+	ring = kmap_atomic(info->ring_pages[0]);
 	dprintk("in aio_read_evt h%lu t%lu m%lu\n",
 		 (unsigned long)ring->head, (unsigned long)ring->tail,
 		 (unsigned long)ring->nr);
@@ -1101,18 +1101,18 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
 
 	head = ring->head % info->nr;
 	if (head != ring->tail) {
-		struct io_event *evp = aio_ring_event(info, head, KM_USER1);
+		struct io_event *evp = aio_ring_event(info, head);
 		*ent = *evp;
 		head = (head + 1) % info->nr;
 		smp_mb(); /* finish reading the event before updatng the head */
 		ring->head = head;
 		ret = 1;
-		put_aio_ring_event(evp, KM_USER1);
+		put_aio_ring_event(evp);
 	}
 	spin_unlock(&info->ring_lock);
 
 out:
-	kunmap_atomic(ring, KM_USER0);
+	kunmap_atomic(ring);
 	dprintk("leaving aio_read_evt: %d h%lu t%lu\n", ret,
 		(unsigned long)ring->head, (unsigned long)ring->tail);
 	return ret;