@@ -34,124 +34,6 @@
 
 #include "mlx4_ib.h"
 
-struct mlx4_ib_db_pgdir {
-	struct list_head	list;
-	DECLARE_BITMAP(order0, MLX4_IB_DB_PER_PAGE);
-	DECLARE_BITMAP(order1, MLX4_IB_DB_PER_PAGE / 2);
-	unsigned long	       *bits[2];
-	__be32		       *db_page;
-	dma_addr_t		db_dma;
-};
-
-static struct mlx4_ib_db_pgdir *mlx4_ib_alloc_db_pgdir(struct mlx4_ib_dev *dev)
-{
-	struct mlx4_ib_db_pgdir *pgdir;
-
-	pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
-	if (!pgdir)
-		return NULL;
-
-	bitmap_fill(pgdir->order1, MLX4_IB_DB_PER_PAGE / 2);
-	pgdir->bits[0] = pgdir->order0;
-	pgdir->bits[1] = pgdir->order1;
-	pgdir->db_page = dma_alloc_coherent(dev->ib_dev.dma_device,
-					    PAGE_SIZE, &pgdir->db_dma,
-					    GFP_KERNEL);
-	if (!pgdir->db_page) {
-		kfree(pgdir);
-		return NULL;
-	}
-
-	return pgdir;
-}
-
-static int mlx4_ib_alloc_db_from_pgdir(struct mlx4_ib_db_pgdir *pgdir,
-				       struct mlx4_ib_db *db, int order)
-{
-	int o;
-	int i;
-
-	for (o = order; o <= 1; ++o) {
-		i = find_first_bit(pgdir->bits[o], MLX4_IB_DB_PER_PAGE >> o);
-		if (i < MLX4_IB_DB_PER_PAGE >> o)
-			goto found;
-	}
-
-	return -ENOMEM;
-
-found:
-	clear_bit(i, pgdir->bits[o]);
-
-	i <<= o;
-
-	if (o > order)
-		set_bit(i ^ 1, pgdir->bits[order]);
-
-	db->u.pgdir = pgdir;
-	db->index   = i;
-	db->db      = pgdir->db_page + db->index;
-	db->dma     = pgdir->db_dma + db->index * 4;
-	db->order   = order;
-
-	return 0;
-}
-
-int mlx4_ib_db_alloc(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db, int order)
-{
-	struct mlx4_ib_db_pgdir *pgdir;
-	int ret = 0;
-
-	mutex_lock(&dev->pgdir_mutex);
-
-	list_for_each_entry(pgdir, &dev->pgdir_list, list)
-		if (!mlx4_ib_alloc_db_from_pgdir(pgdir, db, order))
-			goto out;
-
-	pgdir = mlx4_ib_alloc_db_pgdir(dev);
-	if (!pgdir) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	list_add(&pgdir->list, &dev->pgdir_list);
-
-	/* This should never fail -- we just allocated an empty page: */
-	WARN_ON(mlx4_ib_alloc_db_from_pgdir(pgdir, db, order));
-
-out:
-	mutex_unlock(&dev->pgdir_mutex);
-
-	return ret;
-}
-
-void mlx4_ib_db_free(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db)
-{
-	int o;
-	int i;
-
-	mutex_lock(&dev->pgdir_mutex);
-
-	o = db->order;
-	i = db->index;
-
-	if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
-		clear_bit(i ^ 1, db->u.pgdir->order0);
-		++o;
-	}
-
-	i >>= o;
-	set_bit(i, db->u.pgdir->bits[o]);
-
-	if (bitmap_full(db->u.pgdir->order1, MLX4_IB_DB_PER_PAGE / 2)) {
-		dma_free_coherent(dev->ib_dev.dma_device, PAGE_SIZE,
-				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
-		list_del(&db->u.pgdir->list);
-		kfree(db->u.pgdir);
-	}
-
-	mutex_unlock(&dev->pgdir_mutex);
-}
-
 struct mlx4_ib_user_db_page {
 	struct list_head	list;
 	struct ib_umem	       *umem;
@@ -160,7 +42,7 @@ struct mlx4_ib_user_db_page {
 };
 
 int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
-			struct mlx4_ib_db *db)
+			struct mlx4_db *db)
 {
 	struct mlx4_ib_user_db_page *page;
 	struct ib_umem_chunk	   *chunk;
@@ -202,7 +84,7 @@ out:
 	return err;
 }
 
-void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_ib_db *db)
+void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db)
 {
 	mutex_lock(&context->db_page_mutex);
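
The 118 lines removed above are a small two-order buddy allocator: each pgdir covers one DMA-coherent page of doorbell records, the order-1 bitmap tracks free aligned pairs, and an order-0 request splits a pair and parks the buddy in the order-0 bitmap; freeing recombines buddies, and a page that becomes entirely free is returned via dma_free_coherent(). Since db_page is an array of __be32, each record is 4 bytes, hence db_dma + index * 4 for the DMA address. The switch to struct mlx4_db in the surviving hunks suggests this logic moves into the mlx4 core driver rather than disappearing. The standalone userspace sketch below illustrates only the bitmap arithmetic; DB_PER_PAGE, struct pgdir_sim, db_alloc() and db_free() are hypothetical names chosen for illustration, and plain bool arrays stand in for the kernel's DECLARE_BITMAP() plus set_bit()/clear_bit()/find_first_bit().

/* Illustrative sketch only -- not kernel code. Build with: cc -std=c99 sketch.c */
#include <stdio.h>
#include <stdbool.h>

#define DB_PER_PAGE 128  /* stands in for MLX4_IB_DB_PER_PAGE */

struct pgdir_sim {
	bool order0[DB_PER_PAGE];      /* free single (order-0) slots */
	bool order1[DB_PER_PAGE / 2];  /* free aligned pairs (order-1) */
};

static void pgdir_init(struct pgdir_sim *p)
{
	/* A fresh page is all free order-1 pairs and no loose order-0
	 * slots -- mirrors bitmap_fill(pgdir->order1, ...) above. */
	for (int i = 0; i < DB_PER_PAGE / 2; ++i)
		p->order1[i] = true;
	for (int i = 0; i < DB_PER_PAGE; ++i)
		p->order0[i] = false;
}

/* Allocate a slot of the given order (0 or 1); returns index or -1. */
static int db_alloc(struct pgdir_sim *p, int order)
{
	bool *bits[2] = { p->order0, p->order1 };

	/* Try the requested order first, then split a larger chunk. */
	for (int o = order; o <= 1; ++o) {
		for (int i = 0; i < DB_PER_PAGE >> o; ++i) {
			if (!bits[o][i])
				continue;
			bits[o][i] = false;        /* claim the chunk */
			int idx = i << o;          /* slot index within page */
			if (o > order)             /* split: free the buddy */
				bits[order][idx ^ 1] = true;
			return idx;
		}
	}
	return -1; /* page full; the kernel code then allocates a new pgdir */
}

/* Free a slot, merging an order-0 slot with its buddy when possible. */
static void db_free(struct pgdir_sim *p, int index, int order)
{
	bool *bits[2] = { p->order0, p->order1 };
	int o = index, i = index;

	o = order;
	if (order == 0 && p->order0[i ^ 1]) {
		p->order0[i ^ 1] = false;  /* absorb the free buddy */
		++o;                       /* and free the whole pair */
	}
	bits[o][i >> o] = true;
}

int main(void)
{
	struct pgdir_sim p;

	pgdir_init(&p);
	int a = db_alloc(&p, 0);  /* splits pair 0: returns 0, parks slot 1 */
	int b = db_alloc(&p, 0);  /* reuses the parked buddy: returns 1 */
	printf("a=%d b=%d\n", a, b);
	db_free(&p, a, 0);        /* no merge yet: slot 1 still in use */
	db_free(&p, b, 0);        /* merges back into order-1 pair 0 */
	return 0;
}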