@@ -32,9 +32,11 @@
  * SOFTWARE.
  */
 
+#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/export.h>
 #include <linux/slab.h>
+#include <linux/kernel.h>
 
 #include <linux/mlx4/cmd.h>
 
@@ -180,7 +182,7 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
 	kfree(buddy->num_free);
 }
 
-static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
+static u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
 {
 	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
 	u32 seg;
@@ -198,6 +200,30 @@ static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
 	return seg;
 }
 
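+/* In multi-function (SR-IOV) mode, MTT allocation goes through an
+ * ALLOC_RES command so the master can track resource usage, instead
+ * of hitting the buddy allocator directly. Returns the first segment
+ * of the range, or -1 on failure. */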
+static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
+{
+	u64 in_param;
+	u64 out_param;
+	int err;
+
+	if (mlx4_is_mfunc(dev)) {
+		set_param_l(&in_param, order);
+		err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
+				   RES_OP_RESERVE_AND_MAP,
+				   MLX4_CMD_ALLOC_RES,
+				   MLX4_CMD_TIME_CLASS_A,
+				   MLX4_CMD_WRAPPED);
+		if (err)
+			return -1;
+		return get_param_l(&out_param);
+	}
+	return __mlx4_alloc_mtt_range(dev, order);
+}
+
 int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
 		  struct mlx4_mtt *mtt)
 {
@@ -221,16 +247,44 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
 }
 EXPORT_SYMBOL_GPL(mlx4_mtt_init);
 
-void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
+static void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg,
+				  int order)
 {
 	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
 
+	mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, order);
+	mlx4_table_put_range(dev, &mr_table->mtt_table, first_seg,
+			     first_seg + (1 << order) - 1);
+}
+
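+/* Counterpart of mlx4_alloc_mtt_range(): slaves return the range via
+ * a FREE_RES command; natively the buddy range and the MTT table
+ * references are released directly. */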
+static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order)
+{
+	u64 in_param;
+	int err;
+
+	if (mlx4_is_mfunc(dev)) {
+		set_param_l(&in_param, first_seg);
+		set_param_h(&in_param, order);
+		err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
+			       MLX4_CMD_FREE_RES,
+			       MLX4_CMD_TIME_CLASS_A,
+			       MLX4_CMD_WRAPPED);
+		if (err)
+			mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
+				  first_seg, order);
+		return;
+	}
+	__mlx4_free_mtt_range(dev, first_seg, order);
+}
+
+void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
+{
 	if (mtt->order < 0)
 		return;
 
-	mlx4_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
-	mlx4_table_put_range(dev, &mr_table->mtt_table, mtt->first_seg,
-			     mtt->first_seg + (1 << mtt->order) - 1);
+	mlx4_free_mtt_range(dev, mtt->first_seg, mtt->order);
 }
 EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);
 
@@ -253,8 +307,9 @@ static u32 key_to_hw_index(u32 key)
 static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
 			  int mpt_index)
 {
-	return mlx4_cmd(dev, mailbox->dma, mpt_index, 0, MLX4_CMD_SW2HW_MPT,
-			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+	return mlx4_cmd(dev, mailbox->dma | dev->caps.function, mpt_index,
+			0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
+			MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -265,58 +320,205 @@ static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
 			  MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
 }
 
-int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
-		  int npages, int page_shift, struct mlx4_mr *mr)
+int mlx4_mr_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+			  u32 *base_mridx)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
-	u32 index;
-	int err;
+	u32 mridx;
 
-	index = mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
-	if (index == -1)
+	mridx = mlx4_bitmap_alloc_range(&priv->mr_table.mpt_bitmap, cnt, align);
+	if (mridx == -1)
 		return -ENOMEM;
 
+	*base_mridx = mridx;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_reserve_range);
+
+void mlx4_mr_release_range(struct mlx4_dev *dev, u32 base_mridx, int cnt)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+
+	mlx4_bitmap_free_range(&priv->mr_table.mpt_bitmap, base_mridx, cnt);
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_release_range);
+
+int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
+			   u64 iova, u64 size, u32 access, int npages,
+			   int page_shift, struct mlx4_mr *mr)
+{
 	mr->iova = iova;
 	mr->size = size;
 	mr->pd = pd;
 	mr->access = access;
-	mr->enabled = 0;
-	mr->key = hw_index_to_key(index);
+	mr->enabled = MLX4_MR_DISABLED;
+	mr->key = hw_index_to_key(mridx);
+
+	return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_alloc_reserved);
+
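+/* Fire a WRITE_MTT firmware command. The mailbox payload starts with
+ * the target MTT offset and a reserved word, followed by num_entries
+ * MTT entries (see mlx4_write_mtt() below). */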
+static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
+			  struct mlx4_cmd_mailbox *mailbox,
+			  int num_entries)
+{
+	return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
+			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+}
+
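+/* MPT index reservation. The __ variant manipulates the bitmap
+ * directly and is only valid on the master/native side; the plain
+ * wrapper routes multi-function requests through the command channel
+ * (RES_MPT/RES_OP_RESERVE). */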
+static int __mlx4_mr_reserve(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+
+	return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
+}
 
-	err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
+static int mlx4_mr_reserve(struct mlx4_dev *dev)
+{
+	u64 out_param;
+
+	if (mlx4_is_mfunc(dev)) {
+		if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
+				 MLX4_CMD_ALLOC_RES,
+				 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
+			return -1;
+		return get_param_l(&out_param);
+	}
+	return __mlx4_mr_reserve(dev);
+}
+
+static void __mlx4_mr_release(struct mlx4_dev *dev, u32 index)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+
+	mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+}
+
+static void mlx4_mr_release(struct mlx4_dev *dev, u32 index)
+{
+	u64 in_param;
+
+	if (mlx4_is_mfunc(dev)) {
+		set_param_l(&in_param, index);
+		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
+			     MLX4_CMD_FREE_RES,
+			     MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
+			mlx4_warn(dev, "Failed to release mr index:%d\n",
+				  index);
+		return;
+	}
+	__mlx4_mr_release(dev, index);
+}
+
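+/* Map/unmap the ICM backing of a single dMPT entry. Slaves go through
+ * RES_OP_MAP_ICM commands so the master keeps the reference counts;
+ * natively this is a plain mlx4_table_get()/mlx4_table_put(). */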
+static int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
+{
+	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+
+	return mlx4_table_get(dev, &mr_table->dmpt_table, index);
+}
+
+static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
+{
+	u64 param;
+
+	if (mlx4_is_mfunc(dev)) {
+		set_param_l(&param, index);
+		return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
+				    MLX4_CMD_ALLOC_RES,
+				    MLX4_CMD_TIME_CLASS_A,
+				    MLX4_CMD_WRAPPED);
+	}
+	return __mlx4_mr_alloc_icm(dev, index);
+}
+
+static void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
+{
+	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+
+	mlx4_table_put(dev, &mr_table->dmpt_table, index);
+}
+
+static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
+{
+	u64 in_param;
+
+	if (mlx4_is_mfunc(dev)) {
+		set_param_l(&in_param, index);
+		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
+			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+			     MLX4_CMD_WRAPPED))
+			mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
+				  index);
+		return;
+	}
+	__mlx4_mr_free_icm(dev, index);
+}
+
+int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
+		  int npages, int page_shift, struct mlx4_mr *mr)
+{
+	u32 index;
+	int err;
+
+	index = mlx4_mr_reserve(dev);
+	if (index == -1)
+		return -ENOMEM;
+
+	err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
+				     access, npages, page_shift, mr);
 	if (err)
-		mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+		mlx4_mr_release(dev, index);
 
 	return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
 
-void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
+void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
 {
-	struct mlx4_priv *priv = mlx4_priv(dev);
 	int err;
 
-	if (mr->enabled) {
+	if (mr->enabled == MLX4_MR_EN_HW) {
 		err = mlx4_HW2SW_MPT(dev, NULL,
 				     key_to_hw_index(mr->key) &
 				     (dev->caps.num_mpts - 1));
 		if (err)
 			mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);
+
+		mr->enabled = MLX4_MR_EN_SW;
 	}
 	mlx4_mtt_cleanup(dev, &mr->mtt);
-	mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, key_to_hw_index(mr->key));
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_free_reserved);
+
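+/* Full teardown: move the MPT back to software ownership (done in
+ * mlx4_mr_free_reserved()), unmap its ICM if it was mapped, then
+ * release the MPT index. */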
+void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
+{
+	mlx4_mr_free_reserved(dev, mr);
+	if (mr->enabled)
+		mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
+	mlx4_mr_release(dev, key_to_hw_index(mr->key));
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_free);
 
 int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
 {
-	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
 	struct mlx4_cmd_mailbox *mailbox;
 	struct mlx4_mpt_entry *mpt_entry;
 	int err;
 
-	err = mlx4_table_get(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
+	err = mlx4_mr_alloc_icm(dev, key_to_hw_index(mr->key));
 	if (err)
 		return err;
 
@@ -363,8 +565,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
 		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
 		goto err_cmd;
 	}
-
-	mr->enabled = 1;
+	mr->enabled = MLX4_MR_EN_HW;
 
 	mlx4_free_cmd_mailbox(dev, mailbox);
 
@@ -374,7 +575,7 @@ err_cmd:
 	mlx4_free_cmd_mailbox(dev, mailbox);
 
 err_table:
-	mlx4_table_put(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
+	mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
 	return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_enable);
@@ -413,27 +614,79 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 	return 0;
 }
 
-int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
-		   int start_index, int npages, u64 *page_list)
+static int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+			    int start_index, int npages, u64 *page_list)
 {
+	int err = 0;
 	int chunk;
-	int err;
-
-	if (mtt->order < 0)
-		return -EINVAL;
 
 	while (npages > 0) {
 		chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);
 		err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
 		if (err)
 			return err;
-
 		npages -= chunk;
 		start_index += chunk;
 		page_list += chunk;
 	}
+	return err;
+}
 
-	return 0;
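+/* In multi-function mode the MTT entries travel in a command mailbox:
+ * inbox[0] carries the absolute starting MTT offset, inbox[1] is
+ * reserved, and the entries follow. Each chunk is sized to fit the
+ * mailbox and clipped so it never straddles a page boundary of the
+ * MTT table. */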
+int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+		   int start_index, int npages, u64 *page_list)
+{
+	struct mlx4_cmd_mailbox *mailbox = NULL;
+	__be64 *inbox = NULL;
+	int chunk;
+	int err = 0;
+	int i;
+
+	if (mtt->order < 0)
+		return -EINVAL;
+
+	if (mlx4_is_mfunc(dev)) {
+		mailbox = mlx4_alloc_cmd_mailbox(dev);
+		if (IS_ERR(mailbox))
+			return PTR_ERR(mailbox);
+		inbox = mailbox->buf;
+
+		while (npages > 0) {
+			int s = mtt->first_seg * dev->caps.mtts_per_seg +
+				start_index;
+			chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) -
+				      dev->caps.mtts_per_seg, npages);
+			if (s / (PAGE_SIZE / sizeof(u64)) !=
+			    (s + chunk - 1) / (PAGE_SIZE / sizeof(u64)))
+				chunk = PAGE_SIZE / sizeof(u64) -
+					(s % (PAGE_SIZE / sizeof(u64)));
+
+			inbox[0] = cpu_to_be64(mtt->first_seg *
+					       dev->caps.mtts_per_seg +
+					       start_index);
+			inbox[1] = 0;
+			for (i = 0; i < chunk; ++i)
+				inbox[i + 2] = cpu_to_be64(page_list[i] |
+							   MLX4_MTT_FLAG_PRESENT);
+			err = mlx4_WRITE_MTT(dev, mailbox, chunk);
+			if (err) {
+				mlx4_free_cmd_mailbox(dev, mailbox);
+				return err;
+			}
+
+			npages -= chunk;
+			start_index += chunk;
+			page_list += chunk;
+		}
+		mlx4_free_cmd_mailbox(dev, mailbox);
+		return err;
+	}
+
+	return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
 }
 EXPORT_SYMBOL_GPL(mlx4_write_mtt);
 
@@ -463,9 +716,18 @@ EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);
 
 int mlx4_init_mr_table(struct mlx4_dev *dev)
 {
-	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_mr_table *mr_table = &priv->mr_table;
 	int err;
 
+	if (!is_power_of_2(dev->caps.num_mpts))
+		return -EINVAL;
+
+	/* Nothing to do for slaves - all MR handling is forwarded
+	 * to the master */
+	if (mlx4_is_slave(dev))
+		return 0;
+
 	err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
 			       ~0, dev->caps.reserved_mrws, 0);
 	if (err)
@@ -477,7 +739,13 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
 		goto err_buddy;
 
 	if (dev->caps.reserved_mtts) {
-		if (mlx4_alloc_mtt_range(dev, fls(dev->caps.reserved_mtts - 1)) == -1) {
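+		/* Remember the base of the reserved range so that
+		 * mlx4_cleanup_mr_table() can return it to the buddy
+		 * allocator. */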
+		priv->reserved_mtts =
+			mlx4_alloc_mtt_range(dev,
+					     fls(dev->caps.reserved_mtts - 1));
+		if (priv->reserved_mtts < 0) {
 			mlx4_warn(dev, "MTT table of order %d is too small.\n",
 				  mr_table->mtt_buddy.max_order);
 			err = -ENOMEM;
@@ -498,8 +766,14 @@ err_buddy:
 
 void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
 {
-	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_mr_table *mr_table = &priv->mr_table;
 
+	if (mlx4_is_slave(dev))
+		return;
+	if (priv->reserved_mtts >= 0)
+		mlx4_free_mtt_range(dev, priv->reserved_mtts,
+				    fls(dev->caps.reserved_mtts - 1));
 	mlx4_buddy_cleanup(&mr_table->mtt_buddy);
 	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
 }
@@ -620,6 +894,49 @@ err_free:
 }
 EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
 
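+/* Like mlx4_fmr_alloc(), but takes the MPT index (mridx) from a range
+ * previously set aside with mlx4_mr_reserve_range() instead of
+ * allocating one from the bitmap. */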
+int mlx4_fmr_alloc_reserved(struct mlx4_dev *dev, u32 mridx,
+			    u32 pd, u32 access, int max_pages,
+			    int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int err = -ENOMEM;
+
+	if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
+		return -EINVAL;
+
+	/* All MTTs must fit in the same page */
+	if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
+		return -EINVAL;
+
+	fmr->page_shift = page_shift;
+	fmr->max_pages = max_pages;
+	fmr->max_maps = max_maps;
+	fmr->maps = 0;
+
+	err = mlx4_mr_alloc_reserved(dev, mridx, pd, 0, 0, access, max_pages,
+				     page_shift, &fmr->mr);
+	if (err)
+		return err;
+
+	fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
+				    fmr->mr.mtt.first_seg,
+				    &fmr->dma_handle);
+	if (!fmr->mtts) {
+		err = -ENOMEM;
+		goto err_free;
+	}
+
+	return 0;
+
+err_free:
+	mlx4_mr_free_reserved(dev, &fmr->mr);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_alloc_reserved);
+
 int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -641,12 +958,30 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
 void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
 		    u32 *lkey, u32 *rkey)
 {
+	struct mlx4_cmd_mailbox *mailbox;
+	int err;
+
 	if (!fmr->maps)
 		return;
 
 	fmr->maps = 0;
 
-	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox)) {
+		err = PTR_ERR(mailbox);
+		mlx4_warn(dev, "mlx4_alloc_cmd_mailbox failed (%d)\n", err);
+		return;
+	}
+
+	err = mlx4_HW2SW_MPT(dev, NULL,
+			     key_to_hw_index(fmr->mr.key) &
+			     (dev->caps.num_mpts - 1));
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	if (err) {
+		mlx4_warn(dev, "mlx4_HW2SW_MPT failed (%d)\n", err);
+		return;
+	}
+	fmr->mr.enabled = MLX4_MR_EN_SW;
 }
 EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
 
@@ -655,13 +990,29 @@ int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
 	if (fmr->maps)
 		return -EBUSY;
 
-	fmr->mr.enabled = 0;
 	mlx4_mr_free(dev, &fmr->mr);
+	fmr->mr.enabled = MLX4_MR_DISABLED;
 
 	return 0;
 }
 EXPORT_SYMBOL_GPL(mlx4_fmr_free);
 
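+/* Like mlx4_fmr_free(), but for FMRs created with
+ * mlx4_fmr_alloc_reserved(): the MPT index stays reserved and is not
+ * returned to the bitmap here (freeing the range is the caller's job,
+ * via mlx4_mr_release_range()). */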
+int mlx4_fmr_free_reserved(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
+{
+	if (fmr->maps)
+		return -EBUSY;
+
+	mlx4_mr_free_reserved(dev, &fmr->mr);
+	fmr->mr.enabled = MLX4_MR_DISABLED;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_free_reserved);
+
 int mlx4_SYNC_TPT(struct mlx4_dev *dev)
 {
 	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,