@@ -50,11 +50,7 @@
#include "target_core_pr.h"
#include "target_core_ua.h"

-static void se_dev_start(struct se_device *dev);
-static void se_dev_stop(struct se_device *dev);
-
static struct se_hba *lun0_hba;
-static struct se_subsystem_dev *lun0_su_dev;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

@@ -136,15 +132,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_cmd->orig_fe_lun = 0;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
- /*
- * Determine if the struct se_lun is online.
- * FIXME: Check for LUN_RESET + UNIT Attention
- */
- if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
- se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- return -ENODEV;
- }

/* Directly associate cmd with se_dev */
se_cmd->se_dev = se_lun->lun_se_dev;
@@ -202,14 +189,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
return -ENODEV;
}
- /*
- * Determine if the struct se_lun is online.
- * FIXME: Check for LUN_RESET + UNIT Attention
- */
- if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- return -ENODEV;
- }

/* Directly associate cmd with se_dev */
se_cmd->se_dev = se_lun->lun_se_dev;
@@ -565,7 +544,6 @@ static void core_export_port(
struct se_port *port,
struct se_lun *lun)
{
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

spin_lock(&dev->se_port_lock);
@@ -578,7 +556,7 @@ static void core_export_port(
list_add_tail(&port->sep_list, &dev->dev_sep_list);
spin_unlock(&dev->se_port_lock);

- if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
+ if (dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
pr_err("Unable to allocate t10_alua_tg_pt"
@@ -587,7 +565,7 @@ static void core_export_port(
}
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
- su_dev->t10_alua.default_tg_pt_gp);
+ dev->t10_alua.default_tg_pt_gp);
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
pr_debug("%s/%s: Adding to default ALUA Target Port"
" Group: alua/default_tg_pt_gp\n",
@@ -625,6 +603,7 @@ int core_dev_export(
struct se_portal_group *tpg,
struct se_lun *lun)
{
+ struct se_hba *hba = dev->se_hba;
struct se_port *port;

port = core_alloc_port(dev);
@@ -632,9 +611,11 @@ int core_dev_export(
return PTR_ERR(port);

lun->lun_se_dev = dev;
- se_dev_start(dev);

- atomic_inc(&dev->dev_export_obj.obj_access_count);
+ spin_lock(&hba->device_lock);
+ dev->export_count++;
+ spin_unlock(&hba->device_lock);
+
core_export_port(dev, tpg, port, lun);
return 0;
}
@@ -644,6 +625,7 @@ void core_dev_unexport(
struct se_portal_group *tpg,
struct se_lun *lun)
{
+ struct se_hba *hba = dev->se_hba;
struct se_port *port = lun->lun_sep;

spin_lock(&lun->lun_sep_lock);
@@ -654,11 +636,13 @@ void core_dev_unexport(
spin_unlock(&lun->lun_sep_lock);

spin_lock(&dev->se_port_lock);
- atomic_dec(&dev->dev_export_obj.obj_access_count);
core_release_port(dev, port);
spin_unlock(&dev->se_port_lock);

- se_dev_stop(dev);
+ spin_lock(&hba->device_lock);
+ dev->export_count--;
+ spin_unlock(&hba->device_lock);
+
lun->lun_se_dev = NULL;
}

@@ -725,127 +709,17 @@ done:
return 0;
}

-/* se_release_device_for_hba():
- *
- *
- */
-void se_release_device_for_hba(struct se_device *dev)
-{
- struct se_hba *hba = dev->se_hba;
-
- if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
- (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
- (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
- (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
- (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
- se_dev_stop(dev);
-
- if (dev->dev_ptr) {
- destroy_workqueue(dev->tmr_wq);
- if (dev->transport->free_device)
- dev->transport->free_device(dev->dev_ptr);
- }
-
- spin_lock(&hba->device_lock);
- list_del(&dev->dev_list);
- hba->dev_count--;
- spin_unlock(&hba->device_lock);
-
- core_scsi3_free_all_registrations(dev);
- se_release_vpd_for_dev(dev);
-
- kfree(dev);
-}
-
-void se_release_vpd_for_dev(struct se_device *dev)
+static void se_release_vpd_for_dev(struct se_device *dev)
{
struct t10_vpd *vpd, *vpd_tmp;

- spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
+ spin_lock(&dev->t10_wwn.t10_vpd_lock);
list_for_each_entry_safe(vpd, vpd_tmp,
- &dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) {
+ &dev->t10_wwn.t10_vpd_list, vpd_list) {
list_del(&vpd->vpd_list);
kfree(vpd);
}
- spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
-}
-
-/* se_free_virtual_device():
- *
- * Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
- */
-int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
-{
- if (!list_empty(&dev->dev_sep_list))
- dump_stack();
-
- core_alua_free_lu_gp_mem(dev);
- se_release_device_for_hba(dev);
-
- return 0;
-}
-
-static void se_dev_start(struct se_device *dev)
-{
- struct se_hba *hba = dev->se_hba;
-
- spin_lock(&hba->device_lock);
- atomic_inc(&dev->dev_obj.obj_access_count);
- if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
- if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
- dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
- dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
- } else if (dev->dev_status &
- TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
- dev->dev_status &=
- ~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
- dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
- }
- }
- spin_unlock(&hba->device_lock);
-}
-
-static void se_dev_stop(struct se_device *dev)
-{
- struct se_hba *hba = dev->se_hba;
-
- spin_lock(&hba->device_lock);
- atomic_dec(&dev->dev_obj.obj_access_count);
- if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
- if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
- dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
- dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
- } else if (dev->dev_status &
- TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
- dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
- dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
- }
- }
- spin_unlock(&hba->device_lock);
-}
-
-int se_dev_check_online(struct se_device *dev)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&dev->dev_status_lock, flags);
- ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
- (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
- spin_unlock_irqrestore(&dev->dev_status_lock, flags);
-
- return ret;
-}
-
-int se_dev_check_shutdown(struct se_device *dev)
-{
- int ret;
-
- spin_lock_irq(&dev->dev_status_lock);
- ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
- spin_unlock_irq(&dev->dev_status_lock);
-
- return ret;
+ spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
@@ -866,72 +740,13 @@ static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
return aligned_max_sectors;
}

-void se_dev_set_default_attribs(
- struct se_device *dev,
- struct se_dev_limits *dev_limits)
-{
- struct queue_limits *limits = &dev_limits->limits;
-
- dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO;
- dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
- dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
- dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
- dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
- dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS;
- dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU;
- dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
- dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS;
- dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA;
- dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
- dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT;
- dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
- /*
- * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
- * iblock_create_virtdevice() from struct queue_limits values
- * if blk_queue_discard()==1
- */
- dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
- dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
- DA_MAX_UNMAP_BLOCK_DESC_COUNT;
- dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
- dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
- DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
- /*
- * block_size is based on subsystem plugin dependent requirements.
- */
- dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
- dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
- /*
- * Align max_hw_sectors down to PAGE_SIZE I/O transfers
- */
- limits->max_hw_sectors = se_dev_align_max_sectors(limits->max_hw_sectors,
- limits->logical_block_size);
- dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
-
- /*
- * Set fabric_max_sectors, which is reported in block limits
- * VPD page (B0h).
- */
- dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
- /*
- * Set optimal_sectors from fabric_max_sectors, which can be
- * lowered via configfs.
- */
- dev->se_sub_dev->se_dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
- /*
- * queue_depth is based on subsystem plugin dependent requirements.
- */
- dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth;
- dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
-}
-
int se_dev_set_max_unmap_lba_count(
struct se_device *dev,
u32 max_unmap_lba_count)
{
- dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
+ dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
- dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count);
+ dev, dev->dev_attrib.max_unmap_lba_count);
return 0;
}

@@ -939,10 +754,10 @@ int se_dev_set_max_unmap_block_desc_count(
struct se_device *dev,
u32 max_unmap_block_desc_count)
{
- dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
+ dev->dev_attrib.max_unmap_block_desc_count =
max_unmap_block_desc_count;
pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
- dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count);
+ dev, dev->dev_attrib.max_unmap_block_desc_count);
return 0;
}

@@ -950,9 +765,9 @@ int se_dev_set_unmap_granularity(
struct se_device *dev,
u32 unmap_granularity)
{
- dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity;
+ dev->dev_attrib.unmap_granularity = unmap_granularity;
pr_debug("dev[%p]: Set unmap_granularity: %u\n",
- dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity);
+ dev, dev->dev_attrib.unmap_granularity);
return 0;
}

@@ -960,9 +775,9 @@ int se_dev_set_unmap_granularity_alignment(
struct se_device *dev,
u32 unmap_granularity_alignment)
{
- dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
+ dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
- dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment);
+ dev, dev->dev_attrib.unmap_granularity_alignment);
return 0;
}

@@ -993,9 +808,9 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
pr_err("emulate_fua_write not supported for pSCSI\n");
return -EINVAL;
}
- dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
+ dev->dev_attrib.emulate_fua_write = flag;
pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
- dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write);
+ dev, dev->dev_attrib.emulate_fua_write);
return 0;
}

@@ -1025,9 +840,9 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
pr_err("emulate_write_cache not supported for pSCSI\n");
return -EINVAL;
}
- dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
+ dev->dev_attrib.emulate_write_cache = flag;
pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
- dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache);
+ dev, dev->dev_attrib.emulate_write_cache);
return 0;
}

@@ -1038,16 +853,15 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
return -EINVAL;
}

- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device"
- " UA_INTRLCK_CTRL while dev_export_obj: %d count"
- " exists\n", dev,
- atomic_read(&dev->dev_export_obj.obj_access_count));
+ " UA_INTRLCK_CTRL while export_count is %d\n",
+ dev, dev->export_count);
return -EINVAL;
}
- dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag;
+ dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
- dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl);
+ dev, dev->dev_attrib.emulate_ua_intlck_ctrl);

return 0;
}
@@ -1059,15 +873,15 @@ int se_dev_set_emulate_tas(struct se_device *dev, int flag)
return -EINVAL;
}

- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device TAS while"
- " dev_export_obj: %d count exists\n", dev,
- atomic_read(&dev->dev_export_obj.obj_access_count));
+ " export_count is %d\n",
+ dev, dev->export_count);
return -EINVAL;
}
- dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
+ dev->dev_attrib.emulate_tas = flag;
pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
- dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
+ dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled");

return 0;
}
@@ -1082,12 +896,12 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
* We expect this value to be non-zero when generic Block Layer
* Discard supported is detected iblock_create_virtdevice().
*/
- if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+ if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}

- dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag;
+ dev->dev_attrib.emulate_tpu = flag;
pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
dev, flag);
return 0;
@@ -1103,12 +917,12 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
* We expect this value to be non-zero when generic Block Layer
* Discard supported is detected iblock_create_virtdevice().
*/
- if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+ if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}

- dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag;
+ dev->dev_attrib.emulate_tpws = flag;
pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
dev, flag);
return 0;
@@ -1120,9 +934,9 @@ int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
- dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag;
+ dev->dev_attrib.enforce_pr_isids = flag;
pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
- (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
+ (dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
return 0;
}

@@ -1132,7 +946,7 @@ int se_dev_set_is_nonrot(struct se_device *dev, int flag)
printk(KERN_ERR "Illegal value %d\n", flag);
return -EINVAL;
}
- dev->se_sub_dev->se_dev_attrib.is_nonrot = flag;
+ dev->dev_attrib.is_nonrot = flag;
pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
dev, flag);
return 0;
@@ -1145,7 +959,7 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
" reordering not implemented\n", dev);
return -ENOSYS;
}
- dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = flag;
+ dev->dev_attrib.emulate_rest_reord = flag;
pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
return 0;
}
@@ -1155,10 +969,10 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
*/
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device TCQ while"
- " dev_export_obj: %d count exists\n", dev,
- atomic_read(&dev->dev_export_obj.obj_access_count));
+ " export_count is %d\n",
+ dev, dev->export_count);
return -EINVAL;
}
if (!queue_depth) {
@@ -1168,26 +982,26 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
}

if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
+ if (queue_depth > dev->dev_attrib.hw_queue_depth) {
pr_err("dev[%p]: Passed queue_depth: %u"
" exceeds TCM/SE_Device TCQ: %u\n",
dev, queue_depth,
- dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
+ dev->dev_attrib.hw_queue_depth);
return -EINVAL;
}
} else {
- if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) {
- if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
+ if (queue_depth > dev->dev_attrib.queue_depth) {
+ if (queue_depth > dev->dev_attrib.hw_queue_depth) {
pr_err("dev[%p]: Passed queue_depth:"
" %u exceeds TCM/SE_Device MAX"
" TCQ: %u\n", dev, queue_depth,
- dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
+ dev->dev_attrib.hw_queue_depth);
return -EINVAL;
}
}
}

- dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
+ dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
dev, queue_depth);
return 0;
@@ -1195,10 +1009,10 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)

int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
{
- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device"
- " fabric_max_sectors while dev_export_obj: %d count exists\n",
- dev, atomic_read(&dev->dev_export_obj.obj_access_count));
+ " fabric_max_sectors while export_count is %d\n",
+ dev, dev->export_count);
return -EINVAL;
}
if (!fabric_max_sectors) {
@@ -1213,11 +1027,11 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
return -EINVAL;
}
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- if (fabric_max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
+ if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) {
pr_err("dev[%p]: Passed fabric_max_sectors: %u"
" greater than TCM/SE_Device max_sectors:"
" %u\n", dev, fabric_max_sectors,
- dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+ dev->dev_attrib.hw_max_sectors);
return -EINVAL;
}
} else {
@@ -1233,9 +1047,9 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
* Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
*/
fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
- dev->se_sub_dev->se_dev_attrib.block_size);
+ dev->dev_attrib.block_size);

- dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = fabric_max_sectors;
+ dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
dev, fabric_max_sectors);
return 0;
@@ -1243,10 +1057,10 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)

int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device"
- " optimal_sectors while dev_export_obj: %d count exists\n",
- dev, atomic_read(&dev->dev_export_obj.obj_access_count));
+ " optimal_sectors while export_count is %d\n",
+ dev, dev->export_count);
return -EINVAL;
}
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
@@ -1254,14 +1068,14 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
" changed for TCM/pSCSI\n", dev);
return -EINVAL;
}
- if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) {
+ if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
" greater than fabric_max_sectors: %u\n", dev,
- optimal_sectors, dev->se_sub_dev->se_dev_attrib.fabric_max_sectors);
+ optimal_sectors, dev->dev_attrib.fabric_max_sectors);
return -EINVAL;
}

- dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors;
+ dev->dev_attrib.optimal_sectors = optimal_sectors;
pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
dev, optimal_sectors);
return 0;
@@ -1269,10 +1083,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)

int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device block_size"
- " while dev_export_obj: %d count exists\n", dev,
- atomic_read(&dev->dev_export_obj.obj_access_count));
+ " while export_count is %d\n",
+ dev, dev->export_count);
return -EINVAL;
}

@@ -1293,7 +1107,7 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
return -EINVAL;
}

- dev->se_sub_dev->se_dev_attrib.block_size = block_size;
+ dev->dev_attrib.block_size = block_size;
pr_debug("dev[%p]: SE Device block_size changed to %u\n",
dev, block_size);
return 0;
@@ -1307,12 +1121,6 @@ struct se_lun *core_dev_add_lun(
struct se_lun *lun_p;
int rc;

- if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
- pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
- atomic_read(&dev->dev_access_obj.obj_access_count));
- return ERR_PTR(-EACCES);
- }
-
lun_p = core_tpg_pre_addlun(tpg, lun);
if (IS_ERR(lun_p))
return lun_p;
@@ -1568,12 +1376,220 @@ void core_dev_free_initiator_node_lun_acl(
kfree(lacl);
}

+static void scsi_dump_inquiry(struct se_device *dev)
+{
+ struct t10_wwn *wwn = &dev->t10_wwn;
+ char buf[17];
+ int i, device_type;
+ /*
+ * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
+ */
+ for (i = 0; i < 8; i++)
+ if (wwn->vendor[i] >= 0x20)
+ buf[i] = wwn->vendor[i];
+ else
+ buf[i] = ' ';
+ buf[i] = '\0';
+ pr_debug(" Vendor: %s\n", buf);
+
+ for (i = 0; i < 16; i++)
+ if (wwn->model[i] >= 0x20)
+ buf[i] = wwn->model[i];
+ else
+ buf[i] = ' ';
+ buf[i] = '\0';
+ pr_debug(" Model: %s\n", buf);
+
+ for (i = 0; i < 4; i++)
+ if (wwn->revision[i] >= 0x20)
+ buf[i] = wwn->revision[i];
+ else
+ buf[i] = ' ';
+ buf[i] = '\0';
+ pr_debug(" Revision: %s\n", buf);
+
+ device_type = dev->transport->get_device_type(dev);
+ pr_debug(" Type: %s ", scsi_device_type(device_type));
+ pr_debug(" ANSI SCSI revision: %02x\n",
+ dev->transport->get_device_rev(dev));
+}
+
+struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
+{
+ struct se_device *dev;
+
+ dev = hba->transport->alloc_device(hba, name);
+ if (!dev)
+ return NULL;
+
+ dev->se_hba = hba;
+ dev->transport = hba->transport;
+
+ INIT_LIST_HEAD(&dev->dev_list);
+ INIT_LIST_HEAD(&dev->dev_sep_list);
+ INIT_LIST_HEAD(&dev->dev_tmr_list);
+ INIT_LIST_HEAD(&dev->delayed_cmd_list);
+ INIT_LIST_HEAD(&dev->state_list);
+ INIT_LIST_HEAD(&dev->qf_cmd_list);
+ spin_lock_init(&dev->stats_lock);
+ spin_lock_init(&dev->execute_task_lock);
+ spin_lock_init(&dev->delayed_cmd_lock);
+ spin_lock_init(&dev->dev_reservation_lock);
+ spin_lock_init(&dev->se_port_lock);
+ spin_lock_init(&dev->se_tmr_lock);
+ spin_lock_init(&dev->qf_cmd_lock);
+ atomic_set(&dev->dev_ordered_id, 0);
+ INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
+ spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
+ INIT_LIST_HEAD(&dev->t10_pr.registration_list);
+ INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
+ spin_lock_init(&dev->t10_pr.registration_lock);
+ spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
+ INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
+ spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
+
+ dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
+ dev->t10_wwn.t10_dev = dev;
+ dev->t10_alua.t10_dev = dev;
+
+ dev->dev_attrib.da_dev = dev;
+ dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
+ dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
+ dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
+ dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
+ dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
+ dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
+ dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
+ dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
+ dev->dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS;
+ dev->dev_attrib.emulate_alua = DA_EMULATE_ALUA;
+ dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
+ dev->dev_attrib.is_nonrot = DA_IS_NONROT;
+ dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
+ dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
+ dev->dev_attrib.max_unmap_block_desc_count =
+ DA_MAX_UNMAP_BLOCK_DESC_COUNT;
+ dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
+ dev->dev_attrib.unmap_granularity_alignment =
+ DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
+ dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
+ dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
+
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
+ else
+ dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
+
+ return dev;
+}
+
+int target_configure_device(struct se_device *dev)
+{
+ struct se_hba *hba = dev->se_hba;
+ int ret;
+
+ if (dev->dev_flags & DF_CONFIGURED) {
+ pr_err("se_dev->se_dev_ptr already set for storage"
+ " object\n");
+ return -EEXIST;
+ }
+
+ ret = dev->transport->configure_device(dev);
+ if (ret)
+ goto out;
+ dev->dev_flags |= DF_CONFIGURED;
+
+ /*
+ * XXX: there is not much point to have two different values here..
+ */
+ dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
+ dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;
+
+ /*
+ * Align max_hw_sectors down to PAGE_SIZE I/O transfers
+ */
+ dev->dev_attrib.hw_max_sectors =
+ se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
+ dev->dev_attrib.hw_block_size);
+
+ dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
+ dev->creation_time = get_jiffies_64();
+
+ core_setup_reservations(dev);
+
+ ret = core_setup_alua(dev);
+ if (ret)
+ goto out;
+
+ /*
+ * Startup the struct se_device processing thread
+ */
+ dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
+ dev->transport->name);
+ if (!dev->tmr_wq) {
+ pr_err("Unable to create tmr workqueue for %s\n",
+ dev->transport->name);
+ ret = -ENOMEM;
+ goto out_free_alua;
+ }
+
+ /*
+ * Setup work_queue for QUEUE_FULL
+ */
+ INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
+
+ /*
+ * Preload the initial INQUIRY const values if we are doing
+ * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
+ * passthrough because this is being provided by the backend LLD.
+ */
+ if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
+ strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
+ strncpy(&dev->t10_wwn.model[0],
+ dev->transport->inquiry_prod, 16);
+ strncpy(&dev->t10_wwn.revision[0],
+ dev->transport->inquiry_rev, 4);
+ }
+
+ scsi_dump_inquiry(dev);
+
+ spin_lock(&hba->device_lock);
+ hba->dev_count++;
+ spin_unlock(&hba->device_lock);
+ return 0;
+
+out_free_alua:
+ core_alua_free_lu_gp_mem(dev);
+out:
+ se_release_vpd_for_dev(dev);
+ return ret;
+}
+
+void target_free_device(struct se_device *dev)
+{
+ struct se_hba *hba = dev->se_hba;
+
+ WARN_ON(!list_empty(&dev->dev_sep_list));
+
+ if (dev->dev_flags & DF_CONFIGURED) {
+ destroy_workqueue(dev->tmr_wq);
+
+ spin_lock(&hba->device_lock);
+ hba->dev_count--;
+ spin_unlock(&hba->device_lock);
+ }
+
+ core_alua_free_lu_gp_mem(dev);
+ core_scsi3_free_all_registrations(dev);
+ se_release_vpd_for_dev(dev);
+
+ dev->transport->free_device(dev);
+}
+
int core_dev_setup_virtual_lun0(void)
{
struct se_hba *hba;
struct se_device *dev;
- struct se_subsystem_dev *se_dev = NULL;
- struct se_subsystem_api *t;
char buf[16];
int ret;

@@ -1581,60 +1597,28 @@ int core_dev_setup_virtual_lun0(void)
if (IS_ERR(hba))
return PTR_ERR(hba);

- lun0_hba = hba;
- t = hba->transport;
-
- se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
- if (!se_dev) {
- pr_err("Unable to allocate memory for"
- " struct se_subsystem_dev\n");
+ dev = target_alloc_device(hba, "virt_lun0");
+ if (!dev) {
ret = -ENOMEM;
- goto out;
+ goto out_free_hba;
}
- INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
- spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
- INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
- INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
- spin_lock_init(&se_dev->t10_pr.registration_lock);
- spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
- INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
- spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
- spin_lock_init(&se_dev->se_dev_lock);
- se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
- se_dev->t10_wwn.t10_sub_dev = se_dev;
- se_dev->t10_alua.t10_sub_dev = se_dev;
- se_dev->se_dev_attrib.da_sub_dev = se_dev;
- se_dev->se_dev_hba = hba;
-
- se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
- if (!se_dev->se_dev_su_ptr) {
- pr_err("Unable to locate subsystem dependent pointer"
- " from allocate_virtdevice()\n");
- ret = -ENOMEM;
- goto out;
- }
- lun0_su_dev = se_dev;

memset(buf, 0, 16);
sprintf(buf, "rd_pages=8");
- t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));
+ hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));

- dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
- if (IS_ERR(dev)) {
- ret = PTR_ERR(dev);
- goto out;
- }
- se_dev->se_dev_ptr = dev;
- g_lun0_dev = dev;
+ ret = target_configure_device(dev);
+ if (ret)
+ goto out_free_se_dev;

+ lun0_hba = hba;
+ g_lun0_dev = dev;
return 0;
-out:
- lun0_su_dev = NULL;
- kfree(se_dev);
- if (lun0_hba) {
- core_delete_hba(lun0_hba);
- lun0_hba = NULL;
- }
+
+out_free_se_dev:
+ target_free_device(dev);
+out_free_hba:
+ core_delete_hba(hba);
return ret;
}

@@ -1642,14 +1626,11 @@ out:
void core_dev_release_virtual_lun0(void)
{
struct se_hba *hba = lun0_hba;
- struct se_subsystem_dev *su_dev = lun0_su_dev;

if (!hba)
return;

if (g_lun0_dev)
- se_free_virtual_device(g_lun0_dev, hba);
-
- kfree(su_dev);
+ target_free_device(g_lun0_dev);
core_delete_hba(hba);
}