
target: Use array_zalloc for device_list

Turns an order-8 allocation into slab-sized ones, thereby preventing
allocation failures when memory is fragmented.

This likely saves memory as well, as the slab allocator can pack objects
more tightly than the buddy allocator.

(nab: Fix lio-core patch fuzz)

Signed-off-by: Joern Engel <joern@logfs.org>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Author: Jörn Engel
Commit: f2083241f2
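
The hunks below call array_zalloc() and array_free(), which are not defined in this patch; they come from elsewhere in the series. As a rough sketch of what such per-element allocation helpers look like (kernel-style C; the exact definitions used by the target code may differ), the idea is to replace one large buddy allocation with an array of individually kzalloc'd objects:

	#include <linux/slab.h>

	/*
	 * Sketch of the helpers this patch relies on (assumed shape, not
	 * taken from this diff): allocate n independently zeroed objects
	 * and return them behind a pointer array, so no single allocation
	 * is larger than one slab object.
	 */
	static void *array_zalloc(int n, size_t size, gfp_t flags)
	{
		void **a;
		int i;

		a = kcalloc(n, sizeof(void *), flags);
		if (!a)
			return NULL;
		for (i = 0; i < n; i++) {
			a[i] = kzalloc(size, flags);
			if (!a[i]) {
				/* unwind everything allocated so far */
				while (--i >= 0)
					kfree(a[i]);
				kfree(a);
				return NULL;
			}
		}
		return a;
	}

	static void array_free(void *array, int n)
	{
		void **a = array;
		int i;

		for (i = 0; i < n; i++)
			kfree(a[i]);
		kfree(a);
	}

With this layout each se_dev_entry lives in its own slab object, which is why device_list becomes struct se_dev_entry ** in target_core_base.h and every &nacl->device_list[i] in the callers turns into nacl->device_list[i].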

drivers/target/target_core_device.c (+10, -10)

@@ -72,7 +72,7 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
 	}
 
 	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
-	se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
+	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
 	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
 		struct se_dev_entry *deve = se_cmd->se_deve;
 
@@ -182,7 +182,7 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
 	}
 
 	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
-	se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
+	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
 	deve = se_cmd->se_deve;
 
 	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
@@ -240,7 +240,7 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
 
 	spin_lock_irq(&nacl->device_list_lock);
 	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-		deve = &nacl->device_list[i];
+		deve = nacl->device_list[i];
 
 		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
 			continue;
@@ -286,7 +286,7 @@ int core_free_device_list_for_node(
 
 	spin_lock_irq(&nacl->device_list_lock);
 	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-		deve = &nacl->device_list[i];
+		deve = nacl->device_list[i];
 
 		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
 			continue;
@@ -306,7 +306,7 @@ int core_free_device_list_for_node(
 	}
 	spin_unlock_irq(&nacl->device_list_lock);
 
-	kfree(nacl->device_list);
+	array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG);
 	nacl->device_list = NULL;
 
 	return 0;
@@ -318,7 +318,7 @@ void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
 	unsigned long flags;
 
 	spin_lock_irqsave(&se_nacl->device_list_lock, flags);
-	deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
+	deve = se_nacl->device_list[se_cmd->orig_fe_lun];
 	deve->deve_cmds--;
 	spin_unlock_irqrestore(&se_nacl->device_list_lock, flags);
 }
@@ -331,7 +331,7 @@ void core_update_device_list_access(
 	struct se_dev_entry *deve;
 
 	spin_lock_irq(&nacl->device_list_lock);
-	deve = &nacl->device_list[mapped_lun];
+	deve = nacl->device_list[mapped_lun];
 	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
 		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
 		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
@@ -356,7 +356,7 @@ int core_update_device_list_for_node(
 	int enable)
 {
 	struct se_port *port = lun->lun_sep;
-	struct se_dev_entry *deve = &nacl->device_list[mapped_lun];
+	struct se_dev_entry *deve = nacl->device_list[mapped_lun];
 	int trans = 0;
 	/*
 	 * If the MappedLUN entry is being disabled, the entry in
@@ -470,7 +470,7 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
 
 		spin_lock_irq(&nacl->device_list_lock);
 		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-			deve = &nacl->device_list[i];
+			deve = nacl->device_list[i];
 			if (lun != deve->se_lun)
 				continue;
 			spin_unlock_irq(&nacl->device_list_lock);
@@ -669,7 +669,7 @@ int target_report_luns(struct se_task *se_task)
 
 	spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
 	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-		deve = &se_sess->se_node_acl->device_list[i];
+		deve = se_sess->se_node_acl->device_list[i];
 		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
 			continue;
 		se_lun = deve->se_lun;

drivers/target/target_core_fabric_configfs.c (+3, -3)

@@ -108,7 +108,7 @@ static int target_fabric_mappedlun_link(
 	 * tpg_1/attrib/demo_mode_write_protect=1
 	 */
 	spin_lock_irq(&lacl->se_lun_nacl->device_list_lock);
-	deve = &lacl->se_lun_nacl->device_list[lacl->mapped_lun];
+	deve = lacl->se_lun_nacl->device_list[lacl->mapped_lun];
 	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)
 		lun_access = deve->lun_flags;
 	else
@@ -137,7 +137,7 @@ static int target_fabric_mappedlun_unlink(
 	struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
 			struct se_lun_acl, se_lun_group);
 	struct se_node_acl *nacl = lacl->se_lun_nacl;
-	struct se_dev_entry *deve = &nacl->device_list[lacl->mapped_lun];
+	struct se_dev_entry *deve = nacl->device_list[lacl->mapped_lun];
 	struct se_portal_group *se_tpg;
 	/*
 	 * Determine if the underlying MappedLUN has already been released..
@@ -168,7 +168,7 @@ static ssize_t target_fabric_mappedlun_show_write_protect(
 	ssize_t len;
 
 	spin_lock_irq(&se_nacl->device_list_lock);
-	deve = &se_nacl->device_list[lacl->mapped_lun];
+	deve = se_nacl->device_list[lacl->mapped_lun];
 	len = sprintf(page, "%d\n",
 			(deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ?
 			1 : 0);

drivers/target/target_core_pr.c (+7, -7)

@@ -328,7 +328,7 @@ static int core_scsi3_pr_seq_non_holder(
 		return core_scsi2_reservation_seq_non_holder(cmd,
 					cdb, pr_reg_type);
 
-	se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+	se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
 	/*
 	 * Determine if the registration should be ignored due to
 	 * non-matching ISIDs in core_scsi3_pr_reservation_check().
@@ -990,7 +990,7 @@ int core_scsi3_check_aptpl_registration(
 {
 	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
 	struct se_node_acl *nacl = lun_acl->se_lun_nacl;
-	struct se_dev_entry *deve = &nacl->device_list[lun_acl->mapped_lun];
+	struct se_dev_entry *deve = nacl->device_list[lun_acl->mapped_lun];
 
 	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
 		return 0;
@@ -1499,7 +1499,7 @@ static int core_scsi3_decode_spec_i_port(
 
 	memset(dest_iport, 0, 64);
 
-	local_se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+	local_se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
 	/*
 	 * Allocate a struct pr_transport_id_holder and setup the
 	 * local_node_acl and local_se_deve pointers and add to
@@ -2116,7 +2116,7 @@ static int core_scsi3_emulate_pro_register(
 		return -EINVAL;
 	}
 	se_tpg = se_sess->se_tpg;
-	se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+	se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
 
 	if (se_tpg->se_tpg_tfo->sess_get_initiator_sid) {
 		memset(&isid_buf[0], 0, PR_REG_ISID_LEN);
@@ -2432,7 +2432,7 @@ static int core_scsi3_pro_reserve(
 		return -EINVAL;
 	}
 	se_tpg = se_sess->se_tpg;
-	se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+	se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
 	/*
 	 * Locate the existing *pr_reg via struct se_node_acl pointers
 	 */
@@ -3005,7 +3005,7 @@ static int core_scsi3_pro_preempt(
 		return -EINVAL;
 	}
 
-	se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+	se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
 	pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
 				se_sess);
 	if (!pr_reg_n) {
@@ -3366,7 +3366,7 @@ static int core_scsi3_emulate_pro_register_and_move(
 	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
 	se_tpg = se_sess->se_tpg;
 	tf_ops = se_tpg->se_tpg_tfo;
-	se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+	se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
 	/*
 	 * Follow logic from spc4r17 Section 5.7.8, Table 50 --
 	 *	Register behaviors for a REGISTER AND MOVE service action

drivers/target/target_core_stat.c (+18, -18)

@@ -1253,7 +1253,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_inst(
 	ssize_t ret;
 
 	spin_lock_irq(&nacl->device_list_lock);
-	deve = &nacl->device_list[lacl->mapped_lun];
+	deve = nacl->device_list[lacl->mapped_lun];
 	if (!deve->se_lun || !deve->se_lun_acl) {
 		spin_unlock_irq(&nacl->device_list_lock);
 		return -ENODEV;
@@ -1279,7 +1279,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_dev(
 	ssize_t ret;
 
 	spin_lock_irq(&nacl->device_list_lock);
-	deve = &nacl->device_list[lacl->mapped_lun];
+	deve = nacl->device_list[lacl->mapped_lun];
 	if (!deve->se_lun || !deve->se_lun_acl) {
 		spin_unlock_irq(&nacl->device_list_lock);
 		return -ENODEV;
@@ -1304,7 +1304,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_port(
 	ssize_t ret;
 
 	spin_lock_irq(&nacl->device_list_lock);
-	deve = &nacl->device_list[lacl->mapped_lun];
+	deve = nacl->device_list[lacl->mapped_lun];
 	if (!deve->se_lun || !deve->se_lun_acl) {
 		spin_unlock_irq(&nacl->device_list_lock);
 		return -ENODEV;
@@ -1327,7 +1327,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_indx(
 	ssize_t ret;
 
 	spin_lock_irq(&nacl->device_list_lock);
-	deve = &nacl->device_list[lacl->mapped_lun];
+	deve = nacl->device_list[lacl->mapped_lun];
 	if (!deve->se_lun || !deve->se_lun_acl) {
 		spin_unlock_irq(&nacl->device_list_lock);
 		return -ENODEV;
@@ -1349,7 +1349,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_dev_or_port(
 	ssize_t ret;
 
 	spin_lock_irq(&nacl->device_list_lock);
-	deve = &nacl->device_list[lacl->mapped_lun];
+	deve = nacl->device_list[lacl->mapped_lun];
 	if (!deve->se_lun || !deve->se_lun_acl) {
 		spin_unlock_irq(&nacl->device_list_lock);
 		return -ENODEV;
@@ -1371,7 +1371,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_intr_name(
 	ssize_t ret;
 
 	spin_lock_irq(&nacl->device_list_lock);
-	deve = &nacl->device_list[lacl->mapped_lun];
+	deve = nacl->device_list[lacl->mapped_lun];
 	if (!deve->se_lun || !deve->se_lun_acl) {
 		spin_unlock_irq(&nacl->device_list_lock);
 		return -ENODEV;
@@ -1393,7 +1393,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_map_indx(
 	ssize_t ret;
 
 	spin_lock_irq(&nacl->device_list_lock);
-	deve = &nacl->device_list[lacl->mapped_lun];
+	deve = nacl->device_list[lacl->mapped_lun];
 	if (!deve->se_lun || !deve->se_lun_acl) {
 		spin_unlock_irq(&nacl->device_list_lock);
 		return -ENODEV;
@@ -1415,7 +1415,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_att_count(
 	ssize_t ret;
 
 	spin_lock_irq(&nacl->device_list_lock);
-	deve = &nacl->device_list[lacl->mapped_lun];
+	deve = nacl->device_list[lacl->mapped_lun];
 	if (!deve->se_lun || !deve->se_lun_acl) {
 		spin_unlock_irq(&nacl->device_list_lock);
 		return -ENODEV;
@@ -1437,7 +1437,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_num_cmds(
 	ssize_t ret;
 
 	spin_lock_irq(&nacl->device_list_lock);
-	deve = &nacl->device_list[lacl->mapped_lun];
+	deve = nacl->device_list[lacl->mapped_lun];
 	if (!deve->se_lun || !deve->se_lun_acl) {
 		spin_unlock_irq(&nacl->device_list_lock);
 		return -ENODEV;
@@ -1459,7 +1459,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_read_mbytes(
 	ssize_t ret;
 
 	spin_lock_irq(&nacl->device_list_lock);
-	deve = &nacl->device_list[lacl->mapped_lun];
+	deve = nacl->device_list[lacl->mapped_lun];
 	if (!deve->se_lun || !deve->se_lun_acl) {
 		spin_unlock_irq(&nacl->device_list_lock);
 		return -ENODEV;
@@ -1481,7 +1481,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_write_mbytes(
 	ssize_t ret;
 
 	spin_lock_irq(&nacl->device_list_lock);
-	deve = &nacl->device_list[lacl->mapped_lun];
+	deve = nacl->device_list[lacl->mapped_lun];
 	if (!deve->se_lun || !deve->se_lun_acl) {
 		spin_unlock_irq(&nacl->device_list_lock);
 		return -ENODEV;
@@ -1503,7 +1503,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_hs_num_cmds(
 	ssize_t ret;
 
 	spin_lock_irq(&nacl->device_list_lock);
-	deve = &nacl->device_list[lacl->mapped_lun];
+	deve = nacl->device_list[lacl->mapped_lun];
 	if (!deve->se_lun || !deve->se_lun_acl) {
 		spin_unlock_irq(&nacl->device_list_lock);
 		return -ENODEV;
@@ -1525,7 +1525,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_creation_time(
 	ssize_t ret;
 
 	spin_lock_irq(&nacl->device_list_lock);
-	deve = &nacl->device_list[lacl->mapped_lun];
+	deve = nacl->device_list[lacl->mapped_lun];
 	if (!deve->se_lun || !deve->se_lun_acl) {
 		spin_unlock_irq(&nacl->device_list_lock);
 		return -ENODEV;
@@ -1548,7 +1548,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_row_status(
 	ssize_t ret;
 
 	spin_lock_irq(&nacl->device_list_lock);
-	deve = &nacl->device_list[lacl->mapped_lun];
+	deve = nacl->device_list[lacl->mapped_lun];
 	if (!deve->se_lun || !deve->se_lun_acl) {
 		spin_unlock_irq(&nacl->device_list_lock);
 		return -ENODEV;
@@ -1621,7 +1621,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_inst(
 	ssize_t ret;
 
 	spin_lock_irq(&nacl->device_list_lock);
-	deve = &nacl->device_list[lacl->mapped_lun];
+	deve = nacl->device_list[lacl->mapped_lun];
 	if (!deve->se_lun || !deve->se_lun_acl) {
 		spin_unlock_irq(&nacl->device_list_lock);
 		return -ENODEV;
@@ -1647,7 +1647,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_dev(
 	ssize_t ret;
 
 	spin_lock_irq(&nacl->device_list_lock);
-	deve = &nacl->device_list[lacl->mapped_lun];
+	deve = nacl->device_list[lacl->mapped_lun];
 	if (!deve->se_lun || !deve->se_lun_acl) {
 		spin_unlock_irq(&nacl->device_list_lock);
 		return -ENODEV;
@@ -1672,7 +1672,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port(
 	ssize_t ret;
 
 	spin_lock_irq(&nacl->device_list_lock);
-	deve = &nacl->device_list[lacl->mapped_lun];
+	deve = nacl->device_list[lacl->mapped_lun];
 	if (!deve->se_lun || !deve->se_lun_acl) {
 		spin_unlock_irq(&nacl->device_list_lock);
 		return -ENODEV;
@@ -1721,7 +1721,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port_auth_indx(
 	ssize_t ret;
 
 	spin_lock_irq(&nacl->device_list_lock);
-	deve = &nacl->device_list[lacl->mapped_lun];
+	deve = nacl->device_list[lacl->mapped_lun];
 	if (!deve->se_lun || !deve->se_lun_acl) {
 		spin_unlock_irq(&nacl->device_list_lock);
 		return -ENODEV;

drivers/target/target_core_tpg.c (+4, -4)

@@ -64,7 +64,7 @@ static void core_clear_initiator_node_from_tpg(
 
 	spin_lock_irq(&nacl->device_list_lock);
 	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-		deve = &nacl->device_list[i];
+		deve = nacl->device_list[i];
 
 		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
 			continue;
@@ -259,15 +259,15 @@ static int core_create_device_list_for_node(struct se_node_acl *nacl)
 	struct se_dev_entry *deve;
 	int i;
 
-	nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
-				TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
+	nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
+			sizeof(struct se_dev_entry), GFP_KERNEL);
 	if (!nacl->device_list) {
 		pr_err("Unable to allocate memory for"
 			" struct se_node_acl->device_list\n");
 		return -ENOMEM;
 	}
 	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-		deve = &nacl->device_list[i];
+		deve = nacl->device_list[i];
 
 		atomic_set(&deve->ua_count, 0);
 		atomic_set(&deve->pr_ref_count, 0);

drivers/target/target_core_ua.c (+4, -4)

@@ -53,7 +53,7 @@ int core_scsi3_ua_check(
 	if (!nacl)
 		return 0;
 
-	deve = &nacl->device_list[cmd->orig_fe_lun];
+	deve = nacl->device_list[cmd->orig_fe_lun];
 	if (!atomic_read(&deve->ua_count))
 		return 0;
 	/*
@@ -110,7 +110,7 @@ int core_scsi3_ua_allocate(
 	ua->ua_ascq = ascq;
 
 	spin_lock_irq(&nacl->device_list_lock);
-	deve = &nacl->device_list[unpacked_lun];
+	deve = nacl->device_list[unpacked_lun];
 
 	spin_lock(&deve->ua_lock);
 	list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) {
@@ -220,7 +220,7 @@ void core_scsi3_ua_for_check_condition(
 		return;
 
 	spin_lock_irq(&nacl->device_list_lock);
-	deve = &nacl->device_list[cmd->orig_fe_lun];
+	deve = nacl->device_list[cmd->orig_fe_lun];
 	if (!atomic_read(&deve->ua_count)) {
 		spin_unlock_irq(&nacl->device_list_lock);
 		return;
@@ -289,7 +289,7 @@ int core_scsi3_ua_clear_for_request_sense(
 		return -EINVAL;
 
 	spin_lock_irq(&nacl->device_list_lock);
-	deve = &nacl->device_list[cmd->orig_fe_lun];
+	deve = nacl->device_list[cmd->orig_fe_lun];
 	if (!atomic_read(&deve->ua_count)) {
 		spin_unlock_irq(&nacl->device_list_lock);
 		return -EPERM;

include/target/target_core_base.h (+1, -1)

@@ -629,7 +629,7 @@ struct se_node_acl {
 	spinlock_t		stats_lock;
 	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
 	atomic_t		acl_pr_ref_count;
-	struct se_dev_entry	*device_list;
+	struct se_dev_entry	**device_list;
 	struct se_session	*nacl_sess;
 	struct se_portal_group *se_tpg;
 	spinlock_t		device_list_lock;