
drm/radeon: protect concurrent smc register access with a spinlock

SMC registers are accessed indirectly via the main MMIO aperture (an
index/data register pair), so unsynchronized concurrent access can race.
This adds a spinlock to protect access to this register space.

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Alex Deucher authored 11 years ago · commit fe78118c46
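The lock serializes the two-step index/data sequence that every SMC access performs. Below is a minimal sketch of that pattern, assembled from the pieces this patch touches (the lock is initialized in radeon_device_init() and used in the tn_smc_rreg() helper); it assumes the radeon driver context, i.e. struct radeon_device, the WREG32()/RREG32() MMIO accessors and the TN_SMC_IND_* offsets all come from the driver headers.

#include <linux/spinlock.h>

/* rdev->smc_idx_lock is initialized once in radeon_device_init()
 * via spin_lock_init(), next to the existing mmio_idx_lock. */

static inline u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	/*
	 * The SMC register file is reached through an index/data pair in the
	 * main MMIO aperture: write the SMC offset to the index register,
	 * then read the value back through the data register.  Without the
	 * lock a second thread could rewrite the index between the two
	 * accesses, and both callers would end up touching the wrong
	 * SMC register.
	 */
	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
	WREG32(TN_SMC_IND_INDEX_0, reg);	/* select the SMC register */
	r = RREG32(TN_SMC_IND_DATA_0);		/* read the selected register */
	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);

	return r;
}

Using the irqsave variant keeps the critical section safe even if an SMC access were ever made from interrupt context, at the cost of briefly disabling local interrupts around the two register operations.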

+ 26 - 13
drivers/gpu/drm/radeon/ci_smc.c

@@ -47,10 +47,11 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
 			 u32 smc_start_address,
 			 const u8 *src, u32 byte_count, u32 limit)
 {
+	unsigned long flags;
 	u32 data, original_data;
 	u32 addr;
 	u32 extra_shift;
-	int ret;
+	int ret = 0;
 
 	if (smc_start_address & 3)
 		return -EINVAL;
@@ -59,13 +60,14 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
 
 	addr = smc_start_address;
 
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	while (byte_count >= 4) {
 		/* SMC address space is BE */
 		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
 
 		ret = ci_set_smc_sram_address(rdev, addr, limit);
 		if (ret)
-			return ret;
+			goto done;
 
 		WREG32(SMC_IND_DATA_0, data);
 
@@ -80,7 +82,7 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
 
 		ret = ci_set_smc_sram_address(rdev, addr, limit);
 		if (ret)
-			return ret;
+			goto done;
 
 		original_data = RREG32(SMC_IND_DATA_0);
 
@@ -97,11 +99,15 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
 
 		ret = ci_set_smc_sram_address(rdev, addr, limit);
 		if (ret)
-			return ret;
+			goto done;
 
 		WREG32(SMC_IND_DATA_0, data);
 	}
-	return 0;
+
+done:
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+
+	return ret;
 }
 
 void ci_start_smc(struct radeon_device *rdev)
@@ -197,6 +203,7 @@ PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev)
 
 int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
 {
+	unsigned long flags;
 	u32 ucode_start_address;
 	u32 ucode_size;
 	const u8 *src;
@@ -219,6 +226,7 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
 		return -EINVAL;
 
 	src = (const u8 *)rdev->smc_fw->data;
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	WREG32(SMC_IND_INDEX_0, ucode_start_address);
 	WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
 	while (ucode_size >= 4) {
@@ -231,6 +239,7 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
 		ucode_size -= 4;
 	}
 	WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 
 	return 0;
 }
@@ -238,25 +247,29 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
 int ci_read_smc_sram_dword(struct radeon_device *rdev,
 			   u32 smc_address, u32 *value, u32 limit)
 {
+	unsigned long flags;
 	int ret;
 
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	ret = ci_set_smc_sram_address(rdev, smc_address, limit);
-	if (ret)
-		return ret;
+	if (ret == 0)
+		*value = RREG32(SMC_IND_DATA_0);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 
-	*value = RREG32(SMC_IND_DATA_0);
-	return 0;
+	return ret;
 }
 
 int ci_write_smc_sram_dword(struct radeon_device *rdev,
 			    u32 smc_address, u32 value, u32 limit)
 {
+	unsigned long flags;
 	int ret;
 
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	ret = ci_set_smc_sram_address(rdev, smc_address, limit);
-	if (ret)
-		return ret;
+	if (ret == 0)
+		WREG32(SMC_IND_DATA_0, value);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 
-	WREG32(SMC_IND_DATA_0, value);
-	return 0;
+	return ret;
 }

+ 9 - 0
drivers/gpu/drm/radeon/radeon.h

@@ -2110,6 +2110,8 @@ struct radeon_device {
 	resource_size_t			rmmio_size;
 	/* protects concurrent MM_INDEX/DATA based register access */
 	spinlock_t mmio_idx_lock;
+	/* protects concurrent SMC based register access */
+	spinlock_t smc_idx_lock;
 	void __iomem			*rmmio;
 	radeon_rreg_t			mc_rreg;
 	radeon_wreg_t			mc_wreg;
@@ -2292,17 +2294,24 @@ static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uin
 
 static inline u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg)
 {
+	unsigned long flags;
 	u32 r;
 
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	WREG32(TN_SMC_IND_INDEX_0, (reg));
 	r = RREG32(TN_SMC_IND_DATA_0);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 	return r;
 }
 
 static inline void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	WREG32(TN_SMC_IND_INDEX_0, (reg));
 	WREG32(TN_SMC_IND_DATA_0, (v));
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 }
 
 static inline u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg)

+ 1 - 0
drivers/gpu/drm/radeon/radeon_device.c

@@ -1249,6 +1249,7 @@ int radeon_device_init(struct radeon_device *rdev,
 	/* Registers mapping */
 	/* TODO: block userspace mapping of io register */
 	spin_lock_init(&rdev->mmio_idx_lock);
+	spin_lock_init(&rdev->smc_idx_lock);
 	if (rdev->family >= CHIP_BONAIRE) {
 		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
 		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);

+ 27 - 17
drivers/gpu/drm/radeon/rv770_smc.c

@@ -274,8 +274,8 @@ static const u8 cayman_smc_int_vectors[] =
 	0x08, 0x72, 0x08, 0x72
 };
 
-int rv770_set_smc_sram_address(struct radeon_device *rdev,
-			       u16 smc_address, u16 limit)
+static int rv770_set_smc_sram_address(struct radeon_device *rdev,
+				      u16 smc_address, u16 limit)
 {
 	u32 addr;
 
@@ -296,9 +296,10 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
 			    u16 smc_start_address, const u8 *src,
 			    u16 byte_count, u16 limit)
 {
+	unsigned long flags;
 	u32 data, original_data, extra_shift;
 	u16 addr;
-	int ret;
+	int ret = 0;
 
 	if (smc_start_address & 3)
 		return -EINVAL;
@@ -307,13 +308,14 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
 
 	addr = smc_start_address;
 
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	while (byte_count >= 4) {
 		/* SMC address space is BE */
 		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
 
 		ret = rv770_set_smc_sram_address(rdev, addr, limit);
 		if (ret)
-			return ret;
+			goto done;
 
 		WREG32(SMC_SRAM_DATA, data);
 
@@ -328,7 +330,7 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
 
 		ret = rv770_set_smc_sram_address(rdev, addr, limit);
 		if (ret)
-			return ret;
+			goto done;
 
 		original_data = RREG32(SMC_SRAM_DATA);
 
@@ -346,12 +348,15 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
 
 		ret = rv770_set_smc_sram_address(rdev, addr, limit);
 		if (ret)
-			return ret;
+			goto done;
 
 		WREG32(SMC_SRAM_DATA, data);
 	}
 
-	return 0;
+done:
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+
+	return ret;
 }
 
 static int rv770_program_interrupt_vectors(struct radeon_device *rdev,
@@ -461,12 +466,15 @@ PPSMC_Result rv770_wait_for_smc_inactive(struct radeon_device *rdev)
 
 static void rv770_clear_smc_sram(struct radeon_device *rdev, u16 limit)
 {
+	unsigned long flags;
 	u16 i;
 
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	for (i = 0;  i < limit; i += 4) {
 		rv770_set_smc_sram_address(rdev, i, limit);
 		WREG32(SMC_SRAM_DATA, 0);
 	}
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 }
 
 int rv770_load_smc_ucode(struct radeon_device *rdev,
@@ -595,27 +603,29 @@ int rv770_load_smc_ucode(struct radeon_device *rdev,
 int rv770_read_smc_sram_dword(struct radeon_device *rdev,
 			      u16 smc_address, u32 *value, u16 limit)
 {
+	unsigned long flags;
 	int ret;
 
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	ret = rv770_set_smc_sram_address(rdev, smc_address, limit);
-	if (ret)
-		return ret;
-
-	*value = RREG32(SMC_SRAM_DATA);
+	if (ret == 0)
+		*value = RREG32(SMC_SRAM_DATA);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 
-	return 0;
+	return ret;
 }
 
 int rv770_write_smc_sram_dword(struct radeon_device *rdev,
 			       u16 smc_address, u32 value, u16 limit)
 {
+	unsigned long flags;
 	int ret;
 
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	ret = rv770_set_smc_sram_address(rdev, smc_address, limit);
-	if (ret)
-		return ret;
+	if (ret == 0)
+		WREG32(SMC_SRAM_DATA, value);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 
-	WREG32(SMC_SRAM_DATA, value);
-
-	return 0;
+	return ret;
 }

+ 0 - 2
drivers/gpu/drm/radeon/rv770_smc.h

@@ -187,8 +187,6 @@ typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE;
 #define RV770_SMC_SOFT_REGISTER_uvd_enabled             0x9C
 #define RV770_SMC_SOFT_REGISTER_is_asic_lombok          0xA0
 
-int rv770_set_smc_sram_address(struct radeon_device *rdev,
-			       u16 smc_address, u16 limit);
 int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
 			    u16 smc_start_address, const u8 *src,
 			    u16 byte_count, u16 limit);

+ 28 - 15
drivers/gpu/drm/radeon/si_smc.c

@@ -29,8 +29,8 @@
 #include "ppsmc.h"
 #include "radeon_ucode.h"
 
-int si_set_smc_sram_address(struct radeon_device *rdev,
-			    u32 smc_address, u32 limit)
+static int si_set_smc_sram_address(struct radeon_device *rdev,
+				   u32 smc_address, u32 limit)
 {
 	if (smc_address & 3)
 		return -EINVAL;
@@ -47,7 +47,8 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
 			 u32 smc_start_address,
 			 const u8 *src, u32 byte_count, u32 limit)
 {
-	int ret;
+	unsigned long flags;
+	int ret = 0;
 	u32 data, original_data, addr, extra_shift;
 
 	if (smc_start_address & 3)
@@ -57,13 +58,14 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
 
 	addr = smc_start_address;
 
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	while (byte_count >= 4) {
 		/* SMC address space is BE */
 		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
 
 		ret = si_set_smc_sram_address(rdev, addr, limit);
 		if (ret)
-			return ret;
+			goto done;
 
 		WREG32(SMC_IND_DATA_0, data);
 
@@ -78,7 +80,7 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
 
 		ret = si_set_smc_sram_address(rdev, addr, limit);
 		if (ret)
-			return ret;
+			goto done;
 
 		original_data = RREG32(SMC_IND_DATA_0);
 
@@ -96,11 +98,15 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
 
 		ret = si_set_smc_sram_address(rdev, addr, limit);
 		if (ret)
-			return ret;
+			goto done;
 
 		WREG32(SMC_IND_DATA_0, data);
 	}
-	return 0;
+
+done:
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+
+	return ret;
 }
 
 void si_start_smc(struct radeon_device *rdev)
@@ -203,6 +209,7 @@ PPSMC_Result si_wait_for_smc_inactive(struct radeon_device *rdev)
 
 int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
 {
+	unsigned long flags;
 	u32 ucode_start_address;
 	u32 ucode_size;
 	const u8 *src;
@@ -241,6 +248,7 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
 		return -EINVAL;
 
 	src = (const u8 *)rdev->smc_fw->data;
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	WREG32(SMC_IND_INDEX_0, ucode_start_address);
 	WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
 	while (ucode_size >= 4) {
@@ -253,6 +261,7 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
 		ucode_size -= 4;
 	}
 	WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 
 	return 0;
 }
@@ -260,25 +269,29 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
 int si_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
 			   u32 *value, u32 limit)
 {
+	unsigned long flags;
 	int ret;
 
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	ret = si_set_smc_sram_address(rdev, smc_address, limit);
-	if (ret)
-		return ret;
+	if (ret == 0)
+		*value = RREG32(SMC_IND_DATA_0);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 
-	*value = RREG32(SMC_IND_DATA_0);
-	return 0;
+	return ret;
 }
 
 int si_write_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
 			    u32 value, u32 limit)
 {
+	unsigned long flags;
 	int ret;
 
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	ret = si_set_smc_sram_address(rdev, smc_address, limit);
-	if (ret)
-		return ret;
+	if (ret == 0)
+		WREG32(SMC_IND_DATA_0, value);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 
-	WREG32(SMC_IND_DATA_0, value);
-	return 0;
+	return ret;
 }