
Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (27 commits)
  drm/radeon/kms: remove rv100 bios connector quirk
  drm/radeon/kms/pm: fix power state indexing on igp chips in dynpm mode
  DRM / radeon / KMS: Fix hibernation regression related to radeon PM (was: Re: [Regression, post-2.6.34] Hibernation broken on machines with radeon/KMS and r300)
  drm/radeon/kms/igp: fix possible divide by 0 in bandwidth code (v2)
  drm/radeon: add quirk to make HP nx6125 laptop resume.
  drm/radeon/kms: add some missing regs to evergreen gpu init
  drm/radeon/kms: fix typos in evergreen command checker
  drm/radeon/kms: avoid oops on mac r4xx cards
  fb: fix colliding defines for fb flags.
  drm/radeon/kms: Force HDP_NONSURF to maximum size
  drm/radeon/kms: disable frac fb dividers for rs6xx
  drm/radeon/kms: don't attempt to read bios from VRAM on unposted GPU.
  drm/radeon/kms: fix typo in evergreen_gpu_init
  drm/radeon/kms: return ret in cursor_set failure path
  drm/ttm: non pooled page allocation should have GFP_USER set
  drm/radeon/r100/r200: fix calculation of compressed cube maps
  drm/radeon/r200: handle more hw tex coord types
  drm/radeon/kms: CS checker texture fixes for r1xx/r2xx/r3xx
  drm/radeon: add fake RN50 table for powerpc
  drm/fb: Fix video= mode computation
  ...
Linus Torvalds, commit bf4f42b441

+ 2 - 4
drivers/char/agp/generic.c

@@ -97,20 +97,18 @@ EXPORT_SYMBOL(agp_flush_chipset);
 void agp_alloc_page_array(size_t size, struct agp_memory *mem)
 {
 	mem->pages = NULL;
-	mem->vmalloc_flag = false;
 
 	if (size <= 2*PAGE_SIZE)
-		mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NORETRY);
+		mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
 	if (mem->pages == NULL) {
 		mem->pages = vmalloc(size);
-		mem->vmalloc_flag = true;
 	}
 }
 EXPORT_SYMBOL(agp_alloc_page_array);
 
 void agp_free_page_array(struct agp_memory *mem)
 {
-	if (mem->vmalloc_flag) {
+	if (is_vmalloc_addr(mem->pages)) {
 		vfree(mem->pages);
 	} else {
 		kfree(mem->pages);
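
The replacement pattern is the standard kmalloc-with-vmalloc-fallback idiom: the free path dispatches on the pointer itself via is_vmalloc_addr(), so no separate flag has to be carried around in struct agp_memory. A minimal, generic sketch of the idiom (not the AGP code itself):

    #include <linux/slab.h>
    #include <linux/vmalloc.h>
    #include <linux/mm.h>   /* is_vmalloc_addr() */

    static void *table_alloc(size_t size)
    {
            void *p = NULL;

            if (size <= 2 * PAGE_SIZE)
                    p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
            if (!p)
                    p = vmalloc(size);
            return p;
    }

    static void table_free(void *p)
    {
            if (is_vmalloc_addr(p))
                    vfree(p);
            else
                    kfree(p);
    }

Both kfree(NULL) and vfree(NULL) are no-ops and is_vmalloc_addr(NULL) is false, so the free helper stays safe even when the allocation failed.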

+ 13 - 6
drivers/gpu/drm/drm_fb_helper.c

@@ -146,7 +146,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_conn
 				cvt = 1;
 			break;
 		case 'R':
-			if (!cvt)
+			if (cvt)
 				rb = 1;
 			break;
 		case 'm':
@@ -1024,11 +1024,18 @@ static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_conne
 	}
 
 create_mode:
-	mode = drm_cvt_mode(fb_helper_conn->connector->dev, cmdline_mode->xres,
-			    cmdline_mode->yres,
-			    cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
-			    cmdline_mode->rb, cmdline_mode->interlace,
-			    cmdline_mode->margins);
+	if (cmdline_mode->cvt)
+		mode = drm_cvt_mode(fb_helper_conn->connector->dev,
+				    cmdline_mode->xres, cmdline_mode->yres,
+				    cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
+				    cmdline_mode->rb, cmdline_mode->interlace,
+				    cmdline_mode->margins);
+	else
+		mode = drm_gtf_mode(fb_helper_conn->connector->dev,
+				    cmdline_mode->xres, cmdline_mode->yres,
+				    cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
+				    cmdline_mode->interlace,
+				    cmdline_mode->margins);
 	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 	list_add(&mode->head, &fb_helper_conn->connector->modes);
 	return mode;
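
Taken together, the two hunks make the command-line parser treat 'R' (reduced blanking) as a CVT-only modifier and then generate the timing with drm_cvt_mode() when CVT was requested, falling back to drm_gtf_mode() otherwise. As a usage illustration, assuming the modedb-style video= syntax (<xres>x<yres>[M][R][@<refresh>][i][m]) and a hypothetical connector name:

    video=DVI-I-1:1920x1200MR@60   (CVT timing with reduced blanking)
    video=DVI-I-1:1024x768@75      (no 'M', so a GTF-computed timing)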

+ 1 - 1
drivers/gpu/drm/radeon/atombios_crtc.c

@@ -498,7 +498,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 		if ((rdev->family == CHIP_RS600) ||
 		    (rdev->family == CHIP_RS690) ||
 		    (rdev->family == CHIP_RS740))
-			pll->flags |= (RADEON_PLL_USE_FRAC_FB_DIV |
+			pll->flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/
 				       RADEON_PLL_PREFER_CLOSEST_LOWER);
 
 		if (ASIC_IS_DCE32(rdev) && mode->clock > 200000)	/* range limits??? */

+ 29 - 6
drivers/gpu/drm/radeon/evergreen.c

@@ -607,7 +607,7 @@ static void evergreen_mc_program(struct radeon_device *rdev)
 	WREG32(MC_VM_FB_LOCATION, tmp);
 	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
 	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
-	WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
+	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
 	if (rdev->flags & RADEON_IS_AGP) {
 		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
 		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
@@ -1222,11 +1222,11 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 		ps_thread_count = 128;
 
 	sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
-	sq_thread_resource_mgmt |= NUM_VS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
-	sq_thread_resource_mgmt |= NUM_GS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
-	sq_thread_resource_mgmt |= NUM_ES_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
-	sq_thread_resource_mgmt_2 = NUM_HS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
-	sq_thread_resource_mgmt_2 |= NUM_LS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
+	sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+	sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+	sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+	sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+	sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
 
 	sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
 	sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
@@ -1260,6 +1260,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 	WREG32(VGT_GS_VERTEX_REUSE, 16);
 	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
 
+	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
+	WREG32(VGT_OUT_DEALLOC_CNTL, 16);
+
 	WREG32(CB_PERF_CTR0_SEL_0, 0);
 	WREG32(CB_PERF_CTR0_SEL_1, 0);
 	WREG32(CB_PERF_CTR1_SEL_0, 0);
@@ -1269,6 +1272,26 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 	WREG32(CB_PERF_CTR3_SEL_0, 0);
 	WREG32(CB_PERF_CTR3_SEL_1, 0);
 
+	/* clear render buffer base addresses */
+	WREG32(CB_COLOR0_BASE, 0);
+	WREG32(CB_COLOR1_BASE, 0);
+	WREG32(CB_COLOR2_BASE, 0);
+	WREG32(CB_COLOR3_BASE, 0);
+	WREG32(CB_COLOR4_BASE, 0);
+	WREG32(CB_COLOR5_BASE, 0);
+	WREG32(CB_COLOR6_BASE, 0);
+	WREG32(CB_COLOR7_BASE, 0);
+	WREG32(CB_COLOR8_BASE, 0);
+	WREG32(CB_COLOR9_BASE, 0);
+	WREG32(CB_COLOR10_BASE, 0);
+	WREG32(CB_COLOR11_BASE, 0);
+
+	/* set the shader const cache sizes to 0 */
+	for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
+		WREG32(i, 0);
+	for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
+		WREG32(i, 0);
+
 	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
 	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
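
The two new loops walk a contiguous block of SQ_ALU_CONST_BUFFER_SIZE_* registers, one 32-bit register every 4 bytes. Using the offsets added to evergreend.h further down in this merge (0x28140 for PS_0, 0x28f80 for HS_0), a quick back-of-the-envelope check of how many registers each loop clears:

    #include <stdio.h>

    int main(void)
    {
            /* offsets taken from the evergreend.h hunk below */
            unsigned ps0 = 0x28140, ps_end = 0x28200;
            unsigned hs0 = 0x28f80, hs_end = 0x29000;

            printf("regs cleared from SQ_ALU_CONST_BUFFER_SIZE_PS_0: %u\n",
                   (ps_end - ps0) / 4);   /* 48 */
            printf("regs cleared from SQ_ALU_CONST_BUFFER_SIZE_HS_0: %u\n",
                   (hs_end - hs0) / 4);   /* 32 */
            return 0;
    }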
 

+ 2 - 2
drivers/gpu/drm/radeon/evergreen_cs.c

@@ -1197,7 +1197,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 					DRM_ERROR("bad SET_RESOURCE (tex)\n");
 					return -EINVAL;
 				}
-				ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+				ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
 				if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
 					ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
 				else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
@@ -1209,7 +1209,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 					DRM_ERROR("bad SET_RESOURCE (tex)\n");
 					return -EINVAL;
 				}
-				ib[idx+1+(i*8)+4] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+				ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
 				mipmap = reloc->robj;
 				r = evergreen_check_texture_resource(p,  idx+1+(i*8),
 						texture, mipmap);
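
Each SET_RESOURCE texture slot occupies 8 dwords in the indirect buffer, so for slot i the n-th descriptor dword sits at ib[idx+1+(i*8)+n]; the fix relocates the texture base address into dword 2 and the mipmap address into dword 3, one dword earlier than before. A tiny, purely illustrative index calculation (the packet start value is made up):

    #include <stdio.h>

    int main(void)
    {
            unsigned idx = 10;      /* hypothetical SET_RESOURCE packet position */

            for (unsigned i = 0; i < 2; i++) {
                    unsigned base = idx + 1 + i * 8;
                    printf("slot %u: dwords %u-%u, base addr reloc at %u, mip reloc at %u\n",
                           i, base, base + 7, base + 2, base + 3);
            }
            return 0;
    }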

+ 3 - 0
drivers/gpu/drm/radeon/evergreend.h

@@ -713,6 +713,9 @@
 #define SQ_GSVS_RING_OFFSET_2				0x28930
 #define SQ_GSVS_RING_OFFSET_3				0x28934
 
+#define SQ_ALU_CONST_BUFFER_SIZE_PS_0			0x28140
+#define SQ_ALU_CONST_BUFFER_SIZE_HS_0			0x28f80
+
 #define SQ_ALU_CONST_CACHE_PS_0				0x28940
 #define SQ_ALU_CONST_CACHE_PS_1				0x28944
 #define SQ_ALU_CONST_CACHE_PS_2				0x28948

+ 48 - 33
drivers/gpu/drm/radeon/r100.c

@@ -1628,6 +1628,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		case RADEON_TXFORMAT_RGB332:
 		case RADEON_TXFORMAT_Y8:
 			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
 			break;
 		case RADEON_TXFORMAT_AI88:
 		case RADEON_TXFORMAT_ARGB1555:
@@ -1639,12 +1640,14 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		case RADEON_TXFORMAT_LDUDV655:
 		case RADEON_TXFORMAT_DUDV88:
 			track->textures[i].cpp = 2;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
 			break;
 		case RADEON_TXFORMAT_ARGB8888:
 		case RADEON_TXFORMAT_RGBA8888:
 		case RADEON_TXFORMAT_SHADOW32:
 		case RADEON_TXFORMAT_LDUDUV8888:
 			track->textures[i].cpp = 4;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
 			break;
 		case RADEON_TXFORMAT_DXT1:
 			track->textures[i].cpp = 1;
@@ -2604,12 +2607,6 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg,
 	int surf_index = reg * 16;
 	int flags = 0;
 
-	/* r100/r200 divide by 16 */
-	if (rdev->family < CHIP_R300)
-		flags = pitch / 16;
-	else
-		flags = pitch / 8;
-
 	if (rdev->family <= CHIP_RS200) {
 		if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
 				 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
@@ -2633,6 +2630,20 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg,
 	if (tiling_flags & RADEON_TILING_SWAP_32BIT)
 		flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;
 
+	/* when we aren't tiling, the pitch seems to need to be further divided down - tested on power5 + rn50 server */
+	if (tiling_flags & (RADEON_TILING_SWAP_16BIT | RADEON_TILING_SWAP_32BIT)) {
+		if (!(tiling_flags & (RADEON_TILING_MACRO | RADEON_TILING_MICRO)))
+			if (ASIC_IS_RN50(rdev))
+				pitch /= 16;
+	}
+
+	/* r100/r200 divide by 16 */
+	if (rdev->family < CHIP_R300)
+		flags |= pitch / 16;
+	else
+		flags |= pitch / 8;
+
+
 	DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
 	WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
 	WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
@@ -3147,33 +3158,6 @@ static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
 	DRM_ERROR("compress format            %d\n", t->compress_format);
 }
 
-static int r100_cs_track_cube(struct radeon_device *rdev,
-			      struct r100_cs_track *track, unsigned idx)
-{
-	unsigned face, w, h;
-	struct radeon_bo *cube_robj;
-	unsigned long size;
-
-	for (face = 0; face < 5; face++) {
-		cube_robj = track->textures[idx].cube_info[face].robj;
-		w = track->textures[idx].cube_info[face].width;
-		h = track->textures[idx].cube_info[face].height;
-
-		size = w * h;
-		size *= track->textures[idx].cpp;
-
-		size += track->textures[idx].cube_info[face].offset;
-
-		if (size > radeon_bo_size(cube_robj)) {
-			DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
-				  size, radeon_bo_size(cube_robj));
-			r100_cs_track_texture_print(&track->textures[idx]);
-			return -1;
-		}
-	}
-	return 0;
-}
-
 static int r100_track_compress_size(int compress_format, int w, int h)
 {
 	int block_width, block_height, block_bytes;
@@ -3204,6 +3188,37 @@ static int r100_track_compress_size(int compress_format, int w, int h)
 	return sz;
 }
 
+static int r100_cs_track_cube(struct radeon_device *rdev,
+			      struct r100_cs_track *track, unsigned idx)
+{
+	unsigned face, w, h;
+	struct radeon_bo *cube_robj;
+	unsigned long size;
+	unsigned compress_format = track->textures[idx].compress_format;
+
+	for (face = 0; face < 5; face++) {
+		cube_robj = track->textures[idx].cube_info[face].robj;
+		w = track->textures[idx].cube_info[face].width;
+		h = track->textures[idx].cube_info[face].height;
+
+		if (compress_format) {
+			size = r100_track_compress_size(compress_format, w, h);
+		} else
+			size = w * h;
+		size *= track->textures[idx].cpp;
+
+		size += track->textures[idx].cube_info[face].offset;
+
+		if (size > radeon_bo_size(cube_robj)) {
+			DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
+				  size, radeon_bo_size(cube_robj));
+			r100_cs_track_texture_print(&track->textures[idx]);
+			return -1;
+		}
+	}
+	return 0;
+}
+
 static int r100_cs_track_texture_check(struct radeon_device *rdev,
 				       struct r100_cs_track *track)
 {

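With the compressed path, each cube face is sized in 4x4 blocks instead of per pixel (the tracker keeps cpp = 1 for DXT formats, as the format switch above shows). A standalone re-statement of the block math for a couple of DXT1 faces, a sketch rather than the kernel helper itself:

    #include <stdio.h>

    /* DXT1 packs each 4x4 texel block into 8 bytes */
    static int dxt1_size(int w, int h)
    {
            int bw = (w + 3) / 4, bh = (h + 3) / 4;  /* round partial blocks up */

            return bw * bh * 8;
    }

    int main(void)
    {
            printf("64x64 face: %d bytes (w*h*cpp would give %d)\n",
                   dxt1_size(64, 64), 64 * 64 * 1);
            printf("2x2 mip face: %d bytes\n", dxt1_size(2, 2));
            return 0;
    }
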
+ 5 - 0
drivers/gpu/drm/radeon/r200.c

@@ -415,6 +415,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		/* 2D, 3D, CUBE */
 		switch (tmp) {
 		case 0:
+		case 3:
+		case 4:
 		case 5:
 		case 6:
 		case 7:
@@ -450,6 +452,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		case R200_TXFORMAT_RGB332:
 		case R200_TXFORMAT_Y8:
 			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
 			break;
 		case R200_TXFORMAT_AI88:
 		case R200_TXFORMAT_ARGB1555:
@@ -461,6 +464,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		case R200_TXFORMAT_DVDU88:
 		case R200_TXFORMAT_AVYU4444:
 			track->textures[i].cpp = 2;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
 			break;
 		case R200_TXFORMAT_ARGB8888:
 		case R200_TXFORMAT_RGBA8888:
@@ -468,6 +472,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		case R200_TXFORMAT_BGR111110:
 		case R200_TXFORMAT_LDVDU8888:
 			track->textures[i].cpp = 4;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
 			break;
 		case R200_TXFORMAT_DXT1:
 			track->textures[i].cpp = 1;

+ 5 - 0
drivers/gpu/drm/radeon/r300.c

@@ -881,6 +881,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		case R300_TX_FORMAT_Y4X4:
 		case R300_TX_FORMAT_Z3Y3X2:
 			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
 			break;
 		case R300_TX_FORMAT_X16:
 		case R300_TX_FORMAT_Y8X8:
@@ -892,6 +893,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		case R300_TX_FORMAT_B8G8_B8G8:
 		case R300_TX_FORMAT_G8R8_G8B8:
 			track->textures[i].cpp = 2;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
 			break;
 		case R300_TX_FORMAT_Y16X16:
 		case R300_TX_FORMAT_Z11Y11X10:
@@ -902,14 +904,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		case R300_TX_FORMAT_FL_I32:
 		case 0x1e:
 			track->textures[i].cpp = 4;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
 			break;
 		case R300_TX_FORMAT_W16Z16Y16X16:
 		case R300_TX_FORMAT_FL_R16G16B16A16:
 		case R300_TX_FORMAT_FL_I32A32:
 			track->textures[i].cpp = 8;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
 			break;
 		case R300_TX_FORMAT_FL_R32G32B32A32:
 			track->textures[i].cpp = 16;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
 			break;
 		case R300_TX_FORMAT_DXT1:
 			track->textures[i].cpp = 1;

+ 12 - 5
drivers/gpu/drm/radeon/r600.c

@@ -130,9 +130,14 @@ void r600_pm_get_dynpm_state(struct radeon_device *rdev)
 							break;
 						}
 					}
-				} else
-					rdev->pm.requested_power_state_index =
-						rdev->pm.current_power_state_index - 1;
+				} else {
+					if (rdev->pm.current_power_state_index == 0)
+						rdev->pm.requested_power_state_index =
+							rdev->pm.num_power_states - 1;
+					else
+						rdev->pm.requested_power_state_index =
+							rdev->pm.current_power_state_index - 1;
+				}
 			}
 			rdev->pm.requested_clock_mode_index = 0;
 			/* don't use the power state if crtcs are active and no display flag is set */
@@ -1097,7 +1102,7 @@ static void r600_mc_program(struct radeon_device *rdev)
 	WREG32(MC_VM_FB_LOCATION, tmp);
 	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
 	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
-	WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
+	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
 	if (rdev->flags & RADEON_IS_AGP) {
 		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
 		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
@@ -1219,8 +1224,10 @@ int r600_mc_init(struct radeon_device *rdev)
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
 	r600_vram_gtt_location(rdev, &rdev->mc);
 
-	if (rdev->flags & RADEON_IS_IGP)
+	if (rdev->flags & RADEON_IS_IGP) {
+		rs690_pm_info(rdev);
 		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+	}
 	radeon_update_bandwidth_info(rdev);
 	return 0;
 }
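
Previously the downclock branch always requested current_power_state_index - 1, which walks off the front of the power-state table when the current index is already 0; per the shortlog, that is the case hit on IGP chips. The new branch wraps to the last state instead. A minimal sketch of the selection rule with a made-up 3-entry table:

    #include <stdio.h>

    static int pick_downclock_state(int current_index, int num_power_states)
    {
            /* mirrors the fixed branch: never go below index 0 */
            if (current_index == 0)
                    return num_power_states - 1;
            return current_index - 1;
    }

    int main(void)
    {
            for (int cur = 0; cur < 3; cur++)
                    printf("current=%d -> requested=%d\n",
                           cur, pick_downclock_state(cur, 3));
            return 0;
    }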

+ 3 - 1
drivers/gpu/drm/radeon/radeon.h

@@ -177,6 +177,7 @@ void radeon_pm_resume(struct radeon_device *rdev);
 void radeon_combios_get_power_modes(struct radeon_device *rdev);
 void radeon_atombios_get_power_modes(struct radeon_device *rdev);
 void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level);
+void rs690_pm_info(struct radeon_device *rdev);
 
 /*
  * Fences.
@@ -619,7 +620,8 @@ enum radeon_dynpm_state {
 	DYNPM_STATE_DISABLED,
 	DYNPM_STATE_MINIMUM,
 	DYNPM_STATE_PAUSED,
-	DYNPM_STATE_ACTIVE
+	DYNPM_STATE_ACTIVE,
+	DYNPM_STATE_SUSPENDED,
 };
 enum radeon_dynpm_action {
 	DYNPM_ACTION_NONE,

+ 7 - 0
drivers/gpu/drm/radeon/radeon_asic.c

@@ -780,6 +780,13 @@ int radeon_asic_init(struct radeon_device *rdev)
 	case CHIP_R423:
 	case CHIP_RV410:
 		rdev->asic = &r420_asic;
+		/* handle macs */
+		if (rdev->bios == NULL) {
+			rdev->asic->get_engine_clock = &radeon_legacy_get_engine_clock;
+			rdev->asic->set_engine_clock = &radeon_legacy_set_engine_clock;
+			rdev->asic->get_memory_clock = &radeon_legacy_get_memory_clock;
+			rdev->asic->set_memory_clock = NULL;
+		}
 		break;
 	case CHIP_RS400:
 	case CHIP_RS480:

+ 4 - 0
drivers/gpu/drm/radeon/radeon_bios.c

@@ -48,6 +48,10 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev)
 	resource_size_t vram_base;
 	resource_size_t size = 256 * 1024; /* ??? */
 
+	if (!(rdev->flags & RADEON_IS_IGP))
+		if (!radeon_card_posted(rdev))
+			return false;
+
 	rdev->bios = NULL;
 	vram_base = drm_get_resource_start(rdev->ddev, 0);
 	bios = ioremap(vram_base, size);

+ 40 - 9
drivers/gpu/drm/radeon/radeon_combios.c

@@ -1411,6 +1411,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 			rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT;
 		} else
 #endif /* CONFIG_PPC_PMAC */
+#ifdef CONFIG_PPC64
+		if (ASIC_IS_RN50(rdev))
+			rdev->mode_info.connector_table = CT_RN50_POWER;
+		else
+#endif
 			rdev->mode_info.connector_table = CT_GENERIC;
 	}
 
@@ -1853,6 +1858,33 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 					    CONNECTOR_OBJECT_ID_SVIDEO,
 					    &hpd);
 		break;
+	case CT_RN50_POWER:
+		DRM_INFO("Connector Table: %d (rn50-power)\n",
+			 rdev->mode_info.connector_table);
+		/* VGA - primary dac */
+		ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_id(dev,
+								ATOM_DEVICE_CRT1_SUPPORT,
+								1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
+		ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_id(dev,
+								ATOM_DEVICE_CRT2_SUPPORT,
+								2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
+		break;
 	default:
 		DRM_INFO("Connector table: %d (invalid)\n",
 			 rdev->mode_info.connector_table);
@@ -1906,15 +1938,6 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev,
 			return false;
 	}
 
-	/* Some RV100 cards with 2 VGA ports show up with DVI+VGA */
-	if (dev->pdev->device == 0x5159 &&
-	    dev->pdev->subsystem_vendor == 0x1002 &&
-	    dev->pdev->subsystem_device == 0x013a) {
-		if (*legacy_connector == CONNECTOR_DVI_I_LEGACY)
-			*legacy_connector = CONNECTOR_CRT_LEGACY;
-
-	}
-
 	/* X300 card with extra non-existent DVI port */
 	if (dev->pdev->device == 0x5B60 &&
 	    dev->pdev->subsystem_vendor == 0x17af &&
@@ -3019,6 +3042,14 @@ void radeon_combios_asic_init(struct drm_device *dev)
 		combios_write_ram_size(dev);
 	}
 
+	/* quirk for rs4xx HP nx6125 laptop to make it resume
+	 * - it hangs on resume inside the dynclk 1 table.
+	 */
+	if (rdev->family == CHIP_RS480 &&
+	    rdev->pdev->subsystem_vendor == 0x103c &&
+	    rdev->pdev->subsystem_device == 0x308b)
+		return;
+
 	/* DYN CLK 1 */
 	table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
 	if (table)

+ 1 - 1
drivers/gpu/drm/radeon/radeon_cursor.c

@@ -194,7 +194,7 @@ unpin:
 fail:
 	drm_gem_object_unreference_unlocked(obj);
 
-	return 0;
+	return ret;
 }
 
 int radeon_crtc_cursor_move(struct drm_crtc *crtc,

+ 7 - 0
drivers/gpu/drm/radeon/radeon_device.c

@@ -779,6 +779,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
 
 int radeon_resume_kms(struct drm_device *dev)
 {
+	struct drm_connector *connector;
 	struct radeon_device *rdev = dev->dev_private;
 
 	if (rdev->powered_down)
@@ -797,6 +798,12 @@ int radeon_resume_kms(struct drm_device *dev)
 	radeon_resume(rdev);
 	radeon_pm_resume(rdev);
 	radeon_restore_bios_scratch_regs(rdev);
+
+	/* turn on display hw */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+	}
+
 	radeon_fbdev_set_suspend(rdev, 0);
 	release_console_sem();
 

+ 2 - 2
drivers/gpu/drm/radeon/radeon_encoders.c

@@ -1072,6 +1072,8 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
 	if (is_dig) {
 		switch (mode) {
 		case DRM_MODE_DPMS_ON:
+			if (!ASIC_IS_DCE4(rdev))
+				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
 			if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
 				struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
@@ -1079,8 +1081,6 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
 				if (ASIC_IS_DCE4(rdev))
 					atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON);
 			}
-			if (!ASIC_IS_DCE4(rdev))
-				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
 			break;
 		case DRM_MODE_DPMS_STANDBY:
 		case DRM_MODE_DPMS_SUSPEND:

+ 9 - 13
drivers/gpu/drm/radeon/radeon_legacy_encoders.c

@@ -928,16 +928,14 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
 	if (ASIC_IS_R300(rdev)) {
 		gpiopad_a = RREG32(RADEON_GPIOPAD_A) | 1;
 		disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
-	}
-
-	if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev))
-		disp_tv_out_cntl = RREG32(RADEON_DISP_TV_OUT_CNTL);
-	else
+	} else if (rdev->family != CHIP_R200)
 		disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
-
-	if (rdev->family == CHIP_R200)
+	else if (rdev->family == CHIP_R200)
 		fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
 
+	if (rdev->family >= CHIP_R200)
+		disp_tv_out_cntl = RREG32(RADEON_DISP_TV_OUT_CNTL);
+
 	if (is_tv) {
 		uint32_t dac_cntl;
 
@@ -1002,15 +1000,13 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
 	if (ASIC_IS_R300(rdev)) {
 		WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
 		WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
-	}
+	} else if (rdev->family != CHIP_R200)
+		WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
+	else if (rdev->family == CHIP_R200)
+		WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
 
 	if (rdev->family >= CHIP_R200)
 		WREG32(RADEON_DISP_TV_OUT_CNTL, disp_tv_out_cntl);
-	else
-		WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
-
-	if (rdev->family == CHIP_R200)
-		WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
 
 	if (is_tv)
 		radeon_legacy_tv_mode_set(encoder, mode, adjusted_mode);

+ 1 - 0
drivers/gpu/drm/radeon/radeon_mode.h

@@ -206,6 +206,7 @@ enum radeon_connector_table {
 	CT_MINI_INTERNAL,
 	CT_IMAC_G5_ISIGHT,
 	CT_EMAC,
+	CT_RN50_POWER,
 };
 
 enum radeon_dvo_chip {

+ 34 - 7
drivers/gpu/drm/radeon/radeon_pm.c

@@ -397,13 +397,20 @@ static ssize_t radeon_set_pm_method(struct device *dev,
 		rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
 		mutex_unlock(&rdev->pm.mutex);
 	} else if (strncmp("profile", buf, strlen("profile")) == 0) {
+		bool flush_wq = false;
+
 		mutex_lock(&rdev->pm.mutex);
-		rdev->pm.pm_method = PM_METHOD_PROFILE;
+		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+			cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+			flush_wq = true;
+		}
 		/* disable dynpm */
 		rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
 		rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
-		cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+		rdev->pm.pm_method = PM_METHOD_PROFILE;
 		mutex_unlock(&rdev->pm.mutex);
+		if (flush_wq)
+			flush_workqueue(rdev->wq);
 	} else {
 		DRM_ERROR("invalid power method!\n");
 		goto fail;
@@ -418,9 +425,18 @@ static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon
 
 void radeon_pm_suspend(struct radeon_device *rdev)
 {
+	bool flush_wq = false;
+
 	mutex_lock(&rdev->pm.mutex);
-	cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+		cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+		if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
+			rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
+		flush_wq = true;
+	}
 	mutex_unlock(&rdev->pm.mutex);
+	if (flush_wq)
+		flush_workqueue(rdev->wq);
 }
 
 void radeon_pm_resume(struct radeon_device *rdev)
@@ -432,6 +448,12 @@ void radeon_pm_resume(struct radeon_device *rdev)
 	rdev->pm.current_sclk = rdev->clock.default_sclk;
 	rdev->pm.current_mclk = rdev->clock.default_mclk;
 	rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+	if (rdev->pm.pm_method == PM_METHOD_DYNPM
+	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
+		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
+		queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
+					msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+	}
 	mutex_unlock(&rdev->pm.mutex);
 	radeon_pm_compute_clocks(rdev);
 }
@@ -486,6 +508,8 @@ int radeon_pm_init(struct radeon_device *rdev)
 void radeon_pm_fini(struct radeon_device *rdev)
 {
 	if (rdev->pm.num_power_states > 1) {
+		bool flush_wq = false;
+
 		mutex_lock(&rdev->pm.mutex);
 		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
 			rdev->pm.profile = PM_PROFILE_DEFAULT;
@@ -493,13 +517,16 @@ void radeon_pm_fini(struct radeon_device *rdev)
 			radeon_pm_set_clocks(rdev);
 		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
 			/* cancel work */
-			cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
+			cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+			flush_wq = true;
 			/* reset default clocks */
 			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
 			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
 			radeon_pm_set_clocks(rdev);
 		}
 		mutex_unlock(&rdev->pm.mutex);
+		if (flush_wq)
+			flush_workqueue(rdev->wq);
 
 		device_remove_file(rdev->dev, &dev_attr_power_profile);
 		device_remove_file(rdev->dev, &dev_attr_power_method);
@@ -720,12 +747,12 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
 			radeon_pm_get_dynpm_state(rdev);
 			radeon_pm_set_clocks(rdev);
 		}
+
+		queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
+					msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
 	}
 	mutex_unlock(&rdev->pm.mutex);
 	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
-
-	queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
-					msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
 }
 
 /*

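Every dynpm teardown path in this file now follows the same shape: cancel the delayed work while pm.mutex is held, remember that a flush is needed, and only call flush_workqueue() once the mutex has been dropped. The idle handler itself takes pm.mutex (see the hunk above), so waiting for it while still holding the mutex could deadlock, which is presumably why cancel_delayed_work_sync() was dropped from radeon_pm_fini(). A generic kernel-style sketch of the pattern, not the radeon code itself:

    #include <linux/mutex.h>
    #include <linux/workqueue.h>

    /* assumed driver context mirroring rdev->pm: one delayed work item
     * whose handler takes drv->lock */
    struct drv {
            struct mutex lock;
            struct workqueue_struct *wq;
            struct delayed_work idle_work;
            bool dynpm_enabled;
    };

    static void drv_stop_idle_work(struct drv *drv)
    {
            bool flush = false;

            mutex_lock(&drv->lock);
            if (drv->dynpm_enabled) {
                    cancel_delayed_work(&drv->idle_work); /* non-sync: safe under lock */
                    drv->dynpm_enabled = false;
                    flush = true;
            }
            mutex_unlock(&drv->lock);

            if (flush)      /* the handler takes drv->lock, so wait only here */
                    flush_workqueue(drv->wq);
    }
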
+ 5 - 5
drivers/gpu/drm/radeon/reg_srcs/evergreen

@@ -80,8 +80,8 @@ evergreen 0x9400
 0x00028010 DB_RENDER_OVERRIDE2
 0x00028028 DB_STENCIL_CLEAR
 0x0002802C DB_DEPTH_CLEAR
-0x00028034 PA_SC_SCREEN_SCISSOR_BR
 0x00028030 PA_SC_SCREEN_SCISSOR_TL
+0x00028034 PA_SC_SCREEN_SCISSOR_BR
 0x0002805C DB_DEPTH_SLICE
 0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
 0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
@@ -460,8 +460,8 @@ evergreen 0x9400
 0x00028844 SQ_PGM_RESOURCES_PS
 0x00028848 SQ_PGM_RESOURCES_2_PS
 0x0002884C SQ_PGM_EXPORTS_PS
-0x0002885C SQ_PGM_RESOURCES_VS
-0x00028860 SQ_PGM_RESOURCES_2_VS
+0x00028860 SQ_PGM_RESOURCES_VS
+0x00028864 SQ_PGM_RESOURCES_2_VS
 0x00028878 SQ_PGM_RESOURCES_GS
 0x0002887C SQ_PGM_RESOURCES_2_GS
 0x00028890 SQ_PGM_RESOURCES_ES
@@ -469,8 +469,8 @@ evergreen 0x9400
 0x000288A8 SQ_PGM_RESOURCES_FS
 0x000288BC SQ_PGM_RESOURCES_HS
 0x000288C0 SQ_PGM_RESOURCES_2_HS
-0x000288D0 SQ_PGM_RESOURCES_LS
-0x000288D4 SQ_PGM_RESOURCES_2_LS
+0x000288D4 SQ_PGM_RESOURCES_LS
+0x000288D8 SQ_PGM_RESOURCES_2_LS
 0x000288E8 SQ_LDS_ALLOC
 0x000288EC SQ_LDS_ALLOC_PS
 0x000288F0 SQ_VTX_SEMANTIC_CLEAR

+ 20 - 21
drivers/gpu/drm/radeon/rs690.c

@@ -79,7 +79,13 @@ void rs690_pm_info(struct radeon_device *rdev)
 			tmp.full = dfixed_const(100);
 			rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock);
 			rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
-			rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
+			if (info->info.usK8MemoryClock)
+				rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
+			else if (rdev->clock.default_mclk) {
+				rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
+				rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
+			} else
+				rdev->pm.igp_system_mclk.full = dfixed_const(400);
 			rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock));
 			rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth);
 			break;
@@ -87,34 +93,31 @@ void rs690_pm_info(struct radeon_device *rdev)
 			tmp.full = dfixed_const(100);
 			rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock);
 			rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
-			rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock);
+			if (info->info_v2.ulBootUpUMAClock)
+				rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock);
+			else if (rdev->clock.default_mclk)
+				rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
+			else
+				rdev->pm.igp_system_mclk.full = dfixed_const(66700);
 			rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
 			rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq);
 			rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
 			rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
 			break;
 		default:
-			tmp.full = dfixed_const(100);
 			/* We assume the slower possible clock ie worst case */
-			/* DDR 333Mhz */
-			rdev->pm.igp_sideport_mclk.full = dfixed_const(333);
-			/* FIXME: system clock ? */
-			rdev->pm.igp_system_mclk.full = dfixed_const(100);
-			rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
-			rdev->pm.igp_ht_link_clk.full = dfixed_const(200);
+			rdev->pm.igp_sideport_mclk.full = dfixed_const(200);
+			rdev->pm.igp_system_mclk.full = dfixed_const(200);
+			rdev->pm.igp_ht_link_clk.full = dfixed_const(1000);
 			rdev->pm.igp_ht_link_width.full = dfixed_const(8);
 			DRM_ERROR("No integrated system info for your GPU, using safe default\n");
 			break;
 		}
 	} else {
-		tmp.full = dfixed_const(100);
 		/* We assume the slower possible clock ie worst case */
-		/* DDR 333Mhz */
-		rdev->pm.igp_sideport_mclk.full = dfixed_const(333);
-		/* FIXME: system clock ? */
-		rdev->pm.igp_system_mclk.full = dfixed_const(100);
-		rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
-		rdev->pm.igp_ht_link_clk.full = dfixed_const(200);
+		rdev->pm.igp_sideport_mclk.full = dfixed_const(200);
+		rdev->pm.igp_system_mclk.full = dfixed_const(200);
+		rdev->pm.igp_ht_link_clk.full = dfixed_const(1000);
 		rdev->pm.igp_ht_link_width.full = dfixed_const(8);
 		DRM_ERROR("No integrated system info for your GPU, using safe default\n");
 	}
@@ -228,10 +231,6 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
 	fixed20_12 a, b, c;
 	fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
 	fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
-	/* FIXME: detect IGP with sideport memory, i don't think there is any
-	 * such product available
-	 */
-	bool sideport = false;
 
 	if (!crtc->base.enabled) {
 		/* FIXME: wouldn't it better to set priority mark to maximum */
@@ -300,7 +299,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
 
 	/* Maximun bandwidth is the minimun bandwidth of all component */
 	rdev->pm.max_bandwidth = rdev->pm.core_bandwidth;
-	if (sideport) {
+	if (rdev->mc.igp_sideport_enabled) {
 		if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
 			rdev->pm.sideport_bandwidth.full)
 			rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
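
rs690_pm_info() now refuses to leave igp_system_mclk (and the bandwidth figures derived from it) at zero when the BIOS integrated-info table reports a zero memory clock: it falls back to the default mclk from the clock info, and finally to a fixed safe constant, matching the "divide by 0" shortlog entry. A small sketch of the fallback chain only, using plain integers instead of fixed20_12 and skipping the per-revision unit conversions:

    #include <stdio.h>

    static unsigned pick_system_mclk(unsigned bios_clk, unsigned default_mclk,
                                     unsigned safe_fallback)
    {
            if (bios_clk)
                    return bios_clk;        /* integrated-info table value */
            if (default_mclk)
                    return default_mclk;    /* firmware default clock */
            return safe_fallback;           /* 400 (v1) or 66700 (v2) in the hunk */
    }

    int main(void)
    {
            /* a zero result here previously reached the bandwidth divides */
            printf("%u\n", pick_system_mclk(0, 0, 400));
            return 0;
    }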

+ 1 - 1
drivers/gpu/drm/radeon/rv770.c

@@ -224,7 +224,7 @@ static void rv770_mc_program(struct radeon_device *rdev)
 	WREG32(MC_VM_FB_LOCATION, tmp);
 	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
 	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
-	WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
+	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
 	if (rdev->flags & RADEON_IS_AGP) {
 		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
 		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);

+ 1 - 1
drivers/gpu/drm/ttm/ttm_page_alloc.c

@@ -667,7 +667,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
 {
 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
 	struct page *p = NULL;
-	int gfp_flags = 0;
+	int gfp_flags = GFP_USER;
 	int r;
 
 	/* set zero flag for page allocation if required */

+ 0 - 1
include/linux/agp_backend.h

@@ -79,7 +79,6 @@ struct agp_memory {
 	u32 physical;
 	bool is_bound;
 	bool is_flushed;
-	bool vmalloc_flag;
 	/* list of agp_memory mapped to the aperture */
 	struct list_head mapped_list;
 	/* DMA-mapped addresses */

+ 2 - 2
include/linux/fb.h

@@ -786,8 +786,6 @@ struct fb_tile_ops {
 #define FBINFO_MISC_USEREVENT          0x10000 /* event request
 						  from userspace */
 #define FBINFO_MISC_TILEBLITTING       0x20000 /* use tile blitting */
-#define FBINFO_MISC_FIRMWARE           0x40000 /* a replaceable firmware
-						  inited framebuffer */
 
 /* A driver may set this flag to indicate that it does want a set_par to be
  * called every time when fbcon_switch is executed. The advantage is that with
@@ -801,6 +799,8 @@ struct fb_tile_ops {
  */
 #define FBINFO_MISC_ALWAYS_SETPAR   0x40000
 
+/* where the fb is a firmware driver, and can be replaced with a proper one */
+#define FBINFO_MISC_FIRMWARE        0x80000
 /*
  * Host and GPU endianness differ.
  */
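
The fb.h change exists because FBINFO_MISC_FIRMWARE and FBINFO_MISC_ALWAYS_SETPAR had both ended up defined as 0x40000, so a test for one flag also matched drivers that had only set the other; FIRMWARE moves to the free 0x80000 bit. A small demonstration of the collision, using the old and new values from this hunk:

    #include <stdio.h>

    #define FBINFO_MISC_ALWAYS_SETPAR 0x40000
    #define OLD_FBINFO_MISC_FIRMWARE  0x40000   /* colliding value being removed */
    #define NEW_FBINFO_MISC_FIRMWARE  0x80000   /* value after this patch */

    int main(void)
    {
            unsigned flags = FBINFO_MISC_ALWAYS_SETPAR; /* driver never set FIRMWARE */

            printf("old check: %s\n",
                   (flags & OLD_FBINFO_MISC_FIRMWARE) ? "looks like a firmware fb (wrong)"
                                                      : "not a firmware fb");
            printf("new check: %s\n",
                   (flags & NEW_FBINFO_MISC_FIRMWARE) ? "looks like a firmware fb"
                                                      : "not a firmware fb (correct)");
            return 0;
    }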