
Merge branch 'bugfixes' into nfs-for-2.6.38

Conflicts:
	fs/nfs/nfs2xdr.c
	fs/nfs/nfs3xdr.c
	fs/nfs/nfs4xdr.c
Trond Myklebust, 14 years ago
parent
commit 68c404b18f
100 changed files with 875 additions and 703 deletions
  1. +1 -0  Documentation/accounting/getdelays.c
  2. +100 -112  Documentation/filesystems/Locking
  3. +1 -6  Documentation/kernel-parameters.txt
  4. +2 -2  Documentation/power/runtime_pm.txt
  5. +31 -28  Documentation/scsi/scsi_mid_low_api.txt
  6. +10 -1  Documentation/trace/postprocess/trace-vmscan-postprocess.pl
  7. +18 -3  MAINTAINERS
  8. +1 -1  Makefile
  9. +1 -0  arch/arm/common/it8152.c
  10. +1 -0  arch/arm/include/asm/hardware/it8152.h
  11. +0 -3  arch/arm/include/asm/highmem.h
  12. +3 -3  arch/arm/include/asm/sizes.h
  13. +1 -0  arch/arm/include/asm/system.h
  14. +6 -0  arch/arm/kernel/entry-common.S
  15. +0 -1  arch/arm/kernel/smp.c
  16. +1 -1  arch/arm/mach-at91/Makefile
  17. +3 -95  arch/arm/mach-at91/board-pcontrol-g20.c
  18. +43 -39  arch/arm/mach-at91/board-stamp9g20.c
  19. +1 -1  arch/arm/mach-at91/clock.c
  20. +2 -0  arch/arm/mach-at91/include/mach/at91_mci.h
  21. +7 -0  arch/arm/mach-at91/include/mach/stamp9g20.h
  22. +1 -1  arch/arm/mach-ixp4xx/common-pci.c
  23. +1 -0  arch/arm/mach-pxa/Kconfig
  24. +2 -2  arch/arm/mach-pxa/sleep.S
  25. +7 -0  arch/arm/mach-s3c2412/Kconfig
  26. +2 -1  arch/arm/mach-s3c2412/Makefile
  27. +1 -0  arch/arm/mach-s3c2416/Kconfig
  28. +6 -0  arch/arm/mach-s5pv210/mach-aquila.c
  29. +6 -0  arch/arm/mach-s5pv210/mach-goni.c
  30. +26 -4  arch/arm/mach-shmobile/include/mach/entry-macro.S
  31. +1 -1  arch/arm/mach-shmobile/include/mach/vmalloc.h
  32. +19 -18  arch/arm/mm/cache-feroceon-l2.c
  33. +21 -36  arch/arm/mm/cache-xsc3l2.c
  34. +4 -3  arch/arm/mm/dma-mapping.c
  35. +4 -3  arch/arm/mm/flush.c
  36. +0 -87  arch/arm/mm/highmem.c
  37. +1 -1  arch/arm/plat-s3c24xx/Kconfig
  38. +24 -14  arch/mips/Kconfig
  39. +2 -0  arch/mips/alchemy/common/platform.c
  40. +2 -3  arch/mips/alchemy/devboards/prom.c
  41. +3 -6  arch/mips/ar7/clock.c
  42. +3 -0  arch/mips/ar7/time.c
  43. +106 -47  arch/mips/bcm47xx/setup.c
  44. +2 -2  arch/mips/include/asm/cpu.h
  45. +6 -2  arch/mips/include/asm/elf.h
  46. +10 -2  arch/mips/include/asm/io.h
  47. +1 -2  arch/mips/include/asm/mach-ar7/ar7.h
  48. +7 -0  arch/mips/include/asm/mach-bcm47xx/nvram.h
  49. +2 -2  arch/mips/jz4740/board-qi_lb60.c
  50. +1 -1  arch/mips/jz4740/platform.c
  51. +1 -1  arch/mips/jz4740/prom.c
  52. +1 -1  arch/mips/kernel/cevt-r4k.c
  53. +2 -5  arch/mips/kernel/cpu-probe.c
  54. +7 -6  arch/mips/kernel/linux32.c
  55. +0 -1  arch/mips/kernel/process.c
  56. +1 -1  arch/mips/kernel/prom.c
  57. +1 -1  arch/mips/kernel/smp-mt.c
  58. +35 -9  arch/mips/kernel/traps.c
  59. +6 -8  arch/mips/kernel/vpe.c
  60. +2 -2  arch/mips/lib/memset.S
  61. +2 -2  arch/mips/loongson/common/env.c
  62. +95 -21  arch/mips/math-emu/cp1emu.c
  63. +3 -1  arch/mips/mm/dma-default.c
  64. +4 -0  arch/mips/mm/sc-mips.c
  65. +10 -2  arch/mips/pmc-sierra/yosemite/py-console.c
  66. +4 -4  arch/mips/sibyte/swarm/setup.c
  67. +1 -1  arch/mn10300/kernel/irq.c
  68. +3 -7  arch/mn10300/kernel/time.c
  69. +1 -0  arch/powerpc/platforms/52xx/mpc52xx_gpt.c
  70. +1 -1  arch/sh/boards/mach-se/7206/irq.c
  71. +1 -1  arch/sh/kernel/cpu/sh2a/clock-sh7201.c
  72. +1 -2  arch/sh/kernel/cpu/sh4/clock-sh4-202.c
  73. +1 -1  arch/tile/include/asm/signal.h
  74. +3 -3  arch/tile/kernel/compat_signal.c
  75. +21 -3  arch/tile/kernel/intvec_32.S
  76. +8 -0  arch/tile/kernel/process.c
  77. +4 -6  arch/tile/kernel/signal.c
  78. +1 -1  arch/x86/boot/compressed/misc.c
  79. +3 -0  arch/x86/include/asm/e820.h
  80. +1 -1  arch/x86/include/asm/kvm_host.h
  81. +1 -0  arch/x86/kernel/Makefile
  82. +8 -0  arch/x86/kernel/apic/apic.c
  83. +2 -2  arch/x86/kernel/apic/io_apic.c
  84. +0 -7  arch/x86/kernel/apic/probe_64.c
  85. +7 -5  arch/x86/kernel/head_32.S
  86. +16 -10  arch/x86/kernel/hpet.c
  87. +5 -11  arch/x86/kernel/microcode_intel.c
  88. +48 -0  arch/x86/kernel/resource.c
  89. +14 -4  arch/x86/kernel/setup.c
  90. +2 -1  arch/x86/kernel/xsave.c
  91. +2 -0  arch/x86/kvm/i8259.c
  92. +2 -1  arch/x86/kvm/mmu.c
  93. +4 -0  arch/x86/kvm/svm.c
  94. +0 -5  arch/x86/kvm/vmx.c
  95. +5 -6  arch/x86/kvm/x86.c
  96. +5 -0  arch/x86/kvm/x86.h
  97. +16 -8  arch/x86/oprofile/op_model_amd.c
  98. +5 -13  arch/x86/pci/i386.c
  99. +2 -2  arch/x86/vdso/Makefile
  100. +3 -2  block/blk-map.c

+ 1 - 0
Documentation/accounting/getdelays.c

@@ -516,6 +516,7 @@ int main(int argc, char *argv[])
 			default:
 			default:
 				fprintf(stderr, "Unknown nla_type %d\n",
 				fprintf(stderr, "Unknown nla_type %d\n",
 					na->nla_type);
 					na->nla_type);
+			case TASKSTATS_TYPE_NULL:
 				break;
 				break;
 			}
 			}
 			na = (struct nlattr *) (GENLMSG_DATA(&msg) + len);
 			na = (struct nlattr *) (GENLMSG_DATA(&msg) + len);
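
The hunk above changes control flow rather than output: TASKSTATS_TYPE_NULL attributes now reach the break without triggering the "Unknown nla_type" warning, while genuinely unknown types still print it and fall through to the same break. A minimal stand-alone sketch of that switch shape (the FAKE_* constants are placeholders, not the values from <linux/taskstats.h>):

#include <stdio.h>

enum { FAKE_TYPE_PID = 1, FAKE_TYPE_NULL = 6 };	/* placeholder values */

static void handle_attr(int nla_type)
{
	switch (nla_type) {
	case FAKE_TYPE_PID:
		printf("pid attribute\n");
		break;
	default:
		fprintf(stderr, "Unknown nla_type %d\n", nla_type);
		/* fall through to the shared break */
	case FAKE_TYPE_NULL:
		break;			/* NULL padding is skipped silently */
	}
}

int main(void)
{
	handle_attr(FAKE_TYPE_PID);	/* prints "pid attribute" */
	handle_attr(FAKE_TYPE_NULL);	/* prints nothing */
	handle_attr(99);		/* warns about an unknown type */
	return 0;
}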

+ 100 - 112
Documentation/filesystems/Locking

@@ -18,7 +18,6 @@ prototypes:
 	char *(*d_dname)((struct dentry *dentry, char *buffer, int buflen);
 	char *(*d_dname)((struct dentry *dentry, char *buffer, int buflen);
 
 
 locking rules:
 locking rules:
-	none have BKL
 		dcache_lock	rename_lock	->d_lock	may block
 		dcache_lock	rename_lock	->d_lock	may block
 d_revalidate:	no		no		no		yes
 d_revalidate:	no		no		no		yes
 d_hash		no		no		no		yes
 d_hash		no		no		no		yes
@@ -42,18 +41,23 @@ ata *);
 	int (*rename) (struct inode *, struct dentry *,
 	int (*rename) (struct inode *, struct dentry *,
 			struct inode *, struct dentry *);
 			struct inode *, struct dentry *);
 	int (*readlink) (struct dentry *, char __user *,int);
 	int (*readlink) (struct dentry *, char __user *,int);
-	int (*follow_link) (struct dentry *, struct nameidata *);
+	void * (*follow_link) (struct dentry *, struct nameidata *);
+	void (*put_link) (struct dentry *, struct nameidata *, void *);
 	void (*truncate) (struct inode *);
 	void (*truncate) (struct inode *);
 	int (*permission) (struct inode *, int, struct nameidata *);
 	int (*permission) (struct inode *, int, struct nameidata *);
+	int (*check_acl)(struct inode *, int);
 	int (*setattr) (struct dentry *, struct iattr *);
 	int (*setattr) (struct dentry *, struct iattr *);
 	int (*getattr) (struct vfsmount *, struct dentry *, struct kstat *);
 	int (*getattr) (struct vfsmount *, struct dentry *, struct kstat *);
 	int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
 	int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
 	ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
 	ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
 	int (*removexattr) (struct dentry *, const char *);
 	int (*removexattr) (struct dentry *, const char *);
+	void (*truncate_range)(struct inode *, loff_t, loff_t);
+	long (*fallocate)(struct inode *inode, int mode, loff_t offset, loff_t len);
+	int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
 
 
 locking rules:
 locking rules:
-	all may block, none have BKL
+	all may block
 		i_mutex(inode)
 		i_mutex(inode)
 lookup:		yes
 lookup:		yes
 create:		yes
 create:		yes
@@ -66,19 +70,24 @@ rmdir:		yes (both)	(see below)
 rename:		yes (all)	(see below)
 rename:		yes (all)	(see below)
 readlink:	no
 readlink:	no
 follow_link:	no
 follow_link:	no
+put_link:	no
 truncate:	yes		(see below)
 truncate:	yes		(see below)
 setattr:	yes
 setattr:	yes
 permission:	no
 permission:	no
+check_acl:	no
 getattr:	no
 getattr:	no
 setxattr:	yes
 setxattr:	yes
 getxattr:	no
 getxattr:	no
 listxattr:	no
 listxattr:	no
 removexattr:	yes
 removexattr:	yes
+truncate_range:	yes
+fallocate:	no
+fiemap:		no
 	Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
 	Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
 victim.
 victim.
 	cross-directory ->rename() has (per-superblock) ->s_vfs_rename_sem.
 	cross-directory ->rename() has (per-superblock) ->s_vfs_rename_sem.
 	->truncate() is never called directly - it's a callback, not a
 	->truncate() is never called directly - it's a callback, not a
-method. It's called by vmtruncate() - library function normally used by
+method. It's called by vmtruncate() - deprecated library function used by
 ->setattr(). Locking information above applies to that call (i.e. is
 ->setattr(). Locking information above applies to that call (i.e. is
 inherited from ->setattr() - vmtruncate() is used when ATTR_SIZE had been
 inherited from ->setattr() - vmtruncate() is used when ATTR_SIZE had been
 passed).
 passed).
@@ -91,7 +100,7 @@ prototypes:
 	struct inode *(*alloc_inode)(struct super_block *sb);
 	struct inode *(*alloc_inode)(struct super_block *sb);
 	void (*destroy_inode)(struct inode *);
 	void (*destroy_inode)(struct inode *);
 	void (*dirty_inode) (struct inode *);
 	void (*dirty_inode) (struct inode *);
-	int (*write_inode) (struct inode *, int);
+	int (*write_inode) (struct inode *, struct writeback_control *wbc);
 	int (*drop_inode) (struct inode *);
 	int (*drop_inode) (struct inode *);
 	void (*evict_inode) (struct inode *);
 	void (*evict_inode) (struct inode *);
 	void (*put_super) (struct super_block *);
 	void (*put_super) (struct super_block *);
@@ -105,10 +114,10 @@ prototypes:
 	int (*show_options)(struct seq_file *, struct vfsmount *);
 	int (*show_options)(struct seq_file *, struct vfsmount *);
 	ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
 	ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
 	ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
 	ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
+	int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
 
 
 locking rules:
 locking rules:
 	All may block [not true, see below]
 	All may block [not true, see below]
-	None have BKL
 			s_umount
 			s_umount
 alloc_inode:
 alloc_inode:
 destroy_inode:
 destroy_inode:
@@ -127,6 +136,7 @@ umount_begin:		no
 show_options:		no		(namespace_sem)
 show_options:		no		(namespace_sem)
 quota_read:		no		(see below)
 quota_read:		no		(see below)
 quota_write:		no		(see below)
 quota_write:		no		(see below)
+bdev_try_to_free_page:	no		(see below)
 
 
 ->statfs() has s_umount (shared) when called by ustat(2) (native or
 ->statfs() has s_umount (shared) when called by ustat(2) (native or
 compat), but that's an accident of bad API; s_umount is used to pin
 compat), but that's an accident of bad API; s_umount is used to pin
@@ -139,19 +149,25 @@ be the only ones operating on the quota file by the quota code (via
 dqio_sem) (unless an admin really wants to screw up something and
 dqio_sem) (unless an admin really wants to screw up something and
 writes to quota files with quotas on). For other details about locking
 writes to quota files with quotas on). For other details about locking
 see also dquot_operations section.
 see also dquot_operations section.
+->bdev_try_to_free_page is called from the ->releasepage handler of
+the block device inode.  See there for more details.
 
 
 --------------------------- file_system_type ---------------------------
 --------------------------- file_system_type ---------------------------
 prototypes:
 prototypes:
 	int (*get_sb) (struct file_system_type *, int,
 	int (*get_sb) (struct file_system_type *, int,
 		       const char *, void *, struct vfsmount *);
 		       const char *, void *, struct vfsmount *);
+	struct dentry *(*mount) (struct file_system_type *, int,
+		       const char *, void *);
 	void (*kill_sb) (struct super_block *);
 	void (*kill_sb) (struct super_block *);
 locking rules:
 locking rules:
-		may block	BKL
-get_sb		yes		no
-kill_sb		yes		no
+		may block
+get_sb		yes
+mount		yes
+kill_sb		yes
 
 
 ->get_sb() returns error or 0 with locked superblock attached to the vfsmount
 ->get_sb() returns error or 0 with locked superblock attached to the vfsmount
 (exclusive on ->s_umount).
 (exclusive on ->s_umount).
+->mount() returns ERR_PTR or the root dentry.
 ->kill_sb() takes a write-locked superblock, does all shutdown work on it,
 ->kill_sb() takes a write-locked superblock, does all shutdown work on it,
 unlocks and drops the reference.
 unlocks and drops the reference.
 
 
@@ -176,27 +192,35 @@ prototypes:
 	void (*freepage)(struct page *);
 	void (*freepage)(struct page *);
 	int (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
 	int (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
 			loff_t offset, unsigned long nr_segs);
 			loff_t offset, unsigned long nr_segs);
-	int (*launder_page) (struct page *);
+	int (*get_xip_mem)(struct address_space *, pgoff_t, int, void **,
+				unsigned long *);
+	int (*migratepage)(struct address_space *, struct page *, struct page *);
+	int (*launder_page)(struct page *);
+	int (*is_partially_uptodate)(struct page *, read_descriptor_t *, unsigned long);
+	int (*error_remove_page)(struct address_space *, struct page *);
 
 
 locking rules:
 locking rules:
 	All except set_page_dirty and freepage may block
 	All except set_page_dirty and freepage may block
 
 
-			BKL	PageLocked(page)	i_mutex
-writepage:		no	yes, unlocks (see below)
-readpage:		no	yes, unlocks
-sync_page:		no	maybe
-writepages:		no
-set_page_dirty		no	no
-readpages:		no
-write_begin:		no	locks the page		yes
-write_end:		no	yes, unlocks		yes
-perform_write:		no	n/a			yes
-bmap:			no
-invalidatepage:		no	yes
-releasepage:		no	yes
-freepage:		no	yes
-direct_IO:		no
-launder_page:		no	yes
+			PageLocked(page)	i_mutex
+writepage:		yes, unlocks (see below)
+readpage:		yes, unlocks
+sync_page:		maybe
+writepages:
+set_page_dirty		no
+readpages:
+write_begin:		locks the page		yes
+write_end:		yes, unlocks		yes
+bmap:
+invalidatepage:		yes
+releasepage:		yes
+freepage:		yes
+direct_IO:
+get_xip_mem:					maybe
+migratepage:		yes (both)
+launder_page:		yes
+is_partially_uptodate:	yes
+error_remove_page:	yes
 
 
 	->write_begin(), ->write_end(), ->sync_page() and ->readpage()
 	->write_begin(), ->write_end(), ->sync_page() and ->readpage()
 may be called from the request handler (/dev/loop).
 may be called from the request handler (/dev/loop).
@@ -276,9 +300,8 @@ under spinlock (it cannot block) and is sometimes called with the page
 not locked.
 not locked.
 
 
 	->bmap() is currently used by legacy ioctl() (FIBMAP) provided by some
 	->bmap() is currently used by legacy ioctl() (FIBMAP) provided by some
-filesystems and by the swapper. The latter will eventually go away. All
-instances do not actually need the BKL. Please, keep it that way and don't
-breed new callers.
+filesystems and by the swapper. The latter will eventually go away.  Please,
+keep it that way and don't breed new callers.
 
 
 	->invalidatepage() is called when the filesystem must attempt to drop
 	->invalidatepage() is called when the filesystem must attempt to drop
 some or all of the buffers from the page when it is being truncated.  It
 some or all of the buffers from the page when it is being truncated.  It
@@ -299,47 +322,37 @@ cleaned, or an error value if not. Note that in order to prevent the page
 getting mapped back in and redirtied, it needs to be kept locked
 getting mapped back in and redirtied, it needs to be kept locked
 across the entire operation.
 across the entire operation.
 
 
-	Note: currently almost all instances of address_space methods are
-using BKL for internal serialization and that's one of the worst sources
-of contention. Normally they are calling library functions (in fs/buffer.c)
-and pass foo_get_block() as a callback (on local block-based filesystems,
-indeed). BKL is not needed for library stuff and is usually taken by
-foo_get_block(). It's an overkill, since block bitmaps can be protected by
-internal fs locking and real critical areas are much smaller than the areas
-filesystems protect now.
-
 ----------------------- file_lock_operations ------------------------------
 ----------------------- file_lock_operations ------------------------------
 prototypes:
 prototypes:
-	void (*fl_insert)(struct file_lock *);	/* lock insertion callback */
-	void (*fl_remove)(struct file_lock *);	/* lock removal callback */
 	void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
 	void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
 	void (*fl_release_private)(struct file_lock *);
 	void (*fl_release_private)(struct file_lock *);
 
 
 
 
 locking rules:
 locking rules:
-			BKL	may block
-fl_insert:		yes	no
-fl_remove:		yes	no
-fl_copy_lock:		yes	no
-fl_release_private:	yes	yes
+			file_lock_lock	may block
+fl_copy_lock:		yes		no
+fl_release_private:	maybe		no
 
 
 ----------------------- lock_manager_operations ---------------------------
 ----------------------- lock_manager_operations ---------------------------
 prototypes:
 prototypes:
 	int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
 	int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
 	void (*fl_notify)(struct file_lock *);  /* unblock callback */
 	void (*fl_notify)(struct file_lock *);  /* unblock callback */
+	int (*fl_grant)(struct file_lock *, struct file_lock *, int);
 	void (*fl_release_private)(struct file_lock *);
 	void (*fl_release_private)(struct file_lock *);
 	void (*fl_break)(struct file_lock *); /* break_lease callback */
 	void (*fl_break)(struct file_lock *); /* break_lease callback */
+	int (*fl_mylease)(struct file_lock *, struct file_lock *);
+	int (*fl_change)(struct file_lock **, int);
 
 
 locking rules:
 locking rules:
-			BKL	may block
-fl_compare_owner:	yes	no
-fl_notify:		yes	no
-fl_release_private:	yes	yes
-fl_break:		yes	no
-
-	Currently only NFSD and NLM provide instances of this class. None of the
-them block. If you have out-of-tree instances - please, show up. Locking
-in that area will change.
+			file_lock_lock	may block
+fl_compare_owner:	yes		no
+fl_notify:		yes		no
+fl_grant:		no		no
+fl_release_private:	maybe		no
+fl_break:		yes		no
+fl_mylease:		yes		no
+fl_change		yes		no
+
 --------------------------- buffer_head -----------------------------------
 --------------------------- buffer_head -----------------------------------
 prototypes:
 prototypes:
 	void (*b_end_io)(struct buffer_head *bh, int uptodate);
 	void (*b_end_io)(struct buffer_head *bh, int uptodate);
@@ -364,17 +377,17 @@ prototypes:
 	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
 	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
 
 
 locking rules:
 locking rules:
-			BKL	bd_mutex
-open:			no	yes
-release:		no	yes
-ioctl:			no	no
-compat_ioctl:		no	no
-direct_access:		no	no
-media_changed:		no	no
-unlock_native_capacity:	no	no
-revalidate_disk:	no	no
-getgeo:			no	no
-swap_slot_free_notify:	no	no	(see below)
+			bd_mutex
+open:			yes
+release:		yes
+ioctl:			no
+compat_ioctl:		no
+direct_access:		no
+media_changed:		no
+unlock_native_capacity:	no
+revalidate_disk:	no
+getgeo:			no
+swap_slot_free_notify:	no	(see below)
 
 
 media_changed, unlock_native_capacity and revalidate_disk are called only from
 media_changed, unlock_native_capacity and revalidate_disk are called only from
 check_disk_change().
 check_disk_change().
@@ -413,34 +426,21 @@ prototypes:
 	unsigned long (*get_unmapped_area)(struct file *, unsigned long,
 	unsigned long (*get_unmapped_area)(struct file *, unsigned long,
 			unsigned long, unsigned long, unsigned long);
 			unsigned long, unsigned long, unsigned long);
 	int (*check_flags)(int);
 	int (*check_flags)(int);
+	int (*flock) (struct file *, int, struct file_lock *);
+	ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *,
+			size_t, unsigned int);
+	ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *,
+			size_t, unsigned int);
+	int (*setlease)(struct file *, long, struct file_lock **);
 };
 };
 
 
 locking rules:
 locking rules:
-	All may block.
-			BKL
-llseek:			no	(see below)
-read:			no
-aio_read:		no
-write:			no
-aio_write:		no
-readdir: 		no
-poll:			no
-unlocked_ioctl:		no
-compat_ioctl:		no
-mmap:			no
-open:			no
-flush:			no
-release:		no
-fsync:			no	(see below)
-aio_fsync:		no
-fasync:			no
-lock:			yes
-readv:			no
-writev:			no
-sendfile:		no
-sendpage:		no
-get_unmapped_area:	no
-check_flags:		no
+	All may block except for ->setlease.
+	No VFS locks held on entry except for ->fsync and ->setlease.
+
+->fsync() has i_mutex on inode.
+
+->setlease has the file_list_lock held and must not sleep.
 
 
 ->llseek() locking has moved from llseek to the individual llseek
 ->llseek() locking has moved from llseek to the individual llseek
 implementations.  If your fs is not using generic_file_llseek, you
 implementations.  If your fs is not using generic_file_llseek, you
@@ -450,17 +450,10 @@ mutex or just to use i_size_read() instead.
 Note: this does not protect the file->f_pos against concurrent modifications
 Note: this does not protect the file->f_pos against concurrent modifications
 since this is something the userspace has to take care about.
 since this is something the userspace has to take care about.
 
 
-Note: ext2_release() was *the* source of contention on fs-intensive
-loads and dropping BKL on ->release() helps to get rid of that (we still
-grab BKL for cases when we close a file that had been opened r/w, but that
-can and should be done using the internal locking with smaller critical areas).
-Current worst offender is ext2_get_block()...
-
-->fasync() is called without BKL protection, and is responsible for
-maintaining the FASYNC bit in filp->f_flags.  Most instances call
-fasync_helper(), which does that maintenance, so it's not normally
-something one needs to worry about.  Return values > 0 will be mapped to
-zero in the VFS layer.
+->fasync() is responsible for maintaining the FASYNC bit in filp->f_flags.
+Most instances call fasync_helper(), which does that maintenance, so it's
+not normally something one needs to worry about.  Return values > 0 will be
+mapped to zero in the VFS layer.
 
 
 ->readdir() and ->ioctl() on directories must be changed. Ideally we would
 ->readdir() and ->ioctl() on directories must be changed. Ideally we would
 move ->readdir() to inode_operations and use a separate method for directory
 move ->readdir() to inode_operations and use a separate method for directory
@@ -471,8 +464,6 @@ components. And there are other reasons why the current interface is a mess...
 ->read on directories probably must go away - we should just enforce -EISDIR
 ->read on directories probably must go away - we should just enforce -EISDIR
 in sys_read() and friends.
 in sys_read() and friends.
 
 
-->fsync() has i_mutex on inode.
-
 --------------------------- dquot_operations -------------------------------
 --------------------------- dquot_operations -------------------------------
 prototypes:
 prototypes:
 	int (*write_dquot) (struct dquot *);
 	int (*write_dquot) (struct dquot *);
@@ -507,12 +498,12 @@ prototypes:
 	int (*access)(struct vm_area_struct *, unsigned long, void*, int, int);
 	int (*access)(struct vm_area_struct *, unsigned long, void*, int, int);
 
 
 locking rules:
 locking rules:
-		BKL	mmap_sem	PageLocked(page)
-open:		no	yes
-close:		no	yes
-fault:		no	yes		can return with page locked
-page_mkwrite:	no	yes		can return with page locked
-access:		no	yes
+		mmap_sem	PageLocked(page)
+open:		yes
+close:		yes
+fault:		yes		can return with page locked
+page_mkwrite:	yes		can return with page locked
+access:		yes
 
 
 	->fault() is called when a previously not present pte is about
 	->fault() is called when a previously not present pte is about
 to be faulted in. The filesystem must find and return the page associated
 to be faulted in. The filesystem must find and return the page associated
@@ -539,6 +530,3 @@ VM_IO | VM_PFNMAP VMAs.
 
 
 (if you break something or notice that it is broken and do not fix it yourself
 (if you break something or notice that it is broken and do not fix it yourself
 - at least put it here)
 - at least put it here)
-
-ipc/shm.c::shm_delete() - may need BKL.
-->read() and ->write() in many drivers are (probably) missing BKL.
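
Since the tables above add the new ->mount() method alongside the legacy ->get_sb(), here is a hedged sketch of what an implementation looks like for a block-device-backed filesystem. The foofs_* names are invented; mount_bdev() and kill_block_super() are the generic helpers such a filesystem would typically use, and the fill_super callback is reduced to a stub.

#include <linux/fs.h>
#include <linux/module.h>

/* Stub: a real filesystem would read and validate its superblock here. */
static int foofs_fill_super(struct super_block *sb, void *data, int silent)
{
	return -EINVAL;
}

/* ->mount() returns the root dentry on success or an ERR_PTR on failure. */
static struct dentry *foofs_mount(struct file_system_type *fs_type,
				  int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, foofs_fill_super);
}

static struct file_system_type foofs_type = {
	.owner		= THIS_MODULE,
	.name		= "foofs",
	.mount		= foofs_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};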

+ 1 - 6
Documentation/kernel-parameters.txt

@@ -1759,7 +1759,7 @@ and is between 256 and 4096 characters. It is defined in the file
 
 
 	nousb		[USB] Disable the USB subsystem
 	nousb		[USB] Disable the USB subsystem
 
 
-	nowatchdog	[KNL] Disable the lockup detector.
+	nowatchdog	[KNL] Disable the lockup detector (NMI watchdog).
 
 
 	nowb		[ARM]
 	nowb		[ARM]
 
 
@@ -2175,11 +2175,6 @@ and is between 256 and 4096 characters. It is defined in the file
 	reset_devices	[KNL] Force drivers to reset the underlying device
 	reset_devices	[KNL] Force drivers to reset the underlying device
 			during initialization.
 			during initialization.
 
 
-	resource_alloc_from_bottom
-			Allocate new resources from the beginning of available
-			space, not the end.  If you need to use this, please
-			report a bug.
-
 	resume=		[SWSUSP]
 	resume=		[SWSUSP]
 			Specify the partition device for software suspend
 			Specify the partition device for software suspend
 
 

+ 2 - 2
Documentation/power/runtime_pm.txt

@@ -379,8 +379,8 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
       zero)
       zero)
 
 
   bool pm_runtime_suspended(struct device *dev);
   bool pm_runtime_suspended(struct device *dev);
-    - return true if the device's runtime PM status is 'suspended', or false
-      otherwise
+    - return true if the device's runtime PM status is 'suspended' and its
+      'power.disable_depth' field is equal to zero, or false otherwise
 
 
   void pm_runtime_allow(struct device *dev);
   void pm_runtime_allow(struct device *dev);
     - set the power.runtime_auto flag for the device and decrease its usage
     - set the power.runtime_auto flag for the device and decrease its usage
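
The sharpened definition matters most to drivers that use pm_runtime_suspended() to decide whether there is any state left to save. A hedged sketch with invented foo_* names (only pm_runtime_suspended() is the real API being illustrated):

#include <linux/pm_runtime.h>

static int foo_save_state_and_power_off(struct device *dev);	/* elided, invented */

static int foo_suspend(struct device *dev)
{
	/*
	 * True only when the status is 'suspended' AND power.disable_depth
	 * is zero, i.e. runtime PM is enabled.  If runtime PM was disabled
	 * the hardware may still be powered even though the status says
	 * 'suspended', so we fall through and save state the normal way.
	 */
	if (pm_runtime_suspended(dev))
		return 0;

	return foo_save_state_and_power_off(dev);
}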

+ 31 - 28
Documentation/scsi/scsi_mid_low_api.txt

@@ -1044,9 +1044,9 @@ Details:
 
 
 
 
 /**
 /**
- *      queuecommand - queue scsi command, invoke 'done' on completion
+ *      queuecommand - queue scsi command, invoke scp->scsi_done on completion
+ *      @shost: pointer to the scsi host object
  *      @scp: pointer to scsi command object
  *      @scp: pointer to scsi command object
- *      @done: function pointer to be invoked on completion
  *
  *
  *      Returns 0 on success.
  *      Returns 0 on success.
  *
  *
@@ -1074,42 +1074,45 @@ Details:
  *
  *
  *      Other types of errors that are detected immediately may be
  *      Other types of errors that are detected immediately may be
  *      flagged by setting scp->result to an appropriate value,
  *      flagged by setting scp->result to an appropriate value,
- *      invoking the 'done' callback, and then returning 0 from this
- *      function. If the command is not performed immediately (and the
- *      LLD is starting (or will start) the given command) then this
- *      function should place 0 in scp->result and return 0.
+ *      invoking the scp->scsi_done callback, and then returning 0
+ *      from this function. If the command is not performed
+ *      immediately (and the LLD is starting (or will start) the given
+ *      command) then this function should place 0 in scp->result and
+ *      return 0.
  *
  *
  *      Command ownership.  If the driver returns zero, it owns the
  *      Command ownership.  If the driver returns zero, it owns the
- *      command and must take responsibility for ensuring the 'done'
- *      callback is executed.  Note: the driver may call done before
- *      returning zero, but after it has called done, it may not
- *      return any value other than zero.  If the driver makes a
- *      non-zero return, it must not execute the command's done
- *      callback at any time.
- *
- *      Locks: struct Scsi_Host::host_lock held on entry (with "irqsave")
- *             and is expected to be held on return.
+ *      command and must take responsibility for ensuring the
+ *      scp->scsi_done callback is executed.  Note: the driver may
+ *      call scp->scsi_done before returning zero, but after it has
+ *      called scp->scsi_done, it may not return any value other than
+ *      zero.  If the driver makes a non-zero return, it must not
+ *      execute the command's scsi_done callback at any time.
+ *
+ *      Locks: up to and including 2.6.36, struct Scsi_Host::host_lock
+ *             held on entry (with "irqsave") and is expected to be
+ *             held on return. From 2.6.37 onwards, queuecommand is
+ *             called without any locks held.
  *
  *
  *      Calling context: in interrupt (soft irq) or process context
  *      Calling context: in interrupt (soft irq) or process context
  *
  *
- *      Notes: This function should be relatively fast. Normally it will
- *      not wait for IO to complete. Hence the 'done' callback is invoked 
- *      (often directly from an interrupt service routine) some time after
- *      this function has returned. In some cases (e.g. pseudo adapter 
- *      drivers that manufacture the response to a SCSI INQUIRY)
- *      the 'done' callback may be invoked before this function returns.
- *      If the 'done' callback is not invoked within a certain period
- *      the SCSI mid level will commence error processing.
- *      If a status of CHECK CONDITION is placed in "result" when the
- *      'done' callback is invoked, then the LLD driver should 
- *      perform autosense and fill in the struct scsi_cmnd::sense_buffer
+ *      Notes: This function should be relatively fast. Normally it
+ *      will not wait for IO to complete. Hence the scp->scsi_done
+ *      callback is invoked (often directly from an interrupt service
+ *      routine) some time after this function has returned. In some
+ *      cases (e.g. pseudo adapter drivers that manufacture the
+ *      response to a SCSI INQUIRY) the scp->scsi_done callback may be
+ *      invoked before this function returns.  If the scp->scsi_done
+ *      callback is not invoked within a certain period the SCSI mid
+ *      level will commence error processing.  If a status of CHECK
+ *      CONDITION is placed in "result" when the scp->scsi_done
+ *      callback is invoked, then the LLD driver should perform
+ *      autosense and fill in the struct scsi_cmnd::sense_buffer
  *      array. The scsi_cmnd::sense_buffer array is zeroed prior to
  *      array. The scsi_cmnd::sense_buffer array is zeroed prior to
  *      the mid level queuing a command to an LLD.
  *      the mid level queuing a command to an LLD.
  *
  *
  *      Defined in: LLD
  *      Defined in: LLD
  **/
  **/
-    int queuecommand(struct scsi_cmnd * scp, 
-                     void (*done)(struct scsi_cmnd *))
+    int queuecommand(struct Scsi_Host *shost, struct scsi_cmnd * scp)
 
 
 
 
 /**
 /**
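
To make the new calling convention concrete, a hedged sketch of a low-level driver's queuecommand() under the 2.6.37+ rules described above (no host lock held on entry, completion through scp->scsi_done). The mydrv_* names and private structure are invented; only the Scsi_Host/scsi_cmnd usage follows the documented contract:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct mydrv_host {			/* invented private state */
	bool ready;
	unsigned int queued, can_queue;
};

static void mydrv_hw_submit(struct mydrv_host *hw, struct scsi_cmnd *scp);	/* elided */

static int mydrv_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scp)
{
	struct mydrv_host *hw = shost_priv(shost);

	if (!hw->ready) {
		/* Error detected immediately: set the result, complete the
		 * command ourselves, and still return 0 (we owned it). */
		scp->result = DID_NO_CONNECT << 16;
		scp->scsi_done(scp);
		return 0;
	}

	if (hw->queued >= hw->can_queue)
		/* Non-zero return: scsi_done must NOT be called for scp. */
		return SCSI_MLQUEUE_HOST_BUSY;

	scp->result = 0;
	mydrv_hw_submit(hw, scp);	/* completes via scp->scsi_done()
					   from the interrupt handler later */
	return 0;
}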

+ 10 - 1
Documentation/trace/postprocess/trace-vmscan-postprocess.pl

@@ -373,9 +373,18 @@ EVENT_PROCESS:
 				print "         $regex_lru_isolate/o\n";
 				print "         $regex_lru_isolate/o\n";
 				next;
 				next;
 			}
 			}
+			my $isolate_mode = $1;
 			my $nr_scanned = $4;
 			my $nr_scanned = $4;
 			my $nr_contig_dirty = $7;
 			my $nr_contig_dirty = $7;
-			$perprocesspid{$process_pid}->{HIGH_NR_SCANNED} += $nr_scanned;
+
+			# To closer match vmstat scanning statistics, only count isolate_both
+			# and isolate_inactive as scanning. isolate_active is rotation
+			# isolate_inactive == 0
+			# isolate_active   == 1
+			# isolate_both     == 2
+			if ($isolate_mode != 1) {
+				$perprocesspid{$process_pid}->{HIGH_NR_SCANNED} += $nr_scanned;
+			}
 			$perprocesspid{$process_pid}->{HIGH_NR_CONTIG_DIRTY} += $nr_contig_dirty;
 			$perprocesspid{$process_pid}->{HIGH_NR_CONTIG_DIRTY} += $nr_contig_dirty;
 		} elsif ($tracepoint eq "mm_vmscan_lru_shrink_inactive") {
 		} elsif ($tracepoint eq "mm_vmscan_lru_shrink_inactive") {
 			$details = $5;
 			$details = $5;

+ 18 - 3
MAINTAINERS

@@ -405,7 +405,7 @@ S:	Supported
 F:	drivers/usb/gadget/amd5536udc.*
 F:	drivers/usb/gadget/amd5536udc.*
 
 
 AMD GEODE PROCESSOR/CHIPSET SUPPORT
 AMD GEODE PROCESSOR/CHIPSET SUPPORT
-P:	Jordan Crouse
+P:	Andres Salomon <dilinger@queued.net>
 L:	linux-geode@lists.infradead.org (moderated for non-subscribers)
 L:	linux-geode@lists.infradead.org (moderated for non-subscribers)
 W:	http://www.amd.com/us-en/ConnectivitySolutions/TechnicalResources/0,,50_2334_2452_11363,00.html
 W:	http://www.amd.com/us-en/ConnectivitySolutions/TechnicalResources/0,,50_2334_2452_11363,00.html
 S:	Supported
 S:	Supported
@@ -792,11 +792,14 @@ S:	Maintained
 
 
 ARM/NOMADIK ARCHITECTURE
 ARM/NOMADIK ARCHITECTURE
 M:	Alessandro Rubini <rubini@unipv.it>
 M:	Alessandro Rubini <rubini@unipv.it>
+M:	Linus Walleij <linus.walleij@stericsson.com>
 M:	STEricsson <STEricsson_nomadik_linux@list.st.com>
 M:	STEricsson <STEricsson_nomadik_linux@list.st.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 S:	Maintained
 F:	arch/arm/mach-nomadik/
 F:	arch/arm/mach-nomadik/
 F:	arch/arm/plat-nomadik/
 F:	arch/arm/plat-nomadik/
+F:	drivers/i2c/busses/i2c-nomadik.c
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
 
 
 ARM/OPENMOKO NEO FREERUNNER (GTA02) MACHINE SUPPORT
 ARM/OPENMOKO NEO FREERUNNER (GTA02) MACHINE SUPPORT
 M:	Nelson Castillo <arhuaco@freaks-unidos.net>
 M:	Nelson Castillo <arhuaco@freaks-unidos.net>
@@ -998,12 +1001,24 @@ F:	drivers/i2c/busses/i2c-stu300.c
 F:	drivers/rtc/rtc-coh901331.c
 F:	drivers/rtc/rtc-coh901331.c
 F:	drivers/watchdog/coh901327_wdt.c
 F:	drivers/watchdog/coh901327_wdt.c
 F:	drivers/dma/coh901318*
 F:	drivers/dma/coh901318*
+F:	drivers/mfd/ab3100*
+F:	drivers/rtc/rtc-ab3100.c
+F:	drivers/rtc/rtc-coh901331.c
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
 
 
-ARM/U8500 ARM ARCHITECTURE
+ARM/Ux500 ARM ARCHITECTURE
 M:	Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
 M:	Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
+M:	Linus Walleij <linus.walleij@stericsson.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 S:	Maintained
 F:	arch/arm/mach-ux500/
 F:	arch/arm/mach-ux500/
+F:	drivers/dma/ste_dma40*
+F:	drivers/mfd/ab3550*
+F:	drivers/mfd/abx500*
+F:	drivers/mfd/ab8500*
+F:	drivers/mfd/stmpe*
+F:	drivers/rtc/rtc-ab8500.c
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
 
 
 ARM/VFP SUPPORT
 ARM/VFP SUPPORT
 M:	Russell King <linux@arm.linux.org.uk>
 M:	Russell King <linux@arm.linux.org.uk>
@@ -4590,7 +4605,7 @@ F:	drivers/pcmcia/
 F:	include/pcmcia/
 F:	include/pcmcia/
 
 
 PCNET32 NETWORK DRIVER
 PCNET32 NETWORK DRIVER
-M:	Don Fry <pcnet32@verizon.net>
+M:	Don Fry <pcnet32@frontier.com>
 L:	netdev@vger.kernel.org
 L:	netdev@vger.kernel.org
 S:	Maintained
 S:	Maintained
 F:	drivers/net/pcnet32.c
 F:	drivers/net/pcnet32.c

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 2
 VERSION = 2
 PATCHLEVEL = 6
 PATCHLEVEL = 6
 SUBLEVEL = 37
 SUBLEVEL = 37
-EXTRAVERSION = -rc6
+EXTRAVERSION =
 NAME = Flesh-Eating Bats with Fangs
 NAME = Flesh-Eating Bats with Fangs
 
 
 # *DOCUMENTATION*
 # *DOCUMENTATION*

+ 1 - 0
arch/arm/common/it8152.c

@@ -352,3 +352,4 @@ struct pci_bus * __init it8152_pci_scan_bus(int nr, struct pci_sys_data *sys)
 	return pci_scan_bus(nr, &it8152_ops, sys);
 	return pci_scan_bus(nr, &it8152_ops, sys);
 }
 }
 
 
+EXPORT_SYMBOL(dma_set_coherent_mask);

+ 1 - 0
arch/arm/include/asm/hardware/it8152.h

@@ -76,6 +76,7 @@ extern unsigned long it8152_base_address;
   IT8152_PD_IRQ(0)  Audio controller (ACR)
   IT8152_PD_IRQ(0)  Audio controller (ACR)
  */
  */
 #define IT8152_IRQ(x)   (IRQ_BOARD_START + (x))
 #define IT8152_IRQ(x)   (IRQ_BOARD_START + (x))
+#define IT8152_LAST_IRQ	(IRQ_BOARD_START + 40)
 
 
 /* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */
 /* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */
 #define IT8152_LD_IRQ_COUNT     9
 #define IT8152_LD_IRQ_COUNT     9

+ 0 - 3
arch/arm/include/asm/highmem.h

@@ -25,9 +25,6 @@ extern void *kmap_high(struct page *page);
 extern void *kmap_high_get(struct page *page);
 extern void *kmap_high_get(struct page *page);
 extern void kunmap_high(struct page *page);
 extern void kunmap_high(struct page *page);
 
 
-extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte);
-extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
-
 /*
 /*
  * The following functions are already defined by <linux/highmem.h>
  * The following functions are already defined by <linux/highmem.h>
  * when CONFIG_HIGHMEM is not set.
  * when CONFIG_HIGHMEM is not set.

+ 3 - 3
arch/arm/include/asm/sizes.h

@@ -13,9 +13,6 @@
  * along with this program; if not, write to the Free Software
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
  */
-/* DO NOT EDIT!! - this file automatically generated
- *                 from .s file by awk -f s2h.awk
- */
 /*  Size definitions
 /*  Size definitions
  *  Copyright (C) ARM Limited 1998. All rights reserved.
  *  Copyright (C) ARM Limited 1998. All rights reserved.
  */
  */
@@ -25,6 +22,9 @@
 
 
 /* handy sizes */
 /* handy sizes */
 #define SZ_16				0x00000010
 #define SZ_16				0x00000010
+#define SZ_32				0x00000020
+#define SZ_64				0x00000040
+#define SZ_128				0x00000080
 #define SZ_256				0x00000100
 #define SZ_256				0x00000100
 #define SZ_512				0x00000200
 #define SZ_512				0x00000200
 
 

+ 1 - 0
arch/arm/include/asm/system.h

@@ -150,6 +150,7 @@ extern unsigned int user_debug;
 #define rmb()		dmb()
 #define rmb()		dmb()
 #define wmb()		mb()
 #define wmb()		mb()
 #else
 #else
+#include <asm/memory.h>
 #define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
 #define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
 #define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
 #define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
 #define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
 #define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)

+ 6 - 0
arch/arm/kernel/entry-common.S

@@ -29,6 +29,9 @@ ret_fast_syscall:
 	ldr	r1, [tsk, #TI_FLAGS]
 	ldr	r1, [tsk, #TI_FLAGS]
 	tst	r1, #_TIF_WORK_MASK
 	tst	r1, #_TIF_WORK_MASK
 	bne	fast_work_pending
 	bne	fast_work_pending
+#if defined(CONFIG_IRQSOFF_TRACER)
+	asm_trace_hardirqs_on
+#endif
 
 
 	/* perform architecture specific actions before user return */
 	/* perform architecture specific actions before user return */
 	arch_ret_to_user r1, lr
 	arch_ret_to_user r1, lr
@@ -65,6 +68,9 @@ ret_slow_syscall:
 	tst	r1, #_TIF_WORK_MASK
 	tst	r1, #_TIF_WORK_MASK
 	bne	work_pending
 	bne	work_pending
 no_work_pending:
 no_work_pending:
+#if defined(CONFIG_IRQSOFF_TRACER)
+	asm_trace_hardirqs_on
+#endif
 	/* perform architecture specific actions before user return */
 	/* perform architecture specific actions before user return */
 	arch_ret_to_user r1, lr
 	arch_ret_to_user r1, lr
 
 

+ 0 - 1
arch/arm/kernel/smp.c

@@ -310,7 +310,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	 * All kernel threads share the same mm context; grab a
 	 * All kernel threads share the same mm context; grab a
 	 * reference and switch to it.
 	 * reference and switch to it.
 	 */
 	 */
-	atomic_inc(&mm->mm_users);
 	atomic_inc(&mm->mm_count);
 	atomic_inc(&mm->mm_count);
 	current->active_mm = mm;
 	current->active_mm = mm;
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
 	cpumask_set_cpu(cpu, mm_cpumask(mm));

+ 1 - 1
arch/arm/mach-at91/Makefile

@@ -65,7 +65,7 @@ obj-$(CONFIG_MACH_AT91SAM9G20EK) += board-sam9g20ek.o
 obj-$(CONFIG_MACH_CPU9G20)	+= board-cpu9krea.o
 obj-$(CONFIG_MACH_CPU9G20)	+= board-cpu9krea.o
 obj-$(CONFIG_MACH_STAMP9G20)	+= board-stamp9g20.o
 obj-$(CONFIG_MACH_STAMP9G20)	+= board-stamp9g20.o
 obj-$(CONFIG_MACH_PORTUXG20)	+= board-stamp9g20.o
 obj-$(CONFIG_MACH_PORTUXG20)	+= board-stamp9g20.o
-obj-$(CONFIG_MACH_PCONTROL_G20)	+= board-pcontrol-g20.o
+obj-$(CONFIG_MACH_PCONTROL_G20)	+= board-pcontrol-g20.o board-stamp9g20.o
 
 
 # AT91SAM9260/AT91SAM9G20 board-specific support
 # AT91SAM9260/AT91SAM9G20 board-specific support
 obj-$(CONFIG_MACH_SNAPPER_9260)	+= board-snapper9260.o
 obj-$(CONFIG_MACH_SNAPPER_9260)	+= board-snapper9260.o

+ 3 - 95
arch/arm/mach-at91/board-pcontrol-g20.c

@@ -31,6 +31,7 @@
 
 
 #include <mach/board.h>
 #include <mach/board.h>
 #include <mach/at91sam9_smc.h>
 #include <mach/at91sam9_smc.h>
+#include <mach/stamp9g20.h>
 
 
 #include "sam9_smc.h"
 #include "sam9_smc.h"
 #include "generic.h"
 #include "generic.h"
@@ -38,11 +39,7 @@
 
 
 static void __init pcontrol_g20_map_io(void)
 static void __init pcontrol_g20_map_io(void)
 {
 {
-	/* Initialize processor: 18.432 MHz crystal */
-	at91sam9260_initialize(18432000);
-
-	/* DGBU on ttyS0. (Rx, Tx) only TTL -> JTAG connector X7 17,19 ) */
-	at91_register_uart(0, 0, 0);
+	stamp9g20_map_io();
 
 
 	/* USART0 on ttyS1. (Rx, Tx, CTS, RTS) piggyback  A2 */
 	/* USART0 on ttyS1. (Rx, Tx, CTS, RTS) piggyback  A2 */
 	at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS
 	at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS
@@ -54,9 +51,6 @@ static void __init pcontrol_g20_map_io(void)
 
 
 	/* USART2 on ttyS3. (Rx, Tx)  9bit-Bus  Multidrop-mode  X4 */
 	/* USART2 on ttyS3. (Rx, Tx)  9bit-Bus  Multidrop-mode  X4 */
 	at91_register_uart(AT91SAM9260_ID_US4, 3, 0);
 	at91_register_uart(AT91SAM9260_ID_US4, 3, 0);
-
-	/* set serial console to ttyS0 (ie, DBGU) */
-	at91_set_serial_console(0);
 }
 }
 
 
 
 
@@ -66,38 +60,6 @@ static void __init init_irq(void)
 }
 }
 
 
 
 
-/*
- * NAND flash 512MiB 1,8V 8-bit, sector size 128 KiB
- */
-static struct atmel_nand_data __initdata nand_data = {
-	.ale		= 21,
-	.cle		= 22,
-	.rdy_pin	= AT91_PIN_PC13,
-	.enable_pin	= AT91_PIN_PC14,
-};
-
-/*
- * Bus timings; unit = 7.57ns
- */
-static struct sam9_smc_config __initdata nand_smc_config = {
-	.ncs_read_setup		= 0,
-	.nrd_setup		= 2,
-	.ncs_write_setup	= 0,
-	.nwe_setup		= 2,
-
-	.ncs_read_pulse		= 4,
-	.nrd_pulse		= 4,
-	.ncs_write_pulse	= 4,
-	.nwe_pulse		= 4,
-
-	.read_cycle		= 7,
-	.write_cycle		= 7,
-
-	.mode			= AT91_SMC_READMODE | AT91_SMC_WRITEMODE
-			| AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_DBW_8,
-	.tdf_cycles		= 3,
-};
-
 static struct sam9_smc_config __initdata pcontrol_smc_config[2] = { {
 static struct sam9_smc_config __initdata pcontrol_smc_config[2] = { {
 	.ncs_read_setup		= 16,
 	.ncs_read_setup		= 16,
 	.nrd_setup		= 18,
 	.nrd_setup		= 18,
@@ -138,14 +100,6 @@ static struct sam9_smc_config __initdata pcontrol_smc_config[2] = { {
 	.tdf_cycles		= 1,
 	.tdf_cycles		= 1,
 } };
 } };
 
 
-static void __init add_device_nand(void)
-{
-	/* configure chip-select 3 (NAND) */
-	sam9_smc_configure(3, &nand_smc_config);
-	at91_add_device_nand(&nand_data);
-}
-
-
 static void __init add_device_pcontrol(void)
 static void __init add_device_pcontrol(void)
 {
 {
 	/* configure chip-select 4 (IO compatible to 8051  X4 ) */
 	/* configure chip-select 4 (IO compatible to 8051  X4 ) */
@@ -155,23 +109,6 @@ static void __init add_device_pcontrol(void)
 }
 }
 
 
 
 
-/*
- * MCI (SD/MMC)
- * det_pin, wp_pin and vcc_pin are not connected
- */
-#if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE)
-static struct mci_platform_data __initdata mmc_data = {
-	.slot[0] = {
-		.bus_width	= 4,
-	},
-};
-#else
-static struct at91_mmc_data __initdata mmc_data = {
-	.wire4		= 1,
-};
-#endif
-
-
 /*
 /*
  * USB Host port
  * USB Host port
  */
  */
@@ -265,42 +202,13 @@ static struct spi_board_info pcontrol_g20_spi_devices[] = {
 };
 };
 
 
 
 
-/*
- * Dallas 1-Wire  DS2431
- */
-static struct w1_gpio_platform_data w1_gpio_pdata = {
-	.pin		= AT91_PIN_PA29,
-	.is_open_drain	= 1,
-};
-
-static struct platform_device w1_device = {
-	.name			= "w1-gpio",
-	.id			= -1,
-	.dev.platform_data	= &w1_gpio_pdata,
-};
-
-static void add_wire1(void)
-{
-	at91_set_GPIO_periph(w1_gpio_pdata.pin, 1);
-	at91_set_multi_drive(w1_gpio_pdata.pin, 1);
-	platform_device_register(&w1_device);
-}
-
-
 static void __init pcontrol_g20_board_init(void)
 static void __init pcontrol_g20_board_init(void)
 {
 {
-	at91_add_device_serial();
-	add_device_nand();
-#if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE)
-	at91_add_device_mci(0, &mmc_data);
-#else
-	at91_add_device_mmc(0, &mmc_data);
-#endif
+	stamp9g20_board_init();
 	at91_add_device_usbh(&usbh_data);
 	at91_add_device_usbh(&usbh_data);
 	at91_add_device_eth(&macb_data);
 	at91_add_device_eth(&macb_data);
 	at91_add_device_i2c(pcontrol_g20_i2c_devices,
 	at91_add_device_i2c(pcontrol_g20_i2c_devices,
 		ARRAY_SIZE(pcontrol_g20_i2c_devices));
 		ARRAY_SIZE(pcontrol_g20_i2c_devices));
-	add_wire1();
 	add_device_pcontrol();
 	add_device_pcontrol();
 	at91_add_device_spi(pcontrol_g20_spi_devices,
 	at91_add_device_spi(pcontrol_g20_spi_devices,
 		ARRAY_SIZE(pcontrol_g20_spi_devices));
 		ARRAY_SIZE(pcontrol_g20_spi_devices));

+ 43 - 39
arch/arm/mach-at91/board-stamp9g20.c

@@ -32,7 +32,7 @@
 #include "generic.h"
 #include "generic.h"
 
 
 
 
-static void __init portuxg20_map_io(void)
+void __init stamp9g20_map_io(void)
 {
 {
 	/* Initialize processor: 18.432 MHz crystal */
 	/* Initialize processor: 18.432 MHz crystal */
 	at91sam9260_initialize(18432000);
 	at91sam9260_initialize(18432000);
@@ -40,6 +40,24 @@ static void __init portuxg20_map_io(void)
 	/* DGBU on ttyS0. (Rx & Tx only) */
 	/* DGBU on ttyS0. (Rx & Tx only) */
 	at91_register_uart(0, 0, 0);
 	at91_register_uart(0, 0, 0);
 
 
+	/* set serial console to ttyS0 (ie, DBGU) */
+	at91_set_serial_console(0);
+}
+
+static void __init stamp9g20evb_map_io(void)
+{
+	stamp9g20_map_io();
+
+	/* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
+	at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS
+						| ATMEL_UART_DTR | ATMEL_UART_DSR
+						| ATMEL_UART_DCD | ATMEL_UART_RI);
+}
+
+static void __init portuxg20_map_io(void)
+{
+	stamp9g20_map_io();
+
 	/* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
 	/* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
 	at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS
 	at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS
 						| ATMEL_UART_DTR | ATMEL_UART_DSR
 						| ATMEL_UART_DTR | ATMEL_UART_DSR
@@ -56,26 +74,6 @@ static void __init portuxg20_map_io(void)
 
 
 	/* USART5 on ttyS6. (Rx, Tx only) */
 	/* USART5 on ttyS6. (Rx, Tx only) */
 	at91_register_uart(AT91SAM9260_ID_US5, 6, 0);
 	at91_register_uart(AT91SAM9260_ID_US5, 6, 0);
-
-	/* set serial console to ttyS0 (ie, DBGU) */
-	at91_set_serial_console(0);
-}
-
-static void __init stamp9g20_map_io(void)
-{
-	/* Initialize processor: 18.432 MHz crystal */
-	at91sam9260_initialize(18432000);
-
-	/* DGBU on ttyS0. (Rx & Tx only) */
-	at91_register_uart(0, 0, 0);
-
-	/* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
-	at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS
-						| ATMEL_UART_DTR | ATMEL_UART_DSR
-						| ATMEL_UART_DCD | ATMEL_UART_RI);
-
-	/* set serial console to ttyS0 (ie, DBGU) */
-	at91_set_serial_console(0);
 }
 }
 
 
 static void __init init_irq(void)
 static void __init init_irq(void)
@@ -156,7 +154,7 @@ static struct at91_udc_data __initdata portuxg20_udc_data = {
 	.pullup_pin	= 0,		/* pull-up driven by UDC */
 	.pullup_pin	= 0,		/* pull-up driven by UDC */
 };
 };
 
 
-static struct at91_udc_data __initdata stamp9g20_udc_data = {
+static struct at91_udc_data __initdata stamp9g20evb_udc_data = {
 	.vbus_pin	= AT91_PIN_PA22,
 	.vbus_pin	= AT91_PIN_PA22,
 	.pullup_pin	= 0,		/* pull-up driven by UDC */
 	.pullup_pin	= 0,		/* pull-up driven by UDC */
 };
 };
@@ -190,7 +188,7 @@ static struct gpio_led portuxg20_leds[] = {
 	}
 	}
 };
 };
 
 
-static struct gpio_led stamp9g20_leds[] = {
+static struct gpio_led stamp9g20evb_leds[] = {
 	{
 	{
 		.name			= "D8",
 		.name			= "D8",
 		.gpio			= AT91_PIN_PB18,
 		.gpio			= AT91_PIN_PB18,
@@ -250,7 +248,7 @@ void add_w1(void)
 }
 }
 
 
 
 
-static void __init generic_board_init(void)
+void __init stamp9g20_board_init(void)
 {
 {
 	/* Serial */
 	/* Serial */
 	at91_add_device_serial();
 	at91_add_device_serial();
@@ -262,34 +260,40 @@ static void __init generic_board_init(void)
 #else
 #else
 	at91_add_device_mmc(0, &mmc_data);
 	at91_add_device_mmc(0, &mmc_data);
 #endif
 #endif
-	/* USB Host */
-	at91_add_device_usbh(&usbh_data);
-	/* Ethernet */
-	at91_add_device_eth(&macb_data);
-	/* I2C */
-	at91_add_device_i2c(NULL, 0);
 	/* W1 */
 	/* W1 */
 	add_w1();
 	add_w1();
 }
 }
 
 
 static void __init portuxg20_board_init(void)
 static void __init portuxg20_board_init(void)
 {
 {
-	generic_board_init();
-	/* SPI */
-	at91_add_device_spi(portuxg20_spi_devices, ARRAY_SIZE(portuxg20_spi_devices));
+	stamp9g20_board_init();
+	/* USB Host */
+	at91_add_device_usbh(&usbh_data);
 	/* USB Device */
 	/* USB Device */
 	at91_add_device_udc(&portuxg20_udc_data);
 	at91_add_device_udc(&portuxg20_udc_data);
+	/* Ethernet */
+	at91_add_device_eth(&macb_data);
+	/* I2C */
+	at91_add_device_i2c(NULL, 0);
+	/* SPI */
+	at91_add_device_spi(portuxg20_spi_devices, ARRAY_SIZE(portuxg20_spi_devices));
 	/* LEDs */
 	/* LEDs */
 	at91_gpio_leds(portuxg20_leds, ARRAY_SIZE(portuxg20_leds));
 	at91_gpio_leds(portuxg20_leds, ARRAY_SIZE(portuxg20_leds));
 }
 }
 
 
-static void __init stamp9g20_board_init(void)
+static void __init stamp9g20evb_board_init(void)
 {
 {
-	generic_board_init();
+	stamp9g20_board_init();
+	/* USB Host */
+	at91_add_device_usbh(&usbh_data);
 	/* USB Device */
 	/* USB Device */
-	at91_add_device_udc(&stamp9g20_udc_data);
+	at91_add_device_udc(&stamp9g20evb_udc_data);
+	/* Ethernet */
+	at91_add_device_eth(&macb_data);
+	/* I2C */
+	at91_add_device_i2c(NULL, 0);
 	/* LEDs */
 	/* LEDs */
-	at91_gpio_leds(stamp9g20_leds, ARRAY_SIZE(stamp9g20_leds));
+	at91_gpio_leds(stamp9g20evb_leds, ARRAY_SIZE(stamp9g20evb_leds));
 }
 }
 
 
 MACHINE_START(PORTUXG20, "taskit PortuxG20")
 MACHINE_START(PORTUXG20, "taskit PortuxG20")
@@ -305,7 +309,7 @@ MACHINE_START(STAMP9G20, "taskit Stamp9G20")
 	/* Maintainer: taskit GmbH */
 	/* Maintainer: taskit GmbH */
 	.boot_params	= AT91_SDRAM_BASE + 0x100,
 	.boot_params	= AT91_SDRAM_BASE + 0x100,
 	.timer		= &at91sam926x_timer,
 	.timer		= &at91sam926x_timer,
-	.map_io		= stamp9g20_map_io,
+	.map_io		= stamp9g20evb_map_io,
 	.init_irq	= init_irq,
 	.init_irq	= init_irq,
-	.init_machine	= stamp9g20_board_init,
+	.init_machine	= stamp9g20evb_board_init,
 MACHINE_END
 MACHINE_END

+ 1 - 1
arch/arm/mach-at91/clock.c

@@ -658,7 +658,7 @@ static void __init at91_upll_usbfs_clock_init(unsigned long main_clock)
 	/* Now set uhpck values */
 	/* Now set uhpck values */
 	uhpck.parent = &utmi_clk;
 	uhpck.parent = &utmi_clk;
 	uhpck.pmc_mask = AT91SAM926x_PMC_UHP;
 	uhpck.pmc_mask = AT91SAM926x_PMC_UHP;
-	uhpck.rate_hz = utmi_clk.parent->rate_hz;
+	uhpck.rate_hz = utmi_clk.rate_hz;
 	uhpck.rate_hz /= 1 + ((at91_sys_read(AT91_PMC_USB) & AT91_PMC_OHCIUSBDIV) >> 8);
 	uhpck.rate_hz /= 1 + ((at91_sys_read(AT91_PMC_USB) & AT91_PMC_OHCIUSBDIV) >> 8);
 }
 }
 
 

+ 2 - 0
arch/arm/mach-at91/include/mach/at91_mci.h

@@ -74,6 +74,8 @@
 #define			AT91_MCI_TRTYP_BLOCK	(0 << 19)
 #define			AT91_MCI_TRTYP_BLOCK	(0 << 19)
 #define			AT91_MCI_TRTYP_MULTIPLE	(1 << 19)
 #define			AT91_MCI_TRTYP_MULTIPLE	(1 << 19)
 #define			AT91_MCI_TRTYP_STREAM	(2 << 19)
 #define			AT91_MCI_TRTYP_STREAM	(2 << 19)
+#define			AT91_MCI_TRTYP_SDIO_BYTE	(4 << 19)
+#define			AT91_MCI_TRTYP_SDIO_BLOCK	(5 << 19)
 
 
 #define AT91_MCI_BLKR		0x18		/* Block Register */
 #define AT91_MCI_BLKR		0x18		/* Block Register */
 #define		AT91_MCI_BLKR_BCNT(n)	((0xffff & (n)) << 0)	/* Block count */
 #define		AT91_MCI_BLKR_BCNT(n)	((0xffff & (n)) << 0)	/* Block count */

+ 7 - 0
arch/arm/mach-at91/include/mach/stamp9g20.h

@@ -0,0 +1,7 @@
+#ifndef __MACH_STAMP9G20_H
+#define __MACH_STAMP9G20_H
+
+void stamp9g20_map_io(void);
+void stamp9g20_board_init(void);
+
+#endif

+ 1 - 1
arch/arm/mach-ixp4xx/common-pci.c

@@ -513,4 +513,4 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
 
 
 EXPORT_SYMBOL(ixp4xx_pci_read);
 EXPORT_SYMBOL(ixp4xx_pci_read);
 EXPORT_SYMBOL(ixp4xx_pci_write);
 EXPORT_SYMBOL(ixp4xx_pci_write);
-
+EXPORT_SYMBOL(dma_set_coherent_mask);

+ 1 - 0
arch/arm/mach-pxa/Kconfig

@@ -540,6 +540,7 @@ config MACH_ICONTROL
 config ARCH_PXA_ESERIES
 config ARCH_PXA_ESERIES
 	bool "PXA based Toshiba e-series PDAs"
 	bool "PXA based Toshiba e-series PDAs"
 	select PXA25x
 	select PXA25x
+	select FB_W100
 
 
 config MACH_E330
 config MACH_E330
 	bool "Toshiba e330"
 	bool "Toshiba e330"

+ 2 - 2
arch/arm/mach-pxa/sleep.S

@@ -353,8 +353,8 @@ resume_turn_on_mmu:
 
 
 	@ Let us ensure we jump to resume_after_mmu only when the mcr above
 	@ Let us ensure we jump to resume_after_mmu only when the mcr above
 	@ actually took effect.  They call it the "cpwait" operation.
 	@ actually took effect.  They call it the "cpwait" operation.
-	mrc	p15, 0, r1, c2, c0, 0		@ queue a dependency on CP15
-	sub	pc, r2, r1, lsr #32		@ jump to virtual addr
+	mrc	p15, 0, r0, c2, c0, 0		@ queue a dependency on CP15
+	sub	pc, r2, r0, lsr #32		@ jump to virtual addr
 	nop
 	nop
 	nop
 	nop
 	nop
 	nop

+ 7 - 0
arch/arm/mach-s3c2412/Kconfig

@@ -28,9 +28,16 @@ config S3C2412_DMA
 
 
 config S3C2412_PM
 config S3C2412_PM
 	bool
 	bool
+	select S3C2412_PM_SLEEP
 	help
 	help
 	  Internal config node to apply S3C2412 power management
 	  Internal config node to apply S3C2412 power management
 
 
+config S3C2412_PM_SLEEP
+	bool
+	help
+	  Internal config node to apply sleep for S3C2412 power management.
+	  Can be selected by another SoCs with similar sleep procedure.
+
 # Note, the S3C2412 IOtiming support is in plat-s3c24xx
 # Note, the S3C2412 IOtiming support is in plat-s3c24xx
 
 
 config S3C2412_CPUFREQ
 config S3C2412_CPUFREQ

+ 2 - 1
arch/arm/mach-s3c2412/Makefile

@@ -14,7 +14,8 @@ obj-$(CONFIG_CPU_S3C2412)	+= irq.o
 obj-$(CONFIG_CPU_S3C2412)	+= clock.o
 obj-$(CONFIG_CPU_S3C2412)	+= clock.o
 obj-$(CONFIG_CPU_S3C2412)	+= gpio.o
 obj-$(CONFIG_CPU_S3C2412)	+= gpio.o
 obj-$(CONFIG_S3C2412_DMA)	+= dma.o
 obj-$(CONFIG_S3C2412_DMA)	+= dma.o
-obj-$(CONFIG_S3C2412_PM)	+= pm.o sleep.o
+obj-$(CONFIG_S3C2412_PM)	+= pm.o
+obj-$(CONFIG_S3C2412_PM_SLEEP)	+= sleep.o
 obj-$(CONFIG_S3C2412_CPUFREQ)	+= cpu-freq.o
 obj-$(CONFIG_S3C2412_CPUFREQ)	+= cpu-freq.o
 
 
 # Machine support
 # Machine support

+ 1 - 0
arch/arm/mach-s3c2416/Kconfig

@@ -27,6 +27,7 @@ config S3C2416_DMA
 
 
 config S3C2416_PM
 config S3C2416_PM
 	bool
 	bool
+	select S3C2412_PM_SLEEP
 	help
 	help
 	  Internal config node to apply S3C2416 power management
 	  Internal config node to apply S3C2416 power management
 
 

+ 6 - 0
arch/arm/mach-s5pv210/mach-aquila.c

@@ -378,6 +378,12 @@ static struct max8998_regulator_data aquila_regulators[] = {
 static struct max8998_platform_data aquila_max8998_pdata = {
 static struct max8998_platform_data aquila_max8998_pdata = {
 	.num_regulators	= ARRAY_SIZE(aquila_regulators),
 	.num_regulators	= ARRAY_SIZE(aquila_regulators),
 	.regulators	= aquila_regulators,
 	.regulators	= aquila_regulators,
+	.buck1_set1	= S5PV210_GPH0(3),
+	.buck1_set2	= S5PV210_GPH0(4),
+	.buck2_set3	= S5PV210_GPH0(5),
+	.buck1_max_voltage1 = 1200000,
+	.buck1_max_voltage2 = 1200000,
+	.buck2_max_voltage = 1200000,
 };
 };
 #endif
 #endif
 
 

+ 6 - 0
arch/arm/mach-s5pv210/mach-goni.c

@@ -518,6 +518,12 @@ static struct max8998_regulator_data goni_regulators[] = {
 static struct max8998_platform_data goni_max8998_pdata = {
 static struct max8998_platform_data goni_max8998_pdata = {
 	.num_regulators	= ARRAY_SIZE(goni_regulators),
 	.num_regulators	= ARRAY_SIZE(goni_regulators),
 	.regulators	= goni_regulators,
 	.regulators	= goni_regulators,
+	.buck1_set1	= S5PV210_GPH0(3),
+	.buck1_set2	= S5PV210_GPH0(4),
+	.buck2_set3	= S5PV210_GPH0(5),
+	.buck1_max_voltage1 = 1200000,
+	.buck1_max_voltage2 = 1200000,
+	.buck2_max_voltage = 1200000,
 };
 #endif
 

+ 26 - 4
arch/arm/mach-shmobile/include/mach/entry-macro.S

@@ -1,4 +1,5 @@
 /*
+ * Copyright (C) 2010 Magnus Damm
  * Copyright (C) 2008 Renesas Solutions Corp.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -14,24 +15,45 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
-#include <mach/hardware.h>
 #include <mach/irqs.h>
 
+#define INTCA_BASE	0xe6980000
+#define INTFLGA_OFFS	0x00000018 /* accept pending interrupt */
+#define INTEVTA_OFFS	0x00000020 /* vector number of accepted interrupt */
+#define INTLVLA_OFFS	0x00000030 /* priority level of accepted interrupt */
+#define INTLVLB_OFFS	0x00000034 /* previous priority level */
+
 	.macro  disable_fiq
 	.endm
 
 	.macro  get_irqnr_preamble, base, tmp
-	ldr     \base, =INTFLGA
+	ldr     \base, =INTCA_BASE
 	.endm
 
 	.macro  arch_ret_to_user, tmp1, tmp2
 	.endm
 
 	.macro  get_irqnr_and_base, irqnr, irqstat, base, tmp
-	ldr     \irqnr, [\base]
+	/* The single INTFLGA read access below results in the following:
+	 *
+	 * 1. INTLVLB is updated with old priority value from INTLVLA
+	 * 2. Highest priority interrupt is accepted
+	 * 3. INTLVLA is updated to contain priority of accepted interrupt
+	 * 4. Accepted interrupt vector is stored in INTFLGA and INTEVTA
+	 */
+	ldr     \irqnr, [\base, #INTFLGA_OFFS]
+
+	/* Restore INTLVLA with the value saved in INTLVLB.
+	 * This is required to support interrupt priorities properly.
+	 */
+	ldrb	\tmp, [\base, #INTLVLB_OFFS]
+	strb    \tmp, [\base, #INTLVLA_OFFS]
+
+	/* Handle invalid vector number case */
 	cmp	\irqnr, #0
 	beq	1000f
-	/* intevt to irq number */
+
+	/* Convert vector to irq number, same as the evt2irq() macro */
 	lsr	\irqnr, \irqnr, #0x5
 	subs	\irqnr, \irqnr, #16
 

+ 1 - 1
arch/arm/mach-shmobile/include/mach/vmalloc.h

@@ -2,6 +2,6 @@
 #define __ASM_MACH_VMALLOC_H
 
 /* Vmalloc at ... - 0xe5ffffff */
-#define VMALLOC_END 0xe6000000
+#define VMALLOC_END 0xe6000000UL
 
 #endif /* __ASM_MACH_VMALLOC_H */

+ 19 - 18
arch/arm/mm/cache-feroceon-l2.c

@@ -13,13 +13,9 @@
  */
 
 #include <linux/init.h>
+#include <linux/highmem.h>
 #include <asm/cacheflush.h>
-#include <asm/kmap_types.h>
-#include <asm/fixmap.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
 #include <plat/cache-feroceon-l2.h>
-#include "mm.h"
 
 /*
  * Low-level cache maintenance operations.
@@ -39,27 +35,30 @@
  * between which we don't want to be preempted.
  */
 
-static inline unsigned long l2_start_va(unsigned long paddr)
+static inline unsigned long l2_get_va(unsigned long paddr)
 {
 #ifdef CONFIG_HIGHMEM
 	/*
-	 * Let's do our own fixmap stuff in a minimal way here.
 	 * Because range ops can't be done on physical addresses,
 	 * we simply install a virtual mapping for it only for the
 	 * TLB lookup to occur, hence no need to flush the untouched
-	 * memory mapping.  This is protected with the disabling of
-	 * interrupts by the caller.
+	 * memory mapping afterwards (note: a cache flush may happen
+	 * in some circumstances depending on the path taken in kunmap_atomic).
 	 */
-	unsigned long idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
-	unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	set_pte_ext(TOP_PTE(vaddr), pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL), 0);
-	local_flush_tlb_kernel_page(vaddr);
-	return vaddr + (paddr & ~PAGE_MASK);
+	void *vaddr = kmap_atomic_pfn(paddr >> PAGE_SHIFT);
+	return (unsigned long)vaddr + (paddr & ~PAGE_MASK);
 #else
 	return __phys_to_virt(paddr);
 #endif
 }
 
+static inline void l2_put_va(unsigned long vaddr)
+{
+#ifdef CONFIG_HIGHMEM
+	kunmap_atomic((void *)vaddr);
+#endif
+}
+
 static inline void l2_clean_pa(unsigned long addr)
 {
 	__asm__("mcr p15, 1, %0, c15, c9, 3" : : "r" (addr));
@@ -76,13 +75,14 @@ static inline void l2_clean_pa_range(unsigned long start, unsigned long end)
 	 */
 	BUG_ON((start ^ end) >> PAGE_SHIFT);
 
-	raw_local_irq_save(flags);
-	va_start = l2_start_va(start);
+	va_start = l2_get_va(start);
 	va_end = va_start + (end - start);
+	raw_local_irq_save(flags);
 	__asm__("mcr p15, 1, %0, c15, c9, 4\n\t"
 		"mcr p15, 1, %1, c15, c9, 5"
 		: : "r" (va_start), "r" (va_end));
 	raw_local_irq_restore(flags);
+	l2_put_va(va_start);
 }
 
 static inline void l2_clean_inv_pa(unsigned long addr)
@@ -106,13 +106,14 @@ static inline void l2_inv_pa_range(unsigned long start, unsigned long end)
 	 */
 	BUG_ON((start ^ end) >> PAGE_SHIFT);
 
-	raw_local_irq_save(flags);
-	va_start = l2_start_va(start);
+	va_start = l2_get_va(start);
 	va_end = va_start + (end - start);
+	raw_local_irq_save(flags);
 	__asm__("mcr p15, 1, %0, c15, c11, 4\n\t"
 		"mcr p15, 1, %1, c15, c11, 5"
 		: : "r" (va_start), "r" (va_end));
 	raw_local_irq_restore(flags);
+	l2_put_va(va_start);
 }
 
 static inline void l2_inv_all(void)

+ 21 - 36
arch/arm/mm/cache-xsc3l2.c

@@ -17,14 +17,10 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
  */
 #include <linux/init.h>
 #include <linux/init.h>
+#include <linux/highmem.h>
 #include <asm/system.h>
 #include <asm/system.h>
 #include <asm/cputype.h>
 #include <asm/cputype.h>
 #include <asm/cacheflush.h>
 #include <asm/cacheflush.h>
-#include <asm/kmap_types.h>
-#include <asm/fixmap.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
-#include "mm.h"
 
 
 #define CR_L2	(1 << 26)
 #define CR_L2	(1 << 26)
 
 
@@ -71,16 +67,15 @@ static inline void xsc3_l2_inv_all(void)
 	dsb();
 	dsb();
 }
 }
 
 
+static inline void l2_unmap_va(unsigned long va)
+{
 #ifdef CONFIG_HIGHMEM
 #ifdef CONFIG_HIGHMEM
-#define l2_map_save_flags(x)		raw_local_save_flags(x)
-#define l2_map_restore_flags(x)		raw_local_irq_restore(x)
-#else
-#define l2_map_save_flags(x)		((x) = 0)
-#define l2_map_restore_flags(x)		((void)(x))
+	if (va != -1)
+		kunmap_atomic((void *)va);
 #endif
 #endif
+}
 
 
-static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
-				      unsigned long flags)
+static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va)
 {
 {
 #ifdef CONFIG_HIGHMEM
 #ifdef CONFIG_HIGHMEM
 	unsigned long va = prev_va & PAGE_MASK;
 	unsigned long va = prev_va & PAGE_MASK;
@@ -89,17 +84,10 @@ static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
 		/*
 		/*
 		 * Switching to a new page.  Because cache ops are
 		 * Switching to a new page.  Because cache ops are
 		 * using virtual addresses only, we must put a mapping
 		 * using virtual addresses only, we must put a mapping
-		 * in place for it.  We also enable interrupts for a
-		 * short while and disable them again to protect this
-		 * mapping.
+		 * in place for it.
 		 */
 		 */
-		unsigned long idx;
-		raw_local_irq_restore(flags);
-		idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
-		va = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-		raw_local_irq_restore(flags | PSR_I_BIT);
-		set_pte_ext(TOP_PTE(va), pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL), 0);
-		local_flush_tlb_kernel_page(va);
+		l2_unmap_va(prev_va);
+		va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT);
 	}
 	}
 	return va + (pa_offset >> (32 - PAGE_SHIFT));
 	return va + (pa_offset >> (32 - PAGE_SHIFT));
 #else
 #else
@@ -109,7 +97,7 @@ static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
 
 
 static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 {
 {
-	unsigned long vaddr, flags;
+	unsigned long vaddr;
 
 
 	if (start == 0 && end == -1ul) {
 	if (start == 0 && end == -1ul) {
 		xsc3_l2_inv_all();
 		xsc3_l2_inv_all();
@@ -117,13 +105,12 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 	}
 	}
 
 
 	vaddr = -1;  /* to force the first mapping */
 	vaddr = -1;  /* to force the first mapping */
-	l2_map_save_flags(flags);
 
 
 	/*
 	/*
 	 * Clean and invalidate partial first cache line.
 	 * Clean and invalidate partial first cache line.
 	 */
 	 */
 	if (start & (CACHE_LINE_SIZE - 1)) {
 	if (start & (CACHE_LINE_SIZE - 1)) {
-		vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr, flags);
+		vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr);
 		xsc3_l2_clean_mva(vaddr);
 		xsc3_l2_clean_mva(vaddr);
 		xsc3_l2_inv_mva(vaddr);
 		xsc3_l2_inv_mva(vaddr);
 		start = (start | (CACHE_LINE_SIZE - 1)) + 1;
 		start = (start | (CACHE_LINE_SIZE - 1)) + 1;
@@ -133,7 +120,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 	 * Invalidate all full cache lines between 'start' and 'end'.
 	 * Invalidate all full cache lines between 'start' and 'end'.
 	 */
 	 */
 	while (start < (end & ~(CACHE_LINE_SIZE - 1))) {
 	while (start < (end & ~(CACHE_LINE_SIZE - 1))) {
-		vaddr = l2_map_va(start, vaddr, flags);
+		vaddr = l2_map_va(start, vaddr);
 		xsc3_l2_inv_mva(vaddr);
 		xsc3_l2_inv_mva(vaddr);
 		start += CACHE_LINE_SIZE;
 		start += CACHE_LINE_SIZE;
 	}
 	}
@@ -142,31 +129,30 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 	 * Clean and invalidate partial last cache line.
 	 * Clean and invalidate partial last cache line.
 	 */
 	 */
 	if (start < end) {
 	if (start < end) {
-		vaddr = l2_map_va(start, vaddr, flags);
+		vaddr = l2_map_va(start, vaddr);
 		xsc3_l2_clean_mva(vaddr);
 		xsc3_l2_clean_mva(vaddr);
 		xsc3_l2_inv_mva(vaddr);
 		xsc3_l2_inv_mva(vaddr);
 	}
 	}
 
 
-	l2_map_restore_flags(flags);
+	l2_unmap_va(vaddr);
 
 
 	dsb();
 	dsb();
 }
 }
 
 
 static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
 static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
 {
 {
-	unsigned long vaddr, flags;
+	unsigned long vaddr;
 
 
 	vaddr = -1;  /* to force the first mapping */
 	vaddr = -1;  /* to force the first mapping */
-	l2_map_save_flags(flags);
 
 
 	start &= ~(CACHE_LINE_SIZE - 1);
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
 	while (start < end) {
-		vaddr = l2_map_va(start, vaddr, flags);
+		vaddr = l2_map_va(start, vaddr);
 		xsc3_l2_clean_mva(vaddr);
 		xsc3_l2_clean_mva(vaddr);
 		start += CACHE_LINE_SIZE;
 		start += CACHE_LINE_SIZE;
 	}
 	}
 
 
-	l2_map_restore_flags(flags);
+	l2_unmap_va(vaddr);
 
 
 	dsb();
 	dsb();
 }
 }
@@ -193,7 +179,7 @@ static inline void xsc3_l2_flush_all(void)
 
 
 static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
 static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
 {
 {
-	unsigned long vaddr, flags;
+	unsigned long vaddr;
 
 
 	if (start == 0 && end == -1ul) {
 	if (start == 0 && end == -1ul) {
 		xsc3_l2_flush_all();
 		xsc3_l2_flush_all();
@@ -201,17 +187,16 @@ static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
 	}
 	}
 
 
 	vaddr = -1;  /* to force the first mapping */
 	vaddr = -1;  /* to force the first mapping */
-	l2_map_save_flags(flags);
 
 
 	start &= ~(CACHE_LINE_SIZE - 1);
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
 	while (start < end) {
-		vaddr = l2_map_va(start, vaddr, flags);
+		vaddr = l2_map_va(start, vaddr);
 		xsc3_l2_clean_mva(vaddr);
 		xsc3_l2_clean_mva(vaddr);
 		xsc3_l2_inv_mva(vaddr);
 		xsc3_l2_inv_mva(vaddr);
 		start += CACHE_LINE_SIZE;
 		start += CACHE_LINE_SIZE;
 	}
 	}
 
 
-	l2_map_restore_flags(flags);
+	l2_unmap_va(vaddr);
 
 
 	dsb();
 	dsb();
 }
 }

+ 4 - 3
arch/arm/mm/dma-mapping.c

@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/dma-mapping.h>
+#include <linux/highmem.h>
 
 
 #include <asm/memory.h>
 #include <asm/memory.h>
 #include <asm/highmem.h>
 #include <asm/highmem.h>
@@ -480,10 +481,10 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 				op(vaddr, len, dir);
 				op(vaddr, len, dir);
 				kunmap_high(page);
 				kunmap_high(page);
 			} else if (cache_is_vipt()) {
 			} else if (cache_is_vipt()) {
-				pte_t saved_pte;
-				vaddr = kmap_high_l1_vipt(page, &saved_pte);
+				/* unmapped pages might still be cached */
+				vaddr = kmap_atomic(page);
 				op(vaddr + offset, len, dir);
 				op(vaddr + offset, len, dir);
-				kunmap_high_l1_vipt(page, saved_pte);
+				kunmap_atomic(vaddr);
 			}
 			}
 		} else {
 		} else {
 			vaddr = page_address(page) + offset;
 			vaddr = page_address(page) + offset;

+ 4 - 3
arch/arm/mm/flush.c

@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/pagemap.h>
+#include <linux/highmem.h>
 
 
 #include <asm/cacheflush.h>
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
 #include <asm/cachetype.h>
@@ -180,10 +181,10 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
 			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
 			kunmap_high(page);
 			kunmap_high(page);
 		} else if (cache_is_vipt()) {
 		} else if (cache_is_vipt()) {
-			pte_t saved_pte;
-			addr = kmap_high_l1_vipt(page, &saved_pte);
+			/* unmapped pages might still be cached */
+			addr = kmap_atomic(page);
 			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
 			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
-			kunmap_high_l1_vipt(page, saved_pte);
+			kunmap_atomic(addr);
 		}
 		}
 	}
 	}
 
 

+ 0 - 87
arch/arm/mm/highmem.c

@@ -140,90 +140,3 @@ struct page *kmap_atomic_to_page(const void *ptr)
 	pte = TOP_PTE(vaddr);
 	pte = TOP_PTE(vaddr);
 	return pte_page(*pte);
 	return pte_page(*pte);
 }
 }
-
-#ifdef CONFIG_CPU_CACHE_VIPT
-
-#include <linux/percpu.h>
-
-/*
- * The VIVT cache of a highmem page is always flushed before the page
- * is unmapped. Hence unmapped highmem pages need no cache maintenance
- * in that case.
- *
- * However unmapped pages may still be cached with a VIPT cache, and
- * it is not possible to perform cache maintenance on them using physical
- * addresses unfortunately.  So we have no choice but to set up a temporary
- * virtual mapping for that purpose.
- *
- * Yet this VIPT cache maintenance may be triggered from DMA support
- * functions which are possibly called from interrupt context. As we don't
- * want to keep interrupt disabled all the time when such maintenance is
- * taking place, we therefore allow for some reentrancy by preserving and
- * restoring the previous fixmap entry before the interrupted context is
- * resumed.  If the reentrancy depth is 0 then there is no need to restore
- * the previous fixmap, and leaving the current one in place allow it to
- * be reused the next time without a TLB flush (common with DMA).
- */
-
-static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
-
-void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
-{
-	unsigned int idx, cpu;
-	int *depth;
-	unsigned long vaddr, flags;
-	pte_t pte, *ptep;
-
-	if (!in_interrupt())
-		preempt_disable();
-
-	cpu = smp_processor_id();
-	depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
-
-	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	ptep = TOP_PTE(vaddr);
-	pte = mk_pte(page, kmap_prot);
-
-	raw_local_irq_save(flags);
-	(*depth)++;
-	if (pte_val(*ptep) == pte_val(pte)) {
-		*saved_pte = pte;
-	} else {
-		*saved_pte = *ptep;
-		set_pte_ext(ptep, pte, 0);
-		local_flush_tlb_kernel_page(vaddr);
-	}
-	raw_local_irq_restore(flags);
-
-	return (void *)vaddr;
-}
-
-void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
-{
-	unsigned int idx, cpu = smp_processor_id();
-	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
-	unsigned long vaddr, flags;
-	pte_t pte, *ptep;
-
-	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	ptep = TOP_PTE(vaddr);
-	pte = mk_pte(page, kmap_prot);
-
-	BUG_ON(pte_val(*ptep) != pte_val(pte));
-	BUG_ON(*depth <= 0);
-
-	raw_local_irq_save(flags);
-	(*depth)--;
-	if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
-		set_pte_ext(ptep, saved_pte, 0);
-		local_flush_tlb_kernel_page(vaddr);
-	}
-	raw_local_irq_restore(flags);
-
-	if (!in_interrupt())
-		preempt_enable();
-}
-
-#endif  /* CONFIG_CPU_CACHE_VIPT */

+ 1 - 1
arch/arm/plat-s3c24xx/Kconfig

@@ -8,7 +8,7 @@ config PLAT_S3C24XX
 	default y
 	default y
 	select NO_IOPORT
 	select NO_IOPORT
 	select ARCH_REQUIRE_GPIOLIB
 	select ARCH_REQUIRE_GPIOLIB
-	select S3C_DEVICE_NAND
+	select S3C_DEV_NAND
 	select S3C_GPIO_CFG_S3C24XX
 	select S3C_GPIO_CFG_S3C24XX
 	help
 	help
 	  Base platform code for any Samsung S3C24XX device
 	  Base platform code for any Samsung S3C24XX device

+ 24 - 14
arch/mips/Kconfig

@@ -19,6 +19,8 @@ config MIPS
 	select GENERIC_ATOMIC64 if !64BIT
 	select GENERIC_ATOMIC64 if !64BIT
 	select HAVE_DMA_ATTRS
 	select HAVE_DMA_ATTRS
 	select HAVE_DMA_API_DEBUG
 	select HAVE_DMA_API_DEBUG
+	select HAVE_GENERIC_HARDIRQS
+	select GENERIC_IRQ_PROBE
 
 
 menu "Machine selection"
 menu "Machine selection"
 
 
@@ -1664,6 +1666,28 @@ config PAGE_SIZE_64KB
 
 
 endchoice
 endchoice
 
 
+config FORCE_MAX_ZONEORDER
+	int "Maximum zone order"
+	range 13 64 if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_32KB
+	default "13" if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_32KB
+	range 12 64 if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_16KB
+	default "12" if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_16KB
+	range 11 64
+	default "11"
+	help
+	  The kernel memory allocator divides physically contiguous memory
+	  blocks into "zones", where each zone is a power of two number of
+	  pages.  This option selects the largest power of two that the kernel
+	  keeps in the memory allocator.  If you need to allocate very large
+	  blocks of physically contiguous memory, then you may need to
+	  increase this value.
+
+	  This config option is actually maximum order plus one. For example,
+	  a value of 11 means that the largest free memory block is 2^10 pages.
+
+	  The page size is not necessarily 4KB.  Keep this in mind
+	  when choosing a value for this option.
+
 config BOARD_SCACHE
 config BOARD_SCACHE
 	bool
 	bool
 
 
@@ -1921,20 +1945,6 @@ config CPU_R4000_WORKAROUNDS
 config CPU_R4400_WORKAROUNDS
 config CPU_R4400_WORKAROUNDS
 	bool
 	bool
 
 
-#
-# Use the generic interrupt handling code in kernel/irq/:
-#
-config GENERIC_HARDIRQS
-	bool
-	default y
-
-config GENERIC_IRQ_PROBE
-	bool
-	default y
-
-config IRQ_PER_CPU
-	bool
-
 #
 #
 # - Highmem only makes sense for the 32-bit kernel.
 # - Highmem only makes sense for the 32-bit kernel.
 # - The current highmem code will only work properly on physically indexed
 # - The current highmem code will only work properly on physically indexed

+ 2 - 0
arch/mips/alchemy/common/platform.c

@@ -27,6 +27,7 @@
 static void alchemy_8250_pm(struct uart_port *port, unsigned int state,
 static void alchemy_8250_pm(struct uart_port *port, unsigned int state,
 			    unsigned int old_state)
 			    unsigned int old_state)
 {
 {
+#ifdef CONFIG_SERIAL_8250
 	switch (state) {
 	switch (state) {
 	case 0:
 	case 0:
 		if ((__raw_readl(port->membase + UART_MOD_CNTRL) & 3) != 3) {
 		if ((__raw_readl(port->membase + UART_MOD_CNTRL) & 3) != 3) {
@@ -49,6 +50,7 @@ static void alchemy_8250_pm(struct uart_port *port, unsigned int state,
 		serial8250_do_pm(port, state, old_state);
 		serial8250_do_pm(port, state, old_state);
 		break;
 		break;
 	}
 	}
+#endif
 }
 }
 
 
 #define PORT(_base, _irq)					\
 #define PORT(_base, _irq)					\

+ 2 - 3
arch/mips/alchemy/devboards/prom.c

@@ -54,10 +54,9 @@ void __init prom_init(void)
 
 
 	prom_init_cmdline();
 	prom_init_cmdline();
 	memsize_str = prom_getenv("memsize");
 	memsize_str = prom_getenv("memsize");
-	if (!memsize_str)
+	if (!memsize_str || strict_strtoul(memsize_str, 0, &memsize))
 		memsize = ALCHEMY_BOARD_DEFAULT_MEMSIZE;
 		memsize = ALCHEMY_BOARD_DEFAULT_MEMSIZE;
-	else
-		strict_strtoul(memsize_str, 0, &memsize);
+
 	add_memory_region(0, memsize, BOOT_MEM_RAM);
 	add_memory_region(0, memsize, BOOT_MEM_RAM);
 }
 }
 
 

+ 3 - 6
arch/mips/ar7/clock.c

@@ -239,12 +239,12 @@ static void tnetd7300_set_clock(u32 shift, struct tnetd7300_clock *clock,
 	calculate(base_clock, frequency, &prediv, &postdiv, &mul);
 	calculate(base_clock, frequency, &prediv, &postdiv, &mul);
 
 
 	writel(((prediv - 1) << PREDIV_SHIFT) | (postdiv - 1), &clock->ctrl);
 	writel(((prediv - 1) << PREDIV_SHIFT) | (postdiv - 1), &clock->ctrl);
-	msleep(1);
+	mdelay(1);
 	writel(4, &clock->pll);
 	writel(4, &clock->pll);
 	while (readl(&clock->pll) & PLL_STATUS)
 	while (readl(&clock->pll) & PLL_STATUS)
 		;
 		;
 	writel(((mul - 1) << MUL_SHIFT) | (0xff << 3) | 0x0e, &clock->pll);
 	writel(((mul - 1) << MUL_SHIFT) | (0xff << 3) | 0x0e, &clock->pll);
-	msleep(75);
+	mdelay(75);
 }
 }
 
 
 static void __init tnetd7300_init_clocks(void)
 static void __init tnetd7300_init_clocks(void)
@@ -456,7 +456,7 @@ void clk_put(struct clk *clk)
 }
 }
 EXPORT_SYMBOL(clk_put);
 EXPORT_SYMBOL(clk_put);
 
 
-int __init ar7_init_clocks(void)
+void __init ar7_init_clocks(void)
 {
 {
 	switch (ar7_chip_id()) {
 	switch (ar7_chip_id()) {
 	case AR7_CHIP_7100:
 	case AR7_CHIP_7100:
@@ -472,7 +472,4 @@ int __init ar7_init_clocks(void)
 	}
 	}
 	/* adjust vbus clock rate */
 	/* adjust vbus clock rate */
 	vbus_clk.rate = bus_clk.rate / 2;
 	vbus_clk.rate = bus_clk.rate / 2;
-
-	return 0;
 }
 }
-arch_initcall(ar7_init_clocks);

+ 3 - 0
arch/mips/ar7/time.c

@@ -30,6 +30,9 @@ void __init plat_time_init(void)
 {
 {
 	struct clk *cpu_clk;
 	struct clk *cpu_clk;
 
 
+	/* Initialize ar7 clocks so the CPU clock frequency is correct */
+	ar7_init_clocks();
+
 	cpu_clk = clk_get(NULL, "cpu");
 	cpu_clk = clk_get(NULL, "cpu");
 	if (IS_ERR(cpu_clk)) {
 	if (IS_ERR(cpu_clk)) {
 		printk(KERN_ERR "unable to get cpu clock\n");
 		printk(KERN_ERR "unable to get cpu clock\n");

+ 106 - 47
arch/mips/bcm47xx/setup.c

@@ -32,7 +32,6 @@
 #include <asm/reboot.h>
 #include <asm/reboot.h>
 #include <asm/time.h>
 #include <asm/time.h>
 #include <bcm47xx.h>
 #include <bcm47xx.h>
-#include <asm/fw/cfe/cfe_api.h>
 #include <asm/mach-bcm47xx/nvram.h>
 #include <asm/mach-bcm47xx/nvram.h>
 
 
 struct ssb_bus ssb_bcm47xx;
 struct ssb_bus ssb_bcm47xx;
@@ -57,68 +56,112 @@ static void bcm47xx_machine_halt(void)
 		cpu_relax();
 		cpu_relax();
 }
 }
 
 
-static void str2eaddr(char *str, char *dest)
-{
-	int i = 0;
+#define READ_FROM_NVRAM(_outvar, name, buf) \
+	if (nvram_getenv(name, buf, sizeof(buf)) >= 0)\
+		sprom->_outvar = simple_strtoul(buf, NULL, 0);
 
 
-	if (str == NULL) {
-		memset(dest, 0, 6);
-		return;
+static void bcm47xx_fill_sprom(struct ssb_sprom *sprom)
+{
+	char buf[100];
+	u32 boardflags;
+
+	memset(sprom, 0, sizeof(struct ssb_sprom));
+
+	sprom->revision = 1; /* Fallback: Old hardware does not define this. */
+	READ_FROM_NVRAM(revision, "sromrev", buf);
+	if (nvram_getenv("il0macaddr", buf, sizeof(buf)) >= 0)
+		nvram_parse_macaddr(buf, sprom->il0mac);
+	if (nvram_getenv("et0macaddr", buf, sizeof(buf)) >= 0)
+		nvram_parse_macaddr(buf, sprom->et0mac);
+	if (nvram_getenv("et1macaddr", buf, sizeof(buf)) >= 0)
+		nvram_parse_macaddr(buf, sprom->et1mac);
+	READ_FROM_NVRAM(et0phyaddr, "et0phyaddr", buf);
+	READ_FROM_NVRAM(et1phyaddr, "et1phyaddr", buf);
+	READ_FROM_NVRAM(et0mdcport, "et0mdcport", buf);
+	READ_FROM_NVRAM(et1mdcport, "et1mdcport", buf);
+	READ_FROM_NVRAM(board_rev, "boardrev", buf);
+	READ_FROM_NVRAM(country_code, "ccode", buf);
+	READ_FROM_NVRAM(ant_available_a, "aa5g", buf);
+	READ_FROM_NVRAM(ant_available_bg, "aa2g", buf);
+	READ_FROM_NVRAM(pa0b0, "pa0b0", buf);
+	READ_FROM_NVRAM(pa0b1, "pa0b1", buf);
+	READ_FROM_NVRAM(pa0b2, "pa0b2", buf);
+	READ_FROM_NVRAM(pa1b0, "pa1b0", buf);
+	READ_FROM_NVRAM(pa1b1, "pa1b1", buf);
+	READ_FROM_NVRAM(pa1b2, "pa1b2", buf);
+	READ_FROM_NVRAM(pa1lob0, "pa1lob0", buf);
+	READ_FROM_NVRAM(pa1lob2, "pa1lob1", buf);
+	READ_FROM_NVRAM(pa1lob1, "pa1lob2", buf);
+	READ_FROM_NVRAM(pa1hib0, "pa1hib0", buf);
+	READ_FROM_NVRAM(pa1hib2, "pa1hib1", buf);
+	READ_FROM_NVRAM(pa1hib1, "pa1hib2", buf);
+	READ_FROM_NVRAM(gpio0, "wl0gpio0", buf);
+	READ_FROM_NVRAM(gpio1, "wl0gpio1", buf);
+	READ_FROM_NVRAM(gpio2, "wl0gpio2", buf);
+	READ_FROM_NVRAM(gpio3, "wl0gpio3", buf);
+	READ_FROM_NVRAM(maxpwr_bg, "pa0maxpwr", buf);
+	READ_FROM_NVRAM(maxpwr_al, "pa1lomaxpwr", buf);
+	READ_FROM_NVRAM(maxpwr_a, "pa1maxpwr", buf);
+	READ_FROM_NVRAM(maxpwr_ah, "pa1himaxpwr", buf);
+	READ_FROM_NVRAM(itssi_a, "pa1itssit", buf);
+	READ_FROM_NVRAM(itssi_bg, "pa0itssit", buf);
+	READ_FROM_NVRAM(tri2g, "tri2g", buf);
+	READ_FROM_NVRAM(tri5gl, "tri5gl", buf);
+	READ_FROM_NVRAM(tri5g, "tri5g", buf);
+	READ_FROM_NVRAM(tri5gh, "tri5gh", buf);
+	READ_FROM_NVRAM(rxpo2g, "rxpo2g", buf);
+	READ_FROM_NVRAM(rxpo5g, "rxpo5g", buf);
+	READ_FROM_NVRAM(rssisav2g, "rssisav2g", buf);
+	READ_FROM_NVRAM(rssismc2g, "rssismc2g", buf);
+	READ_FROM_NVRAM(rssismf2g, "rssismf2g", buf);
+	READ_FROM_NVRAM(bxa2g, "bxa2g", buf);
+	READ_FROM_NVRAM(rssisav5g, "rssisav5g", buf);
+	READ_FROM_NVRAM(rssismc5g, "rssismc5g", buf);
+	READ_FROM_NVRAM(rssismf5g, "rssismf5g", buf);
+	READ_FROM_NVRAM(bxa5g, "bxa5g", buf);
+	READ_FROM_NVRAM(cck2gpo, "cck2gpo", buf);
+	READ_FROM_NVRAM(ofdm2gpo, "ofdm2gpo", buf);
+	READ_FROM_NVRAM(ofdm5glpo, "ofdm5glpo", buf);
+	READ_FROM_NVRAM(ofdm5gpo, "ofdm5gpo", buf);
+	READ_FROM_NVRAM(ofdm5ghpo, "ofdm5ghpo", buf);
+
+	if (nvram_getenv("boardflags", buf, sizeof(buf)) >= 0) {
+		boardflags = simple_strtoul(buf, NULL, 0);
+		if (boardflags) {
+			sprom->boardflags_lo = (boardflags & 0x0000FFFFU);
+			sprom->boardflags_hi = (boardflags & 0xFFFF0000U) >> 16;
+		}
 	}
 	}
-
-	for (;;) {
-		dest[i++] = (char) simple_strtoul(str, NULL, 16);
-		str += 2;
-		if (!*str++ || i == 6)
-			break;
+	if (nvram_getenv("boardflags2", buf, sizeof(buf)) >= 0) {
+		boardflags = simple_strtoul(buf, NULL, 0);
+		if (boardflags) {
+			sprom->boardflags2_lo = (boardflags & 0x0000FFFFU);
+			sprom->boardflags2_hi = (boardflags & 0xFFFF0000U) >> 16;
+		}
 	}
 	}
 }
 }
 
 
 static int bcm47xx_get_invariants(struct ssb_bus *bus,
 static int bcm47xx_get_invariants(struct ssb_bus *bus,
 				   struct ssb_init_invariants *iv)
 				   struct ssb_init_invariants *iv)
 {
 {
-	char buf[100];
+	char buf[20];
 
 
 	/* Fill boardinfo structure */
 	/* Fill boardinfo structure */
 	memset(&(iv->boardinfo), 0 , sizeof(struct ssb_boardinfo));
 	memset(&(iv->boardinfo), 0 , sizeof(struct ssb_boardinfo));
 
 
-	if (cfe_getenv("boardvendor", buf, sizeof(buf)) >= 0 ||
-	    nvram_getenv("boardvendor", buf, sizeof(buf)) >= 0)
-		iv->boardinfo.type = (u16)simple_strtoul(buf, NULL, 0);
-	if (cfe_getenv("boardtype", buf, sizeof(buf)) >= 0 ||
-	    nvram_getenv("boardtype", buf, sizeof(buf)) >= 0)
+	if (nvram_getenv("boardvendor", buf, sizeof(buf)) >= 0)
+		iv->boardinfo.vendor = (u16)simple_strtoul(buf, NULL, 0);
+	else
+		iv->boardinfo.vendor = SSB_BOARDVENDOR_BCM;
+	if (nvram_getenv("boardtype", buf, sizeof(buf)) >= 0)
 		iv->boardinfo.type = (u16)simple_strtoul(buf, NULL, 0);
 		iv->boardinfo.type = (u16)simple_strtoul(buf, NULL, 0);
-	if (cfe_getenv("boardrev", buf, sizeof(buf)) >= 0 ||
-	    nvram_getenv("boardrev", buf, sizeof(buf)) >= 0)
+	if (nvram_getenv("boardrev", buf, sizeof(buf)) >= 0)
 		iv->boardinfo.rev = (u16)simple_strtoul(buf, NULL, 0);
 		iv->boardinfo.rev = (u16)simple_strtoul(buf, NULL, 0);
 
 
-	/* Fill sprom structure */
-	memset(&(iv->sprom), 0, sizeof(struct ssb_sprom));
-	iv->sprom.revision = 3;
-
-	if (cfe_getenv("et0macaddr", buf, sizeof(buf)) >= 0 ||
-	    nvram_getenv("et0macaddr", buf, sizeof(buf)) >= 0)
-		str2eaddr(buf, iv->sprom.et0mac);
+	bcm47xx_fill_sprom(&iv->sprom);
 
 
-	if (cfe_getenv("et1macaddr", buf, sizeof(buf)) >= 0 ||
-	    nvram_getenv("et1macaddr", buf, sizeof(buf)) >= 0)
-		str2eaddr(buf, iv->sprom.et1mac);
-
-	if (cfe_getenv("et0phyaddr", buf, sizeof(buf)) >= 0 ||
-	    nvram_getenv("et0phyaddr", buf, sizeof(buf)) >= 0)
-		iv->sprom.et0phyaddr = simple_strtoul(buf, NULL, 0);
-
-	if (cfe_getenv("et1phyaddr", buf, sizeof(buf)) >= 0 ||
-	    nvram_getenv("et1phyaddr", buf, sizeof(buf)) >= 0)
-		iv->sprom.et1phyaddr = simple_strtoul(buf, NULL, 0);
-
-	if (cfe_getenv("et0mdcport", buf, sizeof(buf)) >= 0 ||
-	    nvram_getenv("et0mdcport", buf, sizeof(buf)) >= 0)
-		iv->sprom.et0mdcport = simple_strtoul(buf, NULL, 10);
-
-	if (cfe_getenv("et1mdcport", buf, sizeof(buf)) >= 0 ||
-	    nvram_getenv("et1mdcport", buf, sizeof(buf)) >= 0)
-		iv->sprom.et1mdcport = simple_strtoul(buf, NULL, 10);
+	if (nvram_getenv("cardbus", buf, sizeof(buf)) >= 0)
+		iv->has_cardbus_slot = !!simple_strtoul(buf, NULL, 10);
 
 
 	return 0;
 	return 0;
 }
 }
@@ -126,12 +169,28 @@ static int bcm47xx_get_invariants(struct ssb_bus *bus,
 void __init plat_mem_setup(void)
 void __init plat_mem_setup(void)
 {
 {
 	int err;
 	int err;
+	char buf[100];
+	struct ssb_mipscore *mcore;
 
 
 	err = ssb_bus_ssbbus_register(&ssb_bcm47xx, SSB_ENUM_BASE,
 	err = ssb_bus_ssbbus_register(&ssb_bcm47xx, SSB_ENUM_BASE,
 				      bcm47xx_get_invariants);
 				      bcm47xx_get_invariants);
 	if (err)
 	if (err)
 		panic("Failed to initialize SSB bus (err %d)\n", err);
 		panic("Failed to initialize SSB bus (err %d)\n", err);
 
 
+	mcore = &ssb_bcm47xx.mipscore;
+	if (nvram_getenv("kernel_args", buf, sizeof(buf)) >= 0) {
+		if (strstr(buf, "console=ttyS1")) {
+			struct ssb_serial_port port;
+
+			printk(KERN_DEBUG "Swapping serial ports!\n");
+			/* swap serial ports */
+			memcpy(&port, &mcore->serial_ports[0], sizeof(port));
+			memcpy(&mcore->serial_ports[0], &mcore->serial_ports[1],
+			       sizeof(port));
+			memcpy(&mcore->serial_ports[1], &port, sizeof(port));
+		}
+	}
+
 	_machine_restart = bcm47xx_machine_restart;
 	_machine_restart = bcm47xx_machine_restart;
 	_machine_halt = bcm47xx_machine_halt;
 	_machine_halt = bcm47xx_machine_halt;
 	pm_power_off = bcm47xx_machine_halt;
 	pm_power_off = bcm47xx_machine_halt;

+ 2 - 2
arch/mips/include/asm/cpu.h

@@ -111,8 +111,8 @@
  * These are the PRID's for when 23:16 == PRID_COMP_BROADCOM
  * These are the PRID's for when 23:16 == PRID_COMP_BROADCOM
  */
  */
 
 
-#define PRID_IMP_BMIPS4KC	0x4000
-#define PRID_IMP_BMIPS32	0x8000
+#define PRID_IMP_BMIPS32_REV4	0x4000
+#define PRID_IMP_BMIPS32_REV8	0x8000
 #define PRID_IMP_BMIPS3300	0x9000
 #define PRID_IMP_BMIPS3300	0x9000
 #define PRID_IMP_BMIPS3300_ALT	0x9100
 #define PRID_IMP_BMIPS3300_ALT	0x9100
 #define PRID_IMP_BMIPS3300_BUG	0x0000
 #define PRID_IMP_BMIPS3300_BUG	0x0000

+ 6 - 2
arch/mips/include/asm/elf.h

@@ -249,7 +249,8 @@ extern struct mips_abi mips_abi_n32;
 
 
 #define SET_PERSONALITY(ex)						\
 #define SET_PERSONALITY(ex)						\
 do {									\
 do {									\
-	set_personality(PER_LINUX);					\
+	if (personality(current->personality) != PER_LINUX)		\
+		set_personality(PER_LINUX);				\
 									\
 									\
 	current->thread.abi = &mips_abi;				\
 	current->thread.abi = &mips_abi;				\
 } while (0)
 } while (0)
@@ -296,6 +297,8 @@ do {									\
 
 
 #define SET_PERSONALITY(ex)						\
 #define SET_PERSONALITY(ex)						\
 do {									\
 do {									\
+	unsigned int p;							\
+									\
 	clear_thread_flag(TIF_32BIT_REGS);				\
 	clear_thread_flag(TIF_32BIT_REGS);				\
 	clear_thread_flag(TIF_32BIT_ADDR);				\
 	clear_thread_flag(TIF_32BIT_ADDR);				\
 									\
 									\
@@ -304,7 +307,8 @@ do {									\
 	else								\
 	else								\
 		current->thread.abi = &mips_abi;			\
 		current->thread.abi = &mips_abi;			\
 									\
 									\
-	if (current->personality != PER_LINUX32)			\
+	p = personality(current->personality);				\
+	if (p != PER_LINUX32 && p != PER_LINUX)				\
 		set_personality(PER_LINUX);				\
 		set_personality(PER_LINUX);				\
 } while (0)
 } while (0)
 
 

+ 10 - 2
arch/mips/include/asm/io.h

@@ -329,10 +329,14 @@ static inline void pfx##write##bwlq(type val,				\
 			"dsrl32	%L0, %L0, 0"			"\n\t"	\
 			"dsrl32	%L0, %L0, 0"			"\n\t"	\
 			"dsll32	%M0, %M0, 0"			"\n\t"	\
 			"dsll32	%M0, %M0, 0"			"\n\t"	\
 			"or	%L0, %L0, %M0"			"\n\t"	\
 			"or	%L0, %L0, %M0"			"\n\t"	\
+			".set	push"				"\n\t"	\
+			".set	noreorder"			"\n\t"	\
+			".set	nomacro"			"\n\t"	\
 			"sd	%L0, %2"			"\n\t"	\
 			"sd	%L0, %2"			"\n\t"	\
+			".set	pop"				"\n\t"	\
 			".set	mips0"				"\n"	\
 			".set	mips0"				"\n"	\
 			: "=r" (__tmp)					\
 			: "=r" (__tmp)					\
-			: "0" (__val), "m" (*__mem));			\
+			: "0" (__val), "R" (*__mem));			\
 		if (irq)						\
 		if (irq)						\
 			local_irq_restore(__flags);			\
 			local_irq_restore(__flags);			\
 	} else								\
 	} else								\
@@ -355,12 +359,16 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem)	\
 			local_irq_save(__flags);			\
 			local_irq_save(__flags);			\
 		__asm__ __volatile__(					\
 		__asm__ __volatile__(					\
 			".set	mips3"		"\t\t# __readq"	"\n\t"	\
 			".set	mips3"		"\t\t# __readq"	"\n\t"	\
+			".set	push"				"\n\t"	\
+			".set	noreorder"			"\n\t"	\
+			".set	nomacro"			"\n\t"	\
 			"ld	%L0, %1"			"\n\t"	\
 			"ld	%L0, %1"			"\n\t"	\
+			".set	pop"				"\n\t"	\
 			"dsra32	%M0, %L0, 0"			"\n\t"	\
 			"dsra32	%M0, %L0, 0"			"\n\t"	\
 			"sll	%L0, %L0, 0"			"\n\t"	\
 			"sll	%L0, %L0, 0"			"\n\t"	\
 			".set	mips0"				"\n"	\
 			".set	mips0"				"\n"	\
 			: "=r" (__val)					\
 			: "=r" (__val)					\
-			: "m" (*__mem));				\
+			: "R" (*__mem));				\
 		if (irq)						\
 		if (irq)						\
 			local_irq_restore(__flags);			\
 			local_irq_restore(__flags);			\
 	} else {							\
 	} else {							\

+ 1 - 2
arch/mips/include/asm/mach-ar7/ar7.h

@@ -201,7 +201,6 @@ static inline void ar7_device_off(u32 bit)
 }
 }
 
 
 int __init ar7_gpio_init(void);
 int __init ar7_gpio_init(void);
-
-int __init ar7_gpio_init(void);
+void __init ar7_init_clocks(void);
 
 
 #endif /* __AR7_H__ */
 #endif /* __AR7_H__ */

+ 7 - 0
arch/mips/include/asm/mach-bcm47xx/nvram.h

@@ -12,6 +12,7 @@
 #define __NVRAM_H
 #define __NVRAM_H
 
 
 #include <linux/types.h>
 #include <linux/types.h>
+#include <linux/kernel.h>
 
 
 struct nvram_header {
 struct nvram_header {
 	u32 magic;
 	u32 magic;
@@ -36,4 +37,10 @@ struct nvram_header {
 
 
 extern int nvram_getenv(char *name, char *val, size_t val_len);
 extern int nvram_getenv(char *name, char *val, size_t val_len);
 
 
+static inline void nvram_parse_macaddr(char *buf, u8 *macaddr)
+{
+	sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0], &macaddr[1],
+	       &macaddr[2], &macaddr[3], &macaddr[4], &macaddr[5]);
+}
+
 #endif
 #endif

+ 2 - 2
arch/mips/jz4740/board-qi_lb60.c

@@ -5,7 +5,7 @@
  *
  *
  * Copyright (c) 2009 Qi Hardware inc.,
  * Copyright (c) 2009 Qi Hardware inc.,
  * Author: Xiangfu Liu <xiangfu@qi-hardware.com>
  * Author: Xiangfu Liu <xiangfu@qi-hardware.com>
- * Copyright 2010, Lars-Petrer Clausen <lars@metafoo.de>
+ * Copyright 2010, Lars-Peter Clausen <lars@metafoo.de>
  *
  *
  * This program is free software; you can redistribute it and/or modify
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 or later
  * it under the terms of the GNU General Public License version 2 or later
@@ -235,7 +235,7 @@ static const unsigned int qi_lb60_keypad_rows[] = {
 	QI_LB60_GPIO_KEYIN(3),
 	QI_LB60_GPIO_KEYIN(3),
 	QI_LB60_GPIO_KEYIN(4),
 	QI_LB60_GPIO_KEYIN(4),
 	QI_LB60_GPIO_KEYIN(5),
 	QI_LB60_GPIO_KEYIN(5),
-	QI_LB60_GPIO_KEYIN(7),
+	QI_LB60_GPIO_KEYIN(6),
 	QI_LB60_GPIO_KEYIN8,
 	QI_LB60_GPIO_KEYIN8,
 };
 };
 
 

+ 1 - 1
arch/mips/jz4740/platform.c

@@ -208,7 +208,7 @@ struct platform_device jz4740_i2s_device = {
 
 
 /* PCM */
 /* PCM */
 struct platform_device jz4740_pcm_device = {
 struct platform_device jz4740_pcm_device = {
-	.name		= "jz4740-pcm",
+	.name		= "jz4740-pcm-audio",
 	.id		= -1,
 	.id		= -1,
 };
 };
 
 

+ 1 - 1
arch/mips/jz4740/prom.c

@@ -23,7 +23,7 @@
 #include <asm/bootinfo.h>
 #include <asm/bootinfo.h>
 #include <asm/mach-jz4740/base.h>
 #include <asm/mach-jz4740/base.h>
 
 
-void jz4740_init_cmdline(int argc, char *argv[])
+static __init void jz4740_init_cmdline(int argc, char *argv[])
 {
 {
 	unsigned int count = COMMAND_LINE_SIZE - 1;
 	unsigned int count = COMMAND_LINE_SIZE - 1;
 	int i;
 	int i;

+ 1 - 1
arch/mips/kernel/cevt-r4k.c

@@ -32,7 +32,7 @@ static int mips_next_event(unsigned long delta,
 	cnt = read_c0_count();
 	cnt = read_c0_count();
 	cnt += delta;
 	cnt += delta;
 	write_c0_compare(cnt);
 	write_c0_compare(cnt);
-	res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
+	res = ((int)(read_c0_count() - cnt) >= 0) ? -ETIME : 0;
 	return res;
 	return res;
 }
 }
 
 

+ 2 - 5
arch/mips/kernel/cpu-probe.c

@@ -905,7 +905,8 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
 {
 {
 	decode_configs(c);
 	decode_configs(c);
 	switch (c->processor_id & 0xff00) {
 	switch (c->processor_id & 0xff00) {
-	case PRID_IMP_BMIPS32:
+	case PRID_IMP_BMIPS32_REV4:
+	case PRID_IMP_BMIPS32_REV8:
 		c->cputype = CPU_BMIPS32;
 		c->cputype = CPU_BMIPS32;
 		__cpu_name[cpu] = "Broadcom BMIPS32";
 		__cpu_name[cpu] = "Broadcom BMIPS32";
 		break;
 		break;
@@ -933,10 +934,6 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
 		__cpu_name[cpu] = "Broadcom BMIPS5000";
 		__cpu_name[cpu] = "Broadcom BMIPS5000";
 		c->options |= MIPS_CPU_ULRI;
 		c->options |= MIPS_CPU_ULRI;
 		break;
 		break;
-	case PRID_IMP_BMIPS4KC:
-		c->cputype = CPU_4KC;
-		__cpu_name[cpu] = "MIPS 4Kc";
-		break;
 	}
 	}
 }
 }
 
 

+ 7 - 6
arch/mips/kernel/linux32.c

@@ -251,14 +251,15 @@ SYSCALL_DEFINE5(n32_msgrcv, int, msqid, u32, msgp, size_t, msgsz,
 
 
 SYSCALL_DEFINE1(32_personality, unsigned long, personality)
 SYSCALL_DEFINE1(32_personality, unsigned long, personality)
 {
 {
+	unsigned int p = personality & 0xffffffff;
 	int ret;
 	int ret;
-	personality &= 0xffffffff;
+
 	if (personality(current->personality) == PER_LINUX32 &&
 	if (personality(current->personality) == PER_LINUX32 &&
-	    personality == PER_LINUX)
-		personality = PER_LINUX32;
-	ret = sys_personality(personality);
-	if (ret == PER_LINUX32)
-		ret = PER_LINUX;
+	    personality(p) == PER_LINUX)
+		p = (p & ~PER_MASK) | PER_LINUX32;
+	ret = sys_personality(p);
+	if (ret != -1 && personality(ret) == PER_LINUX32)
+		ret = (ret & ~PER_MASK) | PER_LINUX;
 	return ret;
 	return ret;
 }
 }
 
 

+ 0 - 1
arch/mips/kernel/process.c

@@ -142,7 +142,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	childregs->regs[7] = 0;	/* Clear error flag */
 	childregs->regs[7] = 0;	/* Clear error flag */
 
 
 	childregs->regs[2] = 0;	/* Child gets zero as return value */
 	childregs->regs[2] = 0;	/* Child gets zero as return value */
-	regs->regs[2] = p->pid;
 
 
 	if (childregs->cp0_status & ST0_CU0) {
 	if (childregs->cp0_status & ST0_CU0) {
 		childregs->regs[28] = (unsigned long) ti;
 		childregs->regs[28] = (unsigned long) ti;

+ 1 - 1
arch/mips/kernel/prom.c

@@ -100,7 +100,7 @@ void __init device_tree_init(void)
 		return;
 		return;
 
 
 	base = virt_to_phys((void *)initial_boot_params);
 	base = virt_to_phys((void *)initial_boot_params);
-	size = initial_boot_params->totalsize;
+	size = be32_to_cpu(initial_boot_params->totalsize);
 
 
 	/* Before we do anything, lets reserve the dt blob */
 	/* Before we do anything, lets reserve the dt blob */
 	reserve_mem_mach(base, size);
 	reserve_mem_mach(base, size);

+ 1 - 1
arch/mips/kernel/smp-mt.c

@@ -153,7 +153,7 @@ static void __cpuinit vsmp_init_secondary(void)
 {
 {
 	extern int gic_present;
 	extern int gic_present;
 
 
-	/* This is Malta specific: IPI,performance and timer inetrrupts */
+	/* This is Malta specific: IPI,performance and timer interrupts */
 	if (gic_present)
 	if (gic_present)
 		change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
 		change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
 					 STATUSF_IP6 | STATUSF_IP7);
 					 STATUSF_IP6 | STATUSF_IP7);

+ 35 - 9
arch/mips/kernel/traps.c

@@ -83,7 +83,8 @@ extern asmlinkage void handle_mcheck(void);
 extern asmlinkage void handle_reserved(void);
 extern asmlinkage void handle_reserved(void);
 
 
 extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
 extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
-	struct mips_fpu_struct *ctx, int has_fpu);
+				    struct mips_fpu_struct *ctx, int has_fpu,
+				    void *__user *fault_addr);
 
 
 void (*board_be_init)(void);
 void (*board_be_init)(void);
 int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
 int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
@@ -661,12 +662,36 @@ asmlinkage void do_ov(struct pt_regs *regs)
 	force_sig_info(SIGFPE, &info, current);
 	force_sig_info(SIGFPE, &info, current);
 }
 }
 
 
+static int process_fpemu_return(int sig, void __user *fault_addr)
+{
+	if (sig == SIGSEGV || sig == SIGBUS) {
+		struct siginfo si = {0};
+		si.si_addr = fault_addr;
+		si.si_signo = sig;
+		if (sig == SIGSEGV) {
+			if (find_vma(current->mm, (unsigned long)fault_addr))
+				si.si_code = SEGV_ACCERR;
+			else
+				si.si_code = SEGV_MAPERR;
+		} else {
+			si.si_code = BUS_ADRERR;
+		}
+		force_sig_info(sig, &si, current);
+		return 1;
+	} else if (sig) {
+		force_sig(sig, current);
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
 /*
 /*
  * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
  * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
  */
  */
 asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 {
 {
-	siginfo_t info;
+	siginfo_t info = {0};
 
 
 	if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), SIGFPE)
 	if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), SIGFPE)
 	    == NOTIFY_STOP)
 	    == NOTIFY_STOP)
@@ -675,6 +700,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 
 
 	if (fcr31 & FPU_CSR_UNI_X) {
 	if (fcr31 & FPU_CSR_UNI_X) {
 		int sig;
 		int sig;
+		void __user *fault_addr = NULL;
 
 
 		/*
 		/*
 		 * Unimplemented operation exception.  If we've got the full
 		 * Unimplemented operation exception.  If we've got the full
@@ -690,7 +716,8 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 		lose_fpu(1);
 		lose_fpu(1);
 
 
 		/* Run the emulator */
 		/* Run the emulator */
-		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1);
+		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
+					       &fault_addr);
 
 
 		/*
 		/*
 		 * We can't allow the emulated instruction to leave any of
 		 * We can't allow the emulated instruction to leave any of
@@ -702,8 +729,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 		own_fpu(1);	/* Using the FPU again.  */
 		own_fpu(1);	/* Using the FPU again.  */
 
 
 		/* If something went wrong, signal */
 		/* If something went wrong, signal */
-		if (sig)
-			force_sig(sig, current);
+		process_fpemu_return(sig, fault_addr);
 
 
 		return;
 		return;
 	} else if (fcr31 & FPU_CSR_INV_X)
 	} else if (fcr31 & FPU_CSR_INV_X)
@@ -996,11 +1022,11 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 
 
 		if (!raw_cpu_has_fpu) {
 		if (!raw_cpu_has_fpu) {
 			int sig;
 			int sig;
+			void __user *fault_addr = NULL;
 			sig = fpu_emulator_cop1Handler(regs,
 			sig = fpu_emulator_cop1Handler(regs,
-						&current->thread.fpu, 0);
-			if (sig)
-				force_sig(sig, current);
-			else
+						       &current->thread.fpu,
+						       0, &fault_addr);
+			if (!process_fpemu_return(sig, fault_addr))
 				mt_ase_fp_affinity();
 				mt_ase_fp_affinity();
 		}
 		}
 
 

+ 6 - 8
arch/mips/kernel/vpe.c

@@ -1092,6 +1092,10 @@ static int vpe_open(struct inode *inode, struct file *filp)
 
 
 	/* this of-course trashes what was there before... */
 	/* this of-course trashes what was there before... */
 	v->pbuffer = vmalloc(P_SIZE);
 	v->pbuffer = vmalloc(P_SIZE);
+	if (!v->pbuffer) {
+		pr_warning("VPE loader: unable to allocate memory\n");
+		return -ENOMEM;
+	}
 	v->plen = P_SIZE;
 	v->plen = P_SIZE;
 	v->load_addr = NULL;
 	v->load_addr = NULL;
 	v->len = 0;
 	v->len = 0;
@@ -1149,10 +1153,9 @@ static int vpe_release(struct inode *inode, struct file *filp)
 	if (ret < 0)
 	if (ret < 0)
 		v->shared_ptr = NULL;
 		v->shared_ptr = NULL;
 
 
-	// cleanup any temp buffers
-	if (v->pbuffer)
-		vfree(v->pbuffer);
+	vfree(v->pbuffer);
 	v->plen = 0;
 	v->plen = 0;
+
 	return ret;
 	return ret;
 }
 }
 
 
@@ -1169,11 +1172,6 @@ static ssize_t vpe_write(struct file *file, const char __user * buffer,
 	if (v == NULL)
 	if (v == NULL)
 		return -ENODEV;
 		return -ENODEV;
 
 
-	if (v->pbuffer == NULL) {
-		printk(KERN_ERR "VPE loader: no buffer for program\n");
-		return -ENOMEM;
-	}
-
 	if ((count + v->len) > v->plen) {
 	if ((count + v->len) > v->plen) {
 		printk(KERN_WARNING
 		printk(KERN_WARNING
 		       "VPE loader: elf size too big. Perhaps strip uneeded symbols\n");
 		       "VPE loader: elf size too big. Perhaps strip uneeded symbols\n");

+ 2 - 2
arch/mips/lib/memset.S

@@ -161,16 +161,16 @@ FEXPORT(__bzero)
 
 
 .Lfwd_fixup:
 .Lfwd_fixup:
 	PTR_L		t0, TI_TASK($28)
 	PTR_L		t0, TI_TASK($28)
-	LONG_L		t0, THREAD_BUADDR(t0)
 	andi		a2, 0x3f
 	andi		a2, 0x3f
+	LONG_L		t0, THREAD_BUADDR(t0)
 	LONG_ADDU	a2, t1
 	LONG_ADDU	a2, t1
 	jr		ra
 	jr		ra
 	 LONG_SUBU	a2, t0
 	 LONG_SUBU	a2, t0
 
 
 .Lpartial_fixup:
 .Lpartial_fixup:
 	PTR_L		t0, TI_TASK($28)
 	PTR_L		t0, TI_TASK($28)
-	LONG_L		t0, THREAD_BUADDR(t0)
 	andi		a2, LONGMASK
 	andi		a2, LONGMASK
+	LONG_L		t0, THREAD_BUADDR(t0)
 	LONG_ADDU	a2, t1
 	LONG_ADDU	a2, t1
 	jr		ra
 	jr		ra
 	 LONG_SUBU	a2, t0
 	 LONG_SUBU	a2, t0

+ 2 - 2
arch/mips/loongson/common/env.c

@@ -29,9 +29,9 @@ unsigned long memsize, highmemsize;
 
 
 #define parse_even_earlier(res, option, p)				\
 #define parse_even_earlier(res, option, p)				\
 do {									\
 do {									\
+	int ret;							\
 	if (strncmp(option, (char *)p, strlen(option)) == 0)		\
 	if (strncmp(option, (char *)p, strlen(option)) == 0)		\
-			strict_strtol((char *)p + strlen(option"="),	\
-					10, &res);			\
+		ret = strict_strtol((char *)p + strlen(option"="), 10, &res); \
 } while (0)
 } while (0)
 
 
 void __init prom_init_env(void)
 void __init prom_init_env(void)

+ 95 - 21
arch/mips/math-emu/cp1emu.c

@@ -64,7 +64,7 @@ static int fpu_emu(struct pt_regs *, struct mips_fpu_struct *,
 
 
 #if __mips >= 4 && __mips != 32
 #if __mips >= 4 && __mips != 32
 static int fpux_emu(struct pt_regs *,
 static int fpux_emu(struct pt_regs *,
-	struct mips_fpu_struct *, mips_instruction);
+	struct mips_fpu_struct *, mips_instruction, void *__user *);
 #endif
 #endif
 
 
 /* Further private data for which no space exists in mips_fpu_struct */
 /* Further private data for which no space exists in mips_fpu_struct */
@@ -208,16 +208,23 @@ static inline int cop1_64bit(struct pt_regs *xcp)
  * Two instructions if the instruction is in a branch delay slot.
  * Two instructions if the instruction is in a branch delay slot.
  */
  */
 
 
-static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx)
+static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+		       void *__user *fault_addr)
 {
 {
 	mips_instruction ir;
 	mips_instruction ir;
 	unsigned long emulpc, contpc;
 	unsigned long emulpc, contpc;
 	unsigned int cond;
 	unsigned int cond;
 
 
-	if (get_user(ir, (mips_instruction __user *) xcp->cp0_epc)) {
+	if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) {
 		MIPS_FPU_EMU_INC_STATS(errors);
 		MIPS_FPU_EMU_INC_STATS(errors);
+		*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
 		return SIGBUS;
 		return SIGBUS;
 	}
 	}
+	if (__get_user(ir, (mips_instruction __user *) xcp->cp0_epc)) {
+		MIPS_FPU_EMU_INC_STATS(errors);
+		*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
+		return SIGSEGV;
+	}
 
 
 	/* XXX NEC Vr54xx bug workaround */
 	/* XXX NEC Vr54xx bug workaround */
 	if ((xcp->cp0_cause & CAUSEF_BD) && !isBranchInstr(&ir))
 	if ((xcp->cp0_cause & CAUSEF_BD) && !isBranchInstr(&ir))
@@ -245,10 +252,16 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx)
 #endif
 #endif
 			return SIGILL;
 			return SIGILL;
 		}
 		}
-		if (get_user(ir, (mips_instruction __user *) emulpc)) {
+		if (!access_ok(VERIFY_READ, emulpc, sizeof(mips_instruction))) {
 			MIPS_FPU_EMU_INC_STATS(errors);
 			MIPS_FPU_EMU_INC_STATS(errors);
+			*fault_addr = (mips_instruction __user *)emulpc;
 			return SIGBUS;
 			return SIGBUS;
 		}
 		}
+		if (__get_user(ir, (mips_instruction __user *) emulpc)) {
+			MIPS_FPU_EMU_INC_STATS(errors);
+			*fault_addr = (mips_instruction __user *)emulpc;
+			return SIGSEGV;
+		}
 		/* __compute_return_epc() will have updated cp0_epc */
 		/* __compute_return_epc() will have updated cp0_epc */
 		contpc = xcp->cp0_epc;
 		contpc = xcp->cp0_epc;
 		/* In order not to confuse ptrace() et al, tweak context */
 		/* In order not to confuse ptrace() et al, tweak context */
@@ -269,10 +282,17 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx)
 		u64 val;
 		u64 val;
 
 
 		MIPS_FPU_EMU_INC_STATS(loads);
 		MIPS_FPU_EMU_INC_STATS(loads);
-		if (get_user(val, va)) {
+
+		if (!access_ok(VERIFY_READ, va, sizeof(u64))) {
 			MIPS_FPU_EMU_INC_STATS(errors);
 			MIPS_FPU_EMU_INC_STATS(errors);
+			*fault_addr = va;
 			return SIGBUS;
 			return SIGBUS;
 		}
 		}
+		if (__get_user(val, va)) {
+			MIPS_FPU_EMU_INC_STATS(errors);
+			*fault_addr = va;
+			return SIGSEGV;
+		}
 		DITOREG(val, MIPSInst_RT(ir));
 		DITOREG(val, MIPSInst_RT(ir));
 		break;
 		break;
 	}
 	}
@@ -284,10 +304,16 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx)
 
 
 		MIPS_FPU_EMU_INC_STATS(stores);
 		MIPS_FPU_EMU_INC_STATS(stores);
 		DIFROMREG(val, MIPSInst_RT(ir));
 		DIFROMREG(val, MIPSInst_RT(ir));
-		if (put_user(val, va)) {
+		if (!access_ok(VERIFY_WRITE, va, sizeof(u64))) {
 			MIPS_FPU_EMU_INC_STATS(errors);
 			MIPS_FPU_EMU_INC_STATS(errors);
+			*fault_addr = va;
 			return SIGBUS;
 			return SIGBUS;
 		}
 		}
+		if (__put_user(val, va)) {
+			MIPS_FPU_EMU_INC_STATS(errors);
+			*fault_addr = va;
+			return SIGSEGV;
+		}
 		break;
 		break;
 	}
 	}
 
 
@@ -297,10 +323,16 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx)
 		u32 val;
 		u32 val;
 
 
 		MIPS_FPU_EMU_INC_STATS(loads);
 		MIPS_FPU_EMU_INC_STATS(loads);
-		if (get_user(val, va)) {
+		if (!access_ok(VERIFY_READ, va, sizeof(u32))) {
 			MIPS_FPU_EMU_INC_STATS(errors);
 			MIPS_FPU_EMU_INC_STATS(errors);
+			*fault_addr = va;
 			return SIGBUS;
 			return SIGBUS;
 		}
 		}
+		if (__get_user(val, va)) {
+			MIPS_FPU_EMU_INC_STATS(errors);
+			*fault_addr = va;
+			return SIGSEGV;
+		}
 		SITOREG(val, MIPSInst_RT(ir));
 		SITOREG(val, MIPSInst_RT(ir));
 		break;
 		break;
 	}
 	}
@@ -312,10 +344,16 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx)
 
 
 		MIPS_FPU_EMU_INC_STATS(stores);
 		MIPS_FPU_EMU_INC_STATS(stores);
 		SIFROMREG(val, MIPSInst_RT(ir));
 		SIFROMREG(val, MIPSInst_RT(ir));
-		if (put_user(val, va)) {
+		if (!access_ok(VERIFY_WRITE, va, sizeof(u32))) {
 			MIPS_FPU_EMU_INC_STATS(errors);
 			MIPS_FPU_EMU_INC_STATS(errors);
+			*fault_addr = va;
 			return SIGBUS;
 			return SIGBUS;
 		}
 		}
+		if (__put_user(val, va)) {
+			MIPS_FPU_EMU_INC_STATS(errors);
+			*fault_addr = va;
+			return SIGSEGV;
+		}
 		break;
 		break;
 	}
 	}
 
 
@@ -440,11 +478,18 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx)
 				contpc = (xcp->cp0_epc +
 				contpc = (xcp->cp0_epc +
 					(MIPSInst_SIMM(ir) << 2));
 
-				if (get_user(ir,
-				    (mips_instruction __user *) xcp->cp0_epc)) {
+				if (!access_ok(VERIFY_READ, xcp->cp0_epc,
+					       sizeof(mips_instruction))) {
 					MIPS_FPU_EMU_INC_STATS(errors);
+					*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
 					return SIGBUS;
 				}
+				if (__get_user(ir,
+				    (mips_instruction __user *) xcp->cp0_epc)) {
+					MIPS_FPU_EMU_INC_STATS(errors);
+					*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
+					return SIGSEGV;
+				}
 
 				switch (MIPSInst_OPCODE(ir)) {
 				case lwc1_op:
@@ -506,9 +551,8 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx)
 
 #if __mips >= 4 && __mips != 32
 	case cop1x_op:{
-		int sig;
-
-		if ((sig = fpux_emu(xcp, ctx, ir)))
+		int sig = fpux_emu(xcp, ctx, ir, fault_addr);
+		if (sig)
 			return sig;
 		break;
 	}
@@ -604,7 +648,7 @@ DEF3OP(nmadd, dp, ieee754dp_mul, ieee754dp_add, ieee754dp_neg);
 DEF3OP(nmsub, dp, ieee754dp_mul, ieee754dp_sub, ieee754dp_neg);
 
 static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
-	mips_instruction ir)
+	mips_instruction ir, void *__user *fault_addr)
 {
 	unsigned rcsr = 0;	/* resulting csr */
 
@@ -624,10 +668,16 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 				xcp->regs[MIPSInst_FT(ir)]);
 
 			MIPS_FPU_EMU_INC_STATS(loads);
-			if (get_user(val, va)) {
+			if (!access_ok(VERIFY_READ, va, sizeof(u32))) {
 				MIPS_FPU_EMU_INC_STATS(errors);
+				*fault_addr = va;
 				return SIGBUS;
 			}
+			if (__get_user(val, va)) {
+				MIPS_FPU_EMU_INC_STATS(errors);
+				*fault_addr = va;
+				return SIGSEGV;
+			}
 			SITOREG(val, MIPSInst_FD(ir));
 			break;
 
@@ -638,10 +688,16 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 			MIPS_FPU_EMU_INC_STATS(stores);
 
 			SIFROMREG(val, MIPSInst_FS(ir));
-			if (put_user(val, va)) {
+			if (!access_ok(VERIFY_WRITE, va, sizeof(u32))) {
 				MIPS_FPU_EMU_INC_STATS(errors);
+				*fault_addr = va;
 				return SIGBUS;
 			}
+			if (put_user(val, va)) {
+				MIPS_FPU_EMU_INC_STATS(errors);
+				*fault_addr = va;
+				return SIGSEGV;
+			}
 			break;
 
 		case madd_s_op:
@@ -701,10 +757,16 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 				xcp->regs[MIPSInst_FT(ir)]);
 
 			MIPS_FPU_EMU_INC_STATS(loads);
-			if (get_user(val, va)) {
+			if (!access_ok(VERIFY_READ, va, sizeof(u64))) {
 				MIPS_FPU_EMU_INC_STATS(errors);
+				*fault_addr = va;
 				return SIGBUS;
 			}
+			if (__get_user(val, va)) {
+				MIPS_FPU_EMU_INC_STATS(errors);
+				*fault_addr = va;
+				return SIGSEGV;
+			}
 			DITOREG(val, MIPSInst_FD(ir));
 			break;
 
@@ -714,10 +776,16 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 
 			MIPS_FPU_EMU_INC_STATS(stores);
 			DIFROMREG(val, MIPSInst_FS(ir));
-			if (put_user(val, va)) {
+			if (!access_ok(VERIFY_WRITE, va, sizeof(u64))) {
 				MIPS_FPU_EMU_INC_STATS(errors);
+				*fault_addr = va;
 				return SIGBUS;
 			}
+			if (__put_user(val, va)) {
+				MIPS_FPU_EMU_INC_STATS(errors);
+				*fault_addr = va;
+				return SIGSEGV;
+			}
 			break;
 
 		case madd_d_op:
@@ -1242,7 +1310,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 }
 
 int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
-	int has_fpu)
+	int has_fpu, void *__user *fault_addr)
 {
 	unsigned long oldepc, prevepc;
 	mips_instruction insn;
@@ -1252,10 +1320,16 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 	do {
 		prevepc = xcp->cp0_epc;
 
-		if (get_user(insn, (mips_instruction __user *) xcp->cp0_epc)) {
+		if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) {
 			MIPS_FPU_EMU_INC_STATS(errors);
+			*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
 			return SIGBUS;
 		}
+		if (__get_user(insn, (mips_instruction __user *) xcp->cp0_epc)) {
+			MIPS_FPU_EMU_INC_STATS(errors);
+			*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
+			return SIGSEGV;
+		}
 		if (insn == 0)
			xcp->cp0_epc += 4;	/* skip nops */
 		else {
@@ -1267,7 +1341,7 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 			 */
 			/* convert to ieee library modes */
 			ieee754_csr.rm = ieee_rm[ieee754_csr.rm];
-			sig = cop1Emulate(xcp, ctx);
+			sig = cop1Emulate(xcp, ctx, fault_addr);
 			/* revert to mips rounding mode */
 			ieee754_csr.rm = mips_rm[ieee754_csr.rm];
 		}

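Note (illustration, not part of the patch above): the cp1emu.c change splits every user access into an access_ok() range check, which fails with SIGBUS, and a __get_user()/__put_user() access, which fails with SIGSEGV, recording the offending address through the new fault_addr argument. A minimal standalone sketch of that pattern with a hypothetical helper name, assuming the 2.6.x access_ok(VERIFY_READ, ...) signature:

	#include <linux/uaccess.h>
	#include <linux/signal.h>

	/* Hypothetical helper, for illustration only -- not in the patch. */
	static int emu_fetch_word(unsigned long addr, u32 *val,
				  void __user **fault_addr)
	{
		if (!access_ok(VERIFY_READ, addr, sizeof(u32))) {
			*fault_addr = (void __user *)addr;
			return SIGBUS;		/* address is outside the user range */
		}
		if (__get_user(*val, (u32 __user *)addr)) {
			*fault_addr = (void __user *)addr;
			return SIGSEGV;		/* the access itself faulted */
		}
		return 0;			/* *val now holds the fetched word */
	}
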
+ 3 - 1
arch/mips/mm/dma-default.c

@@ -288,7 +288,7 @@ int mips_dma_supported(struct device *dev, u64 mask)
 	return plat_dma_supported(dev, mask);
 }
 
-void mips_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 			 enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
@@ -298,6 +298,8 @@ void mips_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		__dma_sync((unsigned long)vaddr, size, direction);
 }
 
+EXPORT_SYMBOL(dma_cache_sync);
+
 static struct dma_map_ops mips_default_dma_map_ops = {
 	.alloc_coherent = mips_dma_alloc_coherent,
 	.free_coherent = mips_dma_free_coherent,

+ 4 - 0
arch/mips/mm/sc-mips.c

@@ -68,6 +68,9 @@ static struct bcache_ops mips_sc_ops = {
  */
 static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
 {
+	unsigned int config2 = read_c0_config2();
+	unsigned int tmp;
+
 	/* Check the bypass bit (L2B) */
 	switch (c->cputype) {
 	case CPU_34K:
@@ -83,6 +86,7 @@ static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
 		c->scache.linesz = 2 << tmp;
 	else
 		return 0;
+	return 1;
 }
 
 static inline int __init mips_sc_probe(void)

+ 10 - 2
arch/mips/pmc-sierra/yosemite/py-console.c

@@ -65,11 +65,15 @@ static unsigned char readb_outer_space(unsigned long long phys)
 
 	__asm__ __volatile__ (
 	"	.set	mips3		\n"
+	"	.set	push		\n"
+	"	.set	noreorder	\n"
+	"	.set	nomacro		\n"
 	"	ld	%0, %1		\n"
+	"	.set	pop		\n"
 	"	lbu	%0, (%0)	\n"
 	"	.set	mips0		\n"
 	: "=r" (res)
-	: "m" (vaddr));
+	: "R" (vaddr));
 
 	write_c0_status(sr);
 	ssnop_4();
@@ -89,11 +93,15 @@ static void writeb_outer_space(unsigned long long phys, unsigned char c)
 
 	__asm__ __volatile__ (
 	"	.set	mips3		\n"
+	"	.set	push		\n"
+	"	.set	noreorder	\n"
+	"	.set	nomacro		\n"
 	"	ld	%0, %1		\n"
+	"	.set	pop		\n"
 	"	sb	%2, (%0)	\n"
 	"	.set	mips0		\n"
 	: "=&r" (tmp)
-	: "m" (vaddr), "r" (c));
+	: "R" (vaddr), "r" (c));
 
 	write_c0_status(sr);
 	ssnop_4();

+ 4 - 4
arch/mips/sibyte/swarm/setup.c

@@ -82,7 +82,7 @@ int swarm_be_handler(struct pt_regs *regs, int is_fixup)
 enum swarm_rtc_type {
 	RTC_NONE,
 	RTC_XICOR,
-	RTC_M4LT81
+	RTC_M41T81,
 };
 
 enum swarm_rtc_type swarm_rtc_type;
@@ -96,7 +96,7 @@ void read_persistent_clock(struct timespec *ts)
 		sec = xicor_get_time();
 		break;
 
-	case RTC_M4LT81:
+	case RTC_M41T81:
 		sec = m41t81_get_time();
 		break;
 
@@ -115,7 +115,7 @@ int rtc_mips_set_time(unsigned long sec)
 	case RTC_XICOR:
 		return xicor_set_time(sec);
 
-	case RTC_M4LT81:
+	case RTC_M41T81:
 		return m41t81_set_time(sec);
 
 	case RTC_NONE:
@@ -141,7 +141,7 @@ void __init plat_mem_setup(void)
 	if (xicor_probe())
 		swarm_rtc_type = RTC_XICOR;
 	if (m41t81_probe())
-		swarm_rtc_type = RTC_M4LT81;
+		swarm_rtc_type = RTC_M41T81;
 
 #ifdef CONFIG_VT
 	screen_info = (struct screen_info) {

+ 1 - 1
arch/mn10300/kernel/irq.c

@@ -459,7 +459,7 @@ void migrate_irqs(void)
 			tmp = CROSS_GxICR(irq, new);
 
 			x &= GxICR_LEVEL | GxICR_ENABLE;
-			if (GxICR(irq) & GxICR_REQUEST) {
+			if (GxICR(irq) & GxICR_REQUEST)
 				x |= GxICR_REQUEST | GxICR_DETECT;
 			CROSS_GxICR(irq, new) = x;
 			tmp = CROSS_GxICR(irq, new);

+ 3 - 7
arch/mn10300/kernel/time.c

@@ -40,21 +40,17 @@ unsigned long long sched_clock(void)
 		unsigned long long ll;
 		unsigned l[2];
 	} tsc64, result;
-	unsigned long tsc, tmp;
+	unsigned long tmp;
 	unsigned product[3]; /* 96-bit intermediate value */
 
 	/* cnt32_to_63() is not safe with preemption */
 	preempt_disable();
 
-	/* read the TSC value
-	 */
-	tsc = get_cycles();
-
-	/* expand to 64-bits.
+	/* expand the tsc to 64-bits.
 	 * - sched_clock() must be called once a minute or better or the
 	 *   following will go horribly wrong - see cnt32_to_63()
 	 */
-	tsc64.ll = cnt32_to_63(tsc) & 0x7fffffffffffffffULL;
+	tsc64.ll = cnt32_to_63(get_cycles()) & 0x7fffffffffffffffULL;
 
 	preempt_enable();
 

+ 1 - 0
arch/powerpc/platforms/52xx/mpc52xx_gpt.c

@@ -63,6 +63,7 @@
 #include <linux/of_gpio.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <linux/fs.h>
 #include <linux/watchdog.h>
 #include <linux/miscdevice.h>
 #include <linux/uaccess.h>

+ 1 - 1
arch/sh/boards/mach-se/7206/irq.c

@@ -140,7 +140,7 @@ void __init init_se7206_IRQ(void)
 	make_se7206_irq(IRQ1_IRQ); /* ATA */
 	make_se7206_irq(IRQ3_IRQ); /* SLOT / PCM */
 
-	__raw_writew(__raw_readw(INTC_ICR1) | 0x000b, INTC_ICR); /* ICR1 */
+	__raw_writew(__raw_readw(INTC_ICR1) | 0x000b, INTC_ICR1); /* ICR1 */
 
 	/* FPGA System register setup*/
 	__raw_writew(0x0000,INTSTS0); /* Clear INTSTS0 */

+ 1 - 1
arch/sh/kernel/cpu/sh2a/clock-sh7201.c

@@ -34,7 +34,7 @@ static const int pfc_divisors[]={1,2,3,4,6,8,12};
 
 static void master_clk_init(struct clk *clk)
 {
-	return 10000000 * PLL2 * pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
+	clk->rate = 10000000 * PLL2 * pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
 }
 
 static struct clk_ops sh7201_master_clk_ops = {

+ 1 - 2
arch/sh/kernel/cpu/sh4/clock-sh4-202.c

@@ -81,8 +81,7 @@ static void shoc_clk_init(struct clk *clk)
 	for (i = 0; i < ARRAY_SIZE(frqcr3_divisors); i++) {
 		int divisor = frqcr3_divisors[i];
 
-		if (clk->ops->set_rate(clk, clk->parent->rate /
-						divisor, 0) == 0)
+		if (clk->ops->set_rate(clk, clk->parent->rate / divisor) == 0)
 			break;
 	}
 

+ 1 - 1
arch/tile/include/asm/signal.h

@@ -25,7 +25,7 @@
 
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 struct pt_regs;
-int restore_sigcontext(struct pt_regs *, struct sigcontext __user *, long *);
+int restore_sigcontext(struct pt_regs *, struct sigcontext __user *);
 int setup_sigcontext(struct sigcontext __user *, struct pt_regs *);
 void do_signal(struct pt_regs *regs);
 #endif

+ 3 - 3
arch/tile/kernel/compat_signal.c

@@ -290,12 +290,12 @@ long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
 	return ret;
 }
 
+/* The assembly shim for this function arranges to ignore the return value. */
 long compat_sys_rt_sigreturn(struct pt_regs *regs)
 {
 	struct compat_rt_sigframe __user *frame =
 		(struct compat_rt_sigframe __user *) compat_ptr(regs->sp);
 	sigset_t set;
-	long r0;
 
 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
 		goto badframe;
@@ -308,13 +308,13 @@ long compat_sys_rt_sigreturn(struct pt_regs *regs)
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
-	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
+	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
 		goto badframe;
 
 	if (compat_sys_sigaltstack(&frame->uc.uc_stack, NULL, regs) != 0)
 		goto badframe;
 
-	return r0;
+	return 0;
 
 badframe:
 	force_sig(SIGSEGV, current);

+ 21 - 3
arch/tile/kernel/intvec_32.S

@@ -1342,8 +1342,8 @@ handle_syscall:
 	lw      r20, r20
 
 	/* Jump to syscall handler. */
-	jalr    r20; .Lhandle_syscall_link:
-	FEEDBACK_REENTER(handle_syscall)
+	jalr    r20
+.Lhandle_syscall_link: /* value of "lr" after "jalr r20" above */
 
 	/*
 	 * Write our r0 onto the stack so it gets restored instead
@@ -1352,6 +1352,9 @@ handle_syscall:
 	PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
 	sw      r29, r0
 
+.Lsyscall_sigreturn_skip:
+	FEEDBACK_REENTER(handle_syscall)
+
 	/* Do syscall trace again, if requested. */
 	lw	r30, r31
 	andi    r30, r30, _TIF_SYSCALL_TRACE
@@ -1536,9 +1539,24 @@ STD_ENTRY_LOCAL(bad_intr)
 	};                                              \
 	STD_ENDPROC(_##x)
 
+/*
+ * Special-case sigreturn to not write r0 to the stack on return.
+ * This is technically more efficient, but it also avoids difficulties
+ * in the 64-bit OS when handling 32-bit compat code, since we must not
+ * sign-extend r0 for the sigreturn return-value case.
+ */
+#define PTREGS_SYSCALL_SIGRETURN(x, reg)                \
+	STD_ENTRY(_##x);                                \
+	addli   lr, lr, .Lsyscall_sigreturn_skip - .Lhandle_syscall_link; \
+	{                                               \
+	 PTREGS_PTR(reg, PTREGS_OFFSET_BASE);           \
+	 j      x                                       \
+	};                                              \
+	STD_ENDPROC(_##x)
+
 PTREGS_SYSCALL(sys_execve, r3)
 PTREGS_SYSCALL(sys_sigaltstack, r2)
-PTREGS_SYSCALL(sys_rt_sigreturn, r0)
+PTREGS_SYSCALL_SIGRETURN(sys_rt_sigreturn, r0)
 PTREGS_SYSCALL(sys_cmpxchg_badaddr, r1)
 
 /* Save additional callee-saves to pt_regs, put address in r4 and jump. */

+ 8 - 0
arch/tile/kernel/process.c

@@ -211,6 +211,13 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 	childregs->regs[0] = 0;         /* return value is zero */
 	childregs->sp = sp;  /* override with new user stack pointer */
 
+	/*
+	 * If CLONE_SETTLS is set, set "tp" in the new task to "r4",
+	 * which is passed in as arg #5 to sys_clone().
+	 */
+	if (clone_flags & CLONE_SETTLS)
+		childregs->tp = regs->regs[4];
+
 	/*
 	 * Copy the callee-saved registers from the passed pt_regs struct
 	 * into the context-switch callee-saved registers area.
@@ -539,6 +546,7 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
 	return __switch_to(prev, next, next_current_ksp0(next));
 }
 
+/* Note there is an implicit fifth argument if (clone_flags & CLONE_SETTLS). */
 SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
 		void __user *, parent_tidptr, void __user *, child_tidptr,
 		struct pt_regs *, regs)

+ 4 - 6
arch/tile/kernel/signal.c

@@ -52,7 +52,7 @@ SYSCALL_DEFINE3(sigaltstack, const stack_t __user *, uss,
  */
 
 int restore_sigcontext(struct pt_regs *regs,
-		       struct sigcontext __user *sc, long *pr0)
+		       struct sigcontext __user *sc)
 {
 	int err = 0;
 	int i;
@@ -75,17 +75,15 @@ int restore_sigcontext(struct pt_regs *regs,
 
 	regs->faultnum = INT_SWINT_1_SIGRETURN;
 
-	err |= __get_user(*pr0, &sc->gregs[0]);
 	return err;
 }
 
-/* sigreturn() returns long since it restores r0 in the interrupted code. */
+/* The assembly shim for this function arranges to ignore the return value. */
 SYSCALL_DEFINE1(rt_sigreturn, struct pt_regs *, regs)
 {
 	struct rt_sigframe __user *frame =
 		(struct rt_sigframe __user *)(regs->sp);
 	sigset_t set;
-	long r0;
 
 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
 		goto badframe;
@@ -98,13 +96,13 @@ SYSCALL_DEFINE1(rt_sigreturn, struct pt_regs *, regs)
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
-	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
+	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
 		goto badframe;
 
 	if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT)
 		goto badframe;
 
-	return r0;
+	return 0;
 
 badframe:
 	force_sig(SIGSEGV, current);

+ 1 - 1
arch/x86/boot/compressed/misc.c

@@ -355,7 +355,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
 	if (heap > 0x3fffffffffffUL)
 		error("Destination address too large");
 #else
-	if (heap > ((-__PAGE_OFFSET-(512<<20)-1) & 0x7fffffff))
+	if (heap > ((-__PAGE_OFFSET-(128<<20)-1) & 0x7fffffff))
 		error("Destination address too large");
 #endif
 #ifndef CONFIG_RELOCATABLE

+ 3 - 0
arch/x86/include/asm/e820.h

@@ -72,6 +72,9 @@ struct e820map {
 #define BIOS_BEGIN		0x000a0000
 #define BIOS_END		0x00100000
 
+#define BIOS_ROM_BASE		0xffe00000
+#define BIOS_ROM_END		0xffffffff
+
 #ifdef __KERNEL__
 /* see comment in arch/x86/kernel/e820.c */
 extern struct e820map e820;

+ 1 - 1
arch/x86/include/asm/kvm_host.h

@@ -79,7 +79,7 @@
 #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
 #define KVM_MIN_FREE_MMU_PAGES 5
 #define KVM_REFILL_PAGES 25
-#define KVM_MAX_CPUID_ENTRIES 40
+#define KVM_MAX_CPUID_ENTRIES 80
 #define KVM_NR_FIXED_MTRR_REGION 88
 #define KVM_NR_VAR_MTRR 8
 

+ 1 - 0
arch/x86/kernel/Makefile

@@ -45,6 +45,7 @@ obj-y			+= pci-dma.o quirks.o i8237.o topology.o kdebugfs.o
 obj-y			+= alternative.o i8253.o pci-nommu.o hw_breakpoint.o
 obj-y			+= tsc.o io_delay.o rtc.o
 obj-y			+= pci-iommu_table.o
+obj-y			+= resource.o
 
 obj-$(CONFIG_X86_TRAMPOLINE)	+= trampoline.o
 obj-y				+= process.o

+ 8 - 0
arch/x86/kernel/apic/apic.c

@@ -1389,6 +1389,14 @@ void __cpuinit end_local_APIC_setup(void)
 
 	setup_apic_nmi_watchdog(NULL);
 	apic_pm_activate();
+
+	/*
+	 * Now that local APIC setup is completed for BP, configure the fault
+	 * handling for interrupt remapping.
+	 */
+	if (!smp_processor_id() && intr_remapping_enabled)
+		enable_drhd_fault_handling();
+
 }
 
 #ifdef CONFIG_X86_X2APIC

+ 2 - 2
arch/x86/kernel/apic/io_apic.c

@@ -2430,13 +2430,12 @@ static void ack_apic_level(struct irq_data *data)
 {
 	struct irq_cfg *cfg = data->chip_data;
 	int i, do_unmask_irq = 0, irq = data->irq;
-	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long v;
 
 	irq_complete_move(cfg);
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	/* If we are moving the irq we need to mask it */
-	if (unlikely(desc->status & IRQ_MOVE_PENDING)) {
+	if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
 		do_unmask_irq = 1;
 		mask_ioapic(cfg);
 	}
@@ -3413,6 +3412,7 @@ dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	msg.data |= MSI_DATA_VECTOR(cfg->vector);
 	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
 	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
+	msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest);
 
 	dmar_msi_write(irq, &msg);
 

+ 0 - 7
arch/x86/kernel/apic/probe_64.c

@@ -79,13 +79,6 @@ void __init default_setup_apic_routing(void)
 		/* need to update phys_pkg_id */
 		apic->phys_pkg_id = apicid_phys_pkg_id;
 	}
-
-	/*
-	 * Now that apic routing model is selected, configure the
-	 * fault handling for intr remapping.
-	 */
-	if (intr_remapping_enabled)
-		enable_drhd_fault_handling();
 }
 
 /* Same for both flat and physical. */

+ 7 - 5
arch/x86/kernel/head_32.S

@@ -60,16 +60,18 @@
 #define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
 #endif
 
+/* Number of possible pages in the lowmem region */
+LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
+	
 /* Enough space to fit pagetables for the low memory linear map */
-MAPPING_BEYOND_END = \
-	PAGE_TABLE_SIZE(((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT) << PAGE_SHIFT
+MAPPING_BEYOND_END = PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT
 
 /*
  * Worst-case size of the kernel mapping we need to make:
- * the worst-case size of the kernel itself, plus the extra we need
- * to map for the linear map.
+ * a relocatable kernel can live anywhere in lowmem, so we need to be able
+ * to map all of lowmem.
  */
-KERNEL_PAGES = (KERNEL_IMAGE_SIZE + MAPPING_BEYOND_END)>>PAGE_SHIFT
+KERNEL_PAGES = LOWMEM_PAGES
 
 INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
 RESERVE_BRK(pagetables, INIT_MAP_SIZE)

+ 16 - 10
arch/x86/kernel/hpet.c

@@ -27,6 +27,9 @@
 #define HPET_DEV_FSB_CAP		0x1000
 #define HPET_DEV_PERI_CAP		0x2000
 
+#define HPET_MIN_CYCLES			128
+#define HPET_MIN_PROG_DELTA		(HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
+
 #define EVT_TO_HPET_DEV(evt) container_of(evt, struct hpet_dev, evt)
 
 /*
@@ -299,8 +302,9 @@ static void hpet_legacy_clockevent_register(void)
 	/* Calculate the min / max delta */
 	hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
 							   &hpet_clockevent);
-	/* 5 usec minimum reprogramming delta. */
-	hpet_clockevent.min_delta_ns = 5000;
+	/* Setup minimum reprogramming delta. */
+	hpet_clockevent.min_delta_ns = clockevent_delta2ns(HPET_MIN_PROG_DELTA,
+							   &hpet_clockevent);
 
 	/*
 	 * Start hpet with the boot cpu mask and make it
@@ -393,22 +397,24 @@ static int hpet_next_event(unsigned long delta,
 	 * the wraparound into account) nor a simple count down event
 	 * mode. Further the write to the comparator register is
 	 * delayed internally up to two HPET clock cycles in certain
-	 * chipsets (ATI, ICH9,10). We worked around that by reading
-	 * back the compare register, but that required another
-	 * workaround for ICH9,10 chips where the first readout after
-	 * write can return the old stale value. We already have a
-	 * minimum delta of 5us enforced, but a NMI or SMI hitting
+	 * chipsets (ATI, ICH9,10). Some newer AMD chipsets have even
+	 * longer delays. We worked around that by reading back the
+	 * compare register, but that required another workaround for
+	 * ICH9,10 chips where the first readout after write can
+	 * return the old stale value. We already had a minimum
+	 * programming delta of 5us enforced, but a NMI or SMI hitting
 	 * between the counter readout and the comparator write can
 	 * move us behind that point easily. Now instead of reading
 	 * the compare register back several times, we make the ETIME
 	 * decision based on the following: Return ETIME if the
-	 * counter value after the write is less than 8 HPET cycles
+	 * counter value after the write is less than HPET_MIN_CYCLES
 	 * away from the event or if the counter is already ahead of
-	 * the event.
+	 * the event. The minimum programming delta for the generic
+	 * clockevents code is set to 1.5 * HPET_MIN_CYCLES.
 	 */
 	res = (s32)(cnt - hpet_readl(HPET_COUNTER));
 
-	return res < 8 ? -ETIME : 0;
+	return res < HPET_MIN_CYCLES ? -ETIME : 0;
 }
 
 static void hpet_legacy_set_mode(enum clock_event_mode mode,

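Note (illustrative arithmetic, not part of the patch above): with HPET_MIN_CYCLES = 128, the new minimum programming delta is HPET_MIN_PROG_DELTA = 128 + (128 >> 1) = 192 HPET cycles. At the common 14.318 MHz HPET clock rate, clockevent_delta2ns() turns that into roughly 192 / 14 318 180 s, about 13.4 µs, replacing the previous hard-coded 5 µs minimum, while hpet_next_event() itself now returns -ETIME whenever the counter lands within 128 cycles of the programmed comparator value.
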
+ 5 - 11
arch/x86/kernel/microcode_intel.c

@@ -364,8 +364,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 
 		/* For performance reasons, reuse mc area when possible */
 		if (!mc || mc_size > curr_mc_size) {
-			if (mc)
-				vfree(mc);
+			vfree(mc);
 			mc = vmalloc(mc_size);
 			if (!mc)
 				break;
@@ -374,13 +373,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 
 		if (get_ucode_data(mc, ucode_ptr, mc_size) ||
 		    microcode_sanity_check(mc) < 0) {
-			vfree(mc);
 			break;
 		}
 
 		if (get_matching_microcode(&uci->cpu_sig, mc, new_rev)) {
-			if (new_mc)
-				vfree(new_mc);
+			vfree(new_mc);
 			new_rev = mc_header.rev;
 			new_mc  = mc;
 			mc = NULL;	/* trigger new vmalloc */
@@ -390,12 +387,10 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 		leftover  -= mc_size;
 	}
 
-	if (mc)
-		vfree(mc);
+	vfree(mc);
 
 	if (leftover) {
-		if (new_mc)
-			vfree(new_mc);
+		vfree(new_mc);
 		state = UCODE_ERROR;
 		goto out;
 	}
@@ -405,8 +400,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 		goto out;
 	}
 
-	if (uci->mc)
-		vfree(uci->mc);
+	vfree(uci->mc);
 	uci->mc = (struct microcode_intel *)new_mc;
 
 	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",

+ 48 - 0
arch/x86/kernel/resource.c

@@ -0,0 +1,48 @@
+#include <linux/ioport.h>
+#include <asm/e820.h>
+
+static void resource_clip(struct resource *res, resource_size_t start,
+			  resource_size_t end)
+{
+	resource_size_t low = 0, high = 0;
+
+	if (res->end < start || res->start > end)
+		return;		/* no conflict */
+
+	if (res->start < start)
+		low = start - res->start;
+
+	if (res->end > end)
+		high = res->end - end;
+
+	/* Keep the area above or below the conflict, whichever is larger */
+	if (low > high)
+		res->end = start - 1;
+	else
+		res->start = end + 1;
+}
+
+static void remove_e820_regions(struct resource *avail)
+{
+	int i;
+	struct e820entry *entry;
+
+	for (i = 0; i < e820.nr_map; i++) {
+		entry = &e820.map[i];
+
+		resource_clip(avail, entry->addr,
+			      entry->addr + entry->size - 1);
+	}
+}
+
+void arch_remove_reservations(struct resource *avail)
+{
+	/* Trim out BIOS areas (low 1MB and high 2MB) and E820 regions */
+	if (avail->flags & IORESOURCE_MEM) {
+		if (avail->start < BIOS_END)
+			avail->start = BIOS_END;
+		resource_clip(avail, BIOS_ROM_BASE, BIOS_ROM_END);
+
+		remove_e820_regions(avail);
+	}
+}

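Note (illustration, not part of the patch above): resource_clip() in the new resource.c keeps whichever side of a conflicting range is larger. A standalone restatement of that rule under a hypothetical name (the function itself is static to the file); for example, a resource 0x9000-0xffff clipped against a reserved 0xa000-0xbfff keeps the larger upper piece 0xc000-0xffff:

	#include <linux/ioport.h>

	/* Illustration only -- mirrors the logic of the static resource_clip(). */
	static void clip_keep_larger(struct resource *res, resource_size_t start,
				     resource_size_t end)
	{
		resource_size_t low = 0, high = 0;

		if (res->end < start || res->start > end)
			return;				/* no overlap, nothing to do */
		if (res->start < start)
			low = start - res->start;	/* size of the piece below the conflict */
		if (res->end > end)
			high = res->end - end;		/* size of the piece above the conflict */
		if (low > high)
			res->end = start - 1;		/* keep the lower piece */
		else
			res->start = end + 1;		/* keep the upper piece */
	}
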
+ 14 - 4
arch/x86/kernel/setup.c

@@ -501,7 +501,18 @@ static inline unsigned long long get_total_mem(void)
 	return total << PAGE_SHIFT;
 }
 
-#define DEFAULT_BZIMAGE_ADDR_MAX 0x37FFFFFF
+/*
+ * Keep the crash kernel below this limit.  On 32 bits earlier kernels
+ * would limit the kernel to the low 512 MiB due to mapping restrictions.
+ * On 64 bits, kexec-tools currently limits us to 896 MiB; increase this
+ * limit once kexec-tools are fixed.
+ */
+#ifdef CONFIG_X86_32
+# define CRASH_KERNEL_ADDR_MAX	(512 << 20)
+#else
+# define CRASH_KERNEL_ADDR_MAX	(896 << 20)
+#endif
+
 static void __init reserve_crashkernel(void)
 {
 	unsigned long long total_mem;
@@ -520,10 +531,10 @@ static void __init reserve_crashkernel(void)
 		const unsigned long long alignment = 16<<20;	/* 16M */
 
 		/*
-		 *  kexec want bzImage is below DEFAULT_BZIMAGE_ADDR_MAX
+		 *  kexec want bzImage is below CRASH_KERNEL_ADDR_MAX
 		 */
 		crash_base = memblock_find_in_range(alignment,
-			       DEFAULT_BZIMAGE_ADDR_MAX, crash_size, alignment);
+			       CRASH_KERNEL_ADDR_MAX, crash_size, alignment);
 
 		if (crash_base == MEMBLOCK_ERROR) {
 			pr_info("crashkernel reservation failed - No suitable area found.\n");
@@ -769,7 +780,6 @@ void __init setup_arch(char **cmdline_p)
 
 	x86_init.oem.arch_setup();
 
-	resource_alloc_from_bottom = 0;
 	iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
 	setup_memory_map();
 	parse_setup_data();

+ 2 - 1
arch/x86/kernel/xsave.c

@@ -394,7 +394,8 @@ static void __init setup_xstate_init(void)
 	 * Setup init_xstate_buf to represent the init state of
 	 * all the features managed by the xsave
 	 */
-	init_xstate_buf = alloc_bootmem(xstate_size);
+	init_xstate_buf = alloc_bootmem_align(xstate_size,
+					      __alignof__(struct xsave_struct));
 	init_xstate_buf->i387.mxcsr = MXCSR_DEFAULT;
 
 	clts();

+ 2 - 0
arch/x86/kvm/i8259.c

@@ -575,6 +575,8 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm)
 	s->pics[1].elcr_mask = 0xde;
 	s->pics[0].pics_state = s;
 	s->pics[1].pics_state = s;
+	s->pics[0].isr_ack = 0xff;
+	s->pics[1].isr_ack = 0xff;
 
 	/*
 	 * Initialize PIO device

+ 2 - 1
arch/x86/kvm/mmu.c

@@ -2394,7 +2394,8 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 			ASSERT(!VALID_PAGE(root));
 			spin_lock(&vcpu->kvm->mmu_lock);
 			kvm_mmu_free_some_pages(vcpu);
-			sp = kvm_mmu_get_page(vcpu, i << 30, i << 30,
+			sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
+					      i << 30,
 					      PT32_ROOT_LEVEL, 1, ACC_ALL,
 					      NULL);
 			root = __pa(sp->spt);

+ 4 - 0
arch/x86/kvm/svm.c

@@ -3494,6 +3494,10 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
 static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
 {
 	switch (func) {
+	case 0x00000001:
+		/* Mask out xsave bit as long as it is not supported by SVM */
+		entry->ecx &= ~(bit(X86_FEATURE_XSAVE));
+		break;
 	case 0x80000001:
 		if (nested)
 			entry->ecx |= (1 << 2); /* Set SVM bit */

+ 0 - 5
arch/x86/kvm/vmx.c

@@ -4227,11 +4227,6 @@ static int vmx_get_lpage_level(void)
 		return PT_PDPE_LEVEL;
 }
 
-static inline u32 bit(int bitno)
-{
-	return 1 << (bitno & 31);
-}
-
 static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;

+ 5 - 6
arch/x86/kvm/x86.c

@@ -155,11 +155,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 
 u64 __read_mostly host_xcr0;
 
-static inline u32 bit(int bitno)
-{
-	return 1 << (bitno & 31);
-}
-
 static void kvm_on_user_return(struct user_return_notifier *urn)
 {
 	unsigned slot;
@@ -4569,9 +4564,11 @@ static void kvm_timer_init(void)
 #ifdef CONFIG_CPU_FREQ
 		struct cpufreq_policy policy;
 		memset(&policy, 0, sizeof(policy));
-		cpufreq_get_policy(&policy, get_cpu());
+		cpu = get_cpu();
+		cpufreq_get_policy(&policy, cpu);
 		if (policy.cpuinfo.max_freq)
 			max_tsc_khz = policy.cpuinfo.max_freq;
+		put_cpu();
 #endif
 		cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
 					  CPUFREQ_TRANSITION_NOTIFIER);
@@ -5522,6 +5519,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
 	mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
 	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
+	if (sregs->cr4 & X86_CR4_OSXSAVE)
+		update_cpuid(vcpu);
 	if (!is_long_mode(vcpu) && is_pae(vcpu)) {
 		load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
 		mmu_reset_needed = 1;

+ 5 - 0
arch/x86/kvm/x86.h

@@ -70,6 +70,11 @@ static inline int is_paging(struct kvm_vcpu *vcpu)
 	return kvm_read_cr0_bits(vcpu, X86_CR0_PG);
 }
 
+static inline u32 bit(int bitno)
+{
+	return 1 << (bitno & 31);
+}
+
 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq);

+ 16 - 8
arch/x86/oprofile/op_model_amd.c

@@ -630,21 +630,29 @@ static int __init_ibs_nmi(void)
 	return 0;
 }
 
-/* initialize the APIC for the IBS interrupts if available */
+/*
+ * check and reserve APIC extended interrupt LVT offset for IBS if
+ * available
+ *
+ * init_ibs() preforms implicitly cpu-local operations, so pin this
+ * thread to its current CPU
+ */
+
 static void init_ibs(void)
 {
-	ibs_caps = get_ibs_caps();
+	preempt_disable();
 
+	ibs_caps = get_ibs_caps();
 	if (!ibs_caps)
-		return;
+		goto out;
 
-	if (__init_ibs_nmi()) {
+	if (__init_ibs_nmi() < 0)
 		ibs_caps = 0;
-		return;
-	}
+	else
+		printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
 
-	printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n",
-	       (unsigned)ibs_caps);
+out:
+	preempt_enable();
 }
 
 static int (*create_arch_files)(struct super_block *sb, struct dentry *root);

+ 5 - 13
arch/x86/pci/i386.c

@@ -65,21 +65,13 @@ pcibios_align_resource(void *data, const struct resource *res,
 			resource_size_t size, resource_size_t align)
 {
 	struct pci_dev *dev = data;
-	resource_size_t start = round_down(res->end - size + 1, align);
+	resource_size_t start = res->start;
 
 	if (res->flags & IORESOURCE_IO) {
-
-		/*
-		 * If we're avoiding ISA aliases, the largest contiguous I/O
-		 * port space is 256 bytes.  Clearing bits 9 and 10 preserves
-		 * all 256-byte and smaller alignments, so the result will
-		 * still be correctly aligned.
-		 */
-		if (!skip_isa_ioresource_align(dev))
-			start &= ~0x300;
-	} else if (res->flags & IORESOURCE_MEM) {
-		if (start < BIOS_END)
-			start = res->end;	/* fail; no space */
+		if (skip_isa_ioresource_align(dev))
+			return start;
+		if (start & 0x300)
+			start = (start + 0x3ff) & ~0x3ff;
 	}
 	return start;
 }

+ 2 - 2
arch/x86/vdso/Makefile

@@ -25,7 +25,7 @@ targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y)
 
 export CPPFLAGS_vdso.lds += -P -C
 
-VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -Wl,-soname=linux-vdso.so.1 \
+VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
 		      	-Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
 
 $(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so
@@ -69,7 +69,7 @@ vdso32.so-$(VDSO32-y)		+= sysenter
 vdso32-images			= $(vdso32.so-y:%=vdso32-%.so)
 
 CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
-VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -Wl,-soname=linux-gate.so.1
+VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-soname=linux-gate.so.1
 
 # This makes sure the $(obj) subdirectory exists even though vdso32/
 # is not a kbuild sub-make subdirectory.

+ 3 - 2
block/blk-map.c

@@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	for (i = 0; i < iov_count; i++) {
 		unsigned long uaddr = (unsigned long)iov[i].iov_base;
 
+		if (!iov[i].iov_len)
+			return -EINVAL;
+
 		if (uaddr & queue_dma_alignment(q)) {
 			unaligned = 1;
 			break;
 		}
-		if (!iov[i].iov_len)
-			return -EINVAL;
 	}
 
 	if (unaligned || (q->dma_pad_mask & len) || map_data)

Some files were not shown because too many files were changed in this diff