فهرست منبع

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial: (47 commits)
  doc: CONFIG_UNEVICTABLE_LRU doesn't exist anymore
  Update cpuset info & webiste for cgroups
  dcdbas: force SMI to happen when expected
  arch/arm/Kconfig: remove one to many l's in the word.
  asm-generic/user.h: Fix spelling in comment
  drm: fix printk typo 'sracth'
  Remove one to many n's in a word
  Documentation/filesystems/romfs.txt: fixing link to genromfs
  drivers:scsi Change printk typo initate -> initiate
  serial, pch uart: Remove duplicate inclusion of linux/pci.h header
  fs/eventpoll.c: fix spelling
  mm: Fix out-of-date comments which refers non-existent functions
  drm: Fix printk typo 'failled'
  coh901318.c: Change initate to initiate.
  mbox-db5500.c Change initate to initiate.
  edac: correct i82975x error-info reported
  edac: correct i82975x mci initialisation
  edac: correct commented info
  fs: update comments to point correct document
  target: remove duplicate include of target/target_core_device.h from drivers/target/target_core_hba.c
  ...

Trivial conflict in fs/eventpoll.c (spelling vs addition)
Linus Torvalds 14 سال پیش
والد
کامیت
e16b396ce3
83 فایل تغییر یافته به همراه 228 افزوده شده و 214 حذف شده
  1. 9 8
      Documentation/cgroups/cpusets.txt
  2. 3 2
      Documentation/cgroups/memory.txt
  3. 1 1
      Documentation/device-mapper/dm-crypt.txt
  4. 1 2
      Documentation/filesystems/romfs.txt
  5. 1 1
      Documentation/kbuild/kbuild.txt
  6. 2 1
      Documentation/kbuild/makefiles.txt
  7. 48 48
      Documentation/kvm/api.txt
  8. 1 1
      Documentation/sysctl/kernel.txt
  9. 1 2
      Documentation/vm/unevictable-lru.txt
  10. 8 7
      MAINTAINERS
  11. 1 1
      Makefile
  12. 1 1
      arch/alpha/include/asm/cacheflush.h
  13. 1 1
      arch/arm/mach-ux500/mbox-db5500.c
  14. 1 1
      arch/arm/plat-omap/Kconfig
  15. 1 1
      arch/avr32/mm/cache.c
  16. 1 1
      arch/cris/arch-v10/mm/init.c
  17. 1 1
      arch/ia64/include/asm/perfmon.h
  18. 1 1
      arch/x86/kernel/apic/io_apic.c
  19. 1 1
      arch/x86/oprofile/op_model_p4.c
  20. 1 1
      arch/xtensa/configs/s6105_defconfig
  21. 1 1
      drivers/atm/firestream.c
  22. 1 1
      drivers/block/smart1,2.h
  23. 2 2
      drivers/bluetooth/btusb.c
  24. 1 1
      drivers/cpuidle/sysfs.c
  25. 2 2
      drivers/dma/coh901318.c
  26. 1 1
      drivers/dma/shdma.c
  27. 1 1
      drivers/dma/timb_dma.c
  28. 1 1
      drivers/edac/i7300_edac.c
  29. 44 25
      drivers/edac/i82975x_edac.c
  30. 3 1
      drivers/firmware/dcdbas.c
  31. 1 3
      drivers/gpu/drm/drm_sman.c
  32. 1 1
      drivers/gpu/drm/radeon/evergreen.c
  33. 5 5
      drivers/gpu/drm/radeon/r100.c
  34. 2 2
      drivers/gpu/drm/radeon/r300.c
  35. 2 2
      drivers/gpu/drm/radeon/r420.c
  36. 2 2
      drivers/gpu/drm/radeon/r520.c
  37. 1 1
      drivers/gpu/drm/radeon/r600.c
  38. 1 1
      drivers/gpu/drm/radeon/radeon_ring.c
  39. 2 2
      drivers/gpu/drm/radeon/rs400.c
  40. 2 2
      drivers/gpu/drm/radeon/rs600.c
  41. 2 2
      drivers/gpu/drm/radeon/rs690.c
  42. 2 2
      drivers/gpu/drm/radeon/rv515.c
  43. 1 1
      drivers/gpu/drm/radeon/rv770.c
  44. 3 3
      drivers/isdn/mISDN/hwchannel.c
  45. 1 2
      drivers/message/i2o/i2o_config.c
  46. 2 3
      drivers/mtd/nand/mxc_nand.c
  47. 2 2
      drivers/net/atl1c/atl1c.h
  48. 1 1
      drivers/net/qla3xxx.c
  49. 1 1
      drivers/net/sungem.h
  50. 0 1
      drivers/platform/x86/acer-wmi.c
  51. 1 1
      drivers/scsi/aic7xxx/aic79xx.h
  52. 1 1
      drivers/scsi/aic7xxx/aic7xxx.h
  53. 1 1
      drivers/scsi/aic7xxx/aic7xxx_core.c
  54. 2 2
      drivers/scsi/megaraid.c
  55. 1 1
      drivers/scsi/megaraid/megaraid_sas_base.c
  56. 4 6
      drivers/scsi/osst.c
  57. 1 1
      drivers/scsi/qla4xxx/ql4_isr.c
  58. 1 1
      drivers/scsi/qla4xxx/ql4_os.c
  59. 0 1
      drivers/target/target_core_hba.c
  60. 1 1
      drivers/tty/hvc/hvcs.c
  61. 0 1
      drivers/tty/serial/pch_uart.c
  62. 1 1
      drivers/watchdog/sbc_epx_c3.c
  63. 1 1
      fs/btrfs/disk-io.c
  64. 2 2
      fs/dcache.c
  65. 3 3
      fs/direct-io.c
  66. 6 6
      fs/eventpoll.c
  67. 2 2
      fs/ext4/extents.c
  68. 1 1
      fs/fuse/cuse.c
  69. 1 1
      fs/notify/fanotify/fanotify_user.c
  70. 1 1
      fs/notify/inotify/inotify_user.c
  71. 1 1
      fs/ocfs2/dir.c
  72. 2 2
      include/asm-generic/user.h
  73. 1 1
      include/linux/mmzone.h
  74. 3 3
      init/Kconfig
  75. 1 1
      kernel/trace/ring_buffer.c
  76. 3 3
      mm/memory.c
  77. 1 1
      mm/mempolicy.c
  78. 1 1
      mm/shmem.c
  79. 2 2
      net/core/dev_addr_lists.c
  80. 1 1
      net/ipv6/inet6_hashtables.c
  81. 1 1
      net/mac80211/tx.c
  82. 2 2
      sound/pci/au88x0/au88x0.h
  83. 2 2
      sound/pci/au88x0/au88x0_core.c

+ 9 - 8
Documentation/cgroups/cpusets.txt

@@ -693,7 +693,7 @@ There are ways to query or modify cpusets:
  - via the C library libcgroup.
  - via the C library libcgroup.
    (http://sourceforge.net/projects/libcg/)
    (http://sourceforge.net/projects/libcg/)
  - via the python application cset.
  - via the python application cset.
-   (http://developer.novell.com/wiki/index.php/Cpuset)
+   (http://code.google.com/p/cpuset/)
 
 
 The sched_setaffinity calls can also be done at the shell prompt using
 The sched_setaffinity calls can also be done at the shell prompt using
 SGI's runon or Robert Love's taskset.  The mbind and set_mempolicy
 SGI's runon or Robert Love's taskset.  The mbind and set_mempolicy
@@ -725,13 +725,14 @@ Now you want to do something with this cpuset.
 
 
 In this directory you can find several files:
 In this directory you can find several files:
 # ls
 # ls
-cpuset.cpu_exclusive       cpuset.memory_spread_slab
-cpuset.cpus                cpuset.mems
-cpuset.mem_exclusive       cpuset.sched_load_balance
-cpuset.mem_hardwall        cpuset.sched_relax_domain_level
-cpuset.memory_migrate      notify_on_release
-cpuset.memory_pressure     tasks
-cpuset.memory_spread_page
+cgroup.clone_children  cpuset.memory_pressure
+cgroup.event_control   cpuset.memory_spread_page
+cgroup.procs           cpuset.memory_spread_slab
+cpuset.cpu_exclusive   cpuset.mems
+cpuset.cpus            cpuset.sched_load_balance
+cpuset.mem_exclusive   cpuset.sched_relax_domain_level
+cpuset.mem_hardwall    notify_on_release
+cpuset.memory_migrate  tasks
 
 
 Reading them will give you information about the state of this cpuset:
 Reading them will give you information about the state of this cpuset:
 the CPUs and Memory Nodes it can use, the processes that are using
 the CPUs and Memory Nodes it can use, the processes that are using

+ 3 - 2
Documentation/cgroups/memory.txt

@@ -485,8 +485,9 @@ The feature can be disabled by
 
 
 # echo 0 > memory.use_hierarchy
 # echo 0 > memory.use_hierarchy
 
 
-NOTE1: Enabling/disabling will fail if the cgroup already has other
-       cgroups created below it.
+NOTE1: Enabling/disabling will fail if either the cgroup already has other
+       cgroups created below it, or if the parent cgroup has use_hierarchy
+       enabled.
 
 
 NOTE2: When panic_on_oom is set to "2", the whole system will panic in
 NOTE2: When panic_on_oom is set to "2", the whole system will panic in
        case of an OOM event in any cgroup.
        case of an OOM event in any cgroup.

+ 1 - 1
Documentation/device-mapper/dm-crypt.txt

@@ -41,7 +41,7 @@ Example scripts
 ===============
 ===============
 LUKS (Linux Unified Key Setup) is now the preferred way to set up disk
 LUKS (Linux Unified Key Setup) is now the preferred way to set up disk
 encryption with dm-crypt using the 'cryptsetup' utility, see
 encryption with dm-crypt using the 'cryptsetup' utility, see
-http://clemens.endorphin.org/cryptography
+http://code.google.com/p/cryptsetup/
 
 
 [[
 [[
 #!/bin/sh
 #!/bin/sh

+ 1 - 2
Documentation/filesystems/romfs.txt

@@ -17,8 +17,7 @@ comparison, an actual rescue disk used up 3202 blocks with ext2, while
 with romfs, it needed 3079 blocks.
 with romfs, it needed 3079 blocks.
 
 
 To create such a file system, you'll need a user program named
 To create such a file system, you'll need a user program named
-genromfs.  It is available via anonymous ftp on sunsite.unc.edu and
-its mirrors, in the /pub/Linux/system/recovery/ directory.
+genromfs. It is available on http://romfs.sourceforge.net/
 
 
 As the name suggests, romfs could be also used (space-efficiently) on
 As the name suggests, romfs could be also used (space-efficiently) on
 various read-only media, like (E)EPROM disks if someone will have the
 various read-only media, like (E)EPROM disks if someone will have the

+ 1 - 1
Documentation/kbuild/kbuild.txt

@@ -146,7 +146,7 @@ INSTALL_MOD_STRIP
 INSTALL_MOD_STRIP, if defined, will cause modules to be
 INSTALL_MOD_STRIP, if defined, will cause modules to be
 stripped after they are installed.  If INSTALL_MOD_STRIP is '1', then
 stripped after they are installed.  If INSTALL_MOD_STRIP is '1', then
 the default option --strip-debug will be used.  Otherwise,
 the default option --strip-debug will be used.  Otherwise,
-INSTALL_MOD_STRIP will used as the options to the strip command.
+INSTALL_MOD_STRIP value will be used as the options to the strip command.
 
 
 INSTALL_FW_PATH
 INSTALL_FW_PATH
 --------------------------------------------------
 --------------------------------------------------

+ 2 - 1
Documentation/kbuild/makefiles.txt

@@ -1325,7 +1325,8 @@ The top Makefile exports the following variables:
 	If this variable is specified, will cause modules to be stripped
 	If this variable is specified, will cause modules to be stripped
 	after they are installed.  If INSTALL_MOD_STRIP is '1', then the
 	after they are installed.  If INSTALL_MOD_STRIP is '1', then the
 	default option --strip-debug will be used.  Otherwise,
 	default option --strip-debug will be used.  Otherwise,
-	INSTALL_MOD_STRIP will used as the option(s) to the strip command.
+	INSTALL_MOD_STRIP value will be used as the option(s) to the strip
+	command.
 
 
 
 
 === 9 Makefile language
 === 9 Makefile language

+ 48 - 48
Documentation/kvm/api.txt

@@ -166,7 +166,7 @@ Returns: 0 on success, -1 on error
 
 
 This ioctl is obsolete and has been removed.
 This ioctl is obsolete and has been removed.
 
 
-4.6 KVM_CREATE_VCPU
+4.7 KVM_CREATE_VCPU
 
 
 Capability: basic
 Capability: basic
 Architectures: all
 Architectures: all
@@ -177,7 +177,7 @@ Returns: vcpu fd on success, -1 on error
 This API adds a vcpu to a virtual machine.  The vcpu id is a small integer
 This API adds a vcpu to a virtual machine.  The vcpu id is a small integer
 in the range [0, max_vcpus).
 in the range [0, max_vcpus).
 
 
-4.7 KVM_GET_DIRTY_LOG (vm ioctl)
+4.8 KVM_GET_DIRTY_LOG (vm ioctl)
 
 
 Capability: basic
 Capability: basic
 Architectures: x86
 Architectures: x86
@@ -200,7 +200,7 @@ since the last call to this ioctl.  Bit 0 is the first page in the
 memory slot.  Ensure the entire structure is cleared to avoid padding
 memory slot.  Ensure the entire structure is cleared to avoid padding
 issues.
 issues.
 
 
-4.8 KVM_SET_MEMORY_ALIAS
+4.9 KVM_SET_MEMORY_ALIAS
 
 
 Capability: basic
 Capability: basic
 Architectures: x86
 Architectures: x86
@@ -210,7 +210,7 @@ Returns: 0 (success), -1 (error)
 
 
 This ioctl is obsolete and has been removed.
 This ioctl is obsolete and has been removed.
 
 
-4.9 KVM_RUN
+4.10 KVM_RUN
 
 
 Capability: basic
 Capability: basic
 Architectures: all
 Architectures: all
@@ -226,7 +226,7 @@ obtained by mmap()ing the vcpu fd at offset 0, with the size given by
 KVM_GET_VCPU_MMAP_SIZE.  The parameter block is formatted as a 'struct
 KVM_GET_VCPU_MMAP_SIZE.  The parameter block is formatted as a 'struct
 kvm_run' (see below).
 kvm_run' (see below).
 
 
-4.10 KVM_GET_REGS
+4.11 KVM_GET_REGS
 
 
 Capability: basic
 Capability: basic
 Architectures: all
 Architectures: all
@@ -246,7 +246,7 @@ struct kvm_regs {
 	__u64 rip, rflags;
 	__u64 rip, rflags;
 };
 };
 
 
-4.11 KVM_SET_REGS
+4.12 KVM_SET_REGS
 
 
 Capability: basic
 Capability: basic
 Architectures: all
 Architectures: all
@@ -258,7 +258,7 @@ Writes the general purpose registers into the vcpu.
 
 
 See KVM_GET_REGS for the data structure.
 See KVM_GET_REGS for the data structure.
 
 
-4.12 KVM_GET_SREGS
+4.13 KVM_GET_SREGS
 
 
 Capability: basic
 Capability: basic
 Architectures: x86
 Architectures: x86
@@ -283,7 +283,7 @@ interrupt_bitmap is a bitmap of pending external interrupts.  At most
 one bit may be set.  This interrupt has been acknowledged by the APIC
 one bit may be set.  This interrupt has been acknowledged by the APIC
 but not yet injected into the cpu core.
 but not yet injected into the cpu core.
 
 
-4.13 KVM_SET_SREGS
+4.14 KVM_SET_SREGS
 
 
 Capability: basic
 Capability: basic
 Architectures: x86
 Architectures: x86
@@ -294,7 +294,7 @@ Returns: 0 on success, -1 on error
 Writes special registers into the vcpu.  See KVM_GET_SREGS for the
 Writes special registers into the vcpu.  See KVM_GET_SREGS for the
 data structures.
 data structures.
 
 
-4.14 KVM_TRANSLATE
+4.15 KVM_TRANSLATE
 
 
 Capability: basic
 Capability: basic
 Architectures: x86
 Architectures: x86
@@ -317,7 +317,7 @@ struct kvm_translation {
 	__u8  pad[5];
 	__u8  pad[5];
 };
 };
 
 
-4.15 KVM_INTERRUPT
+4.16 KVM_INTERRUPT
 
 
 Capability: basic
 Capability: basic
 Architectures: x86, ppc
 Architectures: x86, ppc
@@ -365,7 +365,7 @@ c) KVM_INTERRUPT_SET_LEVEL
 Note that any value for 'irq' other than the ones stated above is invalid
 Note that any value for 'irq' other than the ones stated above is invalid
 and incurs unexpected behavior.
 and incurs unexpected behavior.
 
 
-4.16 KVM_DEBUG_GUEST
+4.17 KVM_DEBUG_GUEST
 
 
 Capability: basic
 Capability: basic
 Architectures: none
 Architectures: none
@@ -375,7 +375,7 @@ Returns: -1 on error
 
 
 Support for this has been removed.  Use KVM_SET_GUEST_DEBUG instead.
 Support for this has been removed.  Use KVM_SET_GUEST_DEBUG instead.
 
 
-4.17 KVM_GET_MSRS
+4.18 KVM_GET_MSRS
 
 
 Capability: basic
 Capability: basic
 Architectures: x86
 Architectures: x86
@@ -403,7 +403,7 @@ Application code should set the 'nmsrs' member (which indicates the
 size of the entries array) and the 'index' member of each array entry.
 size of the entries array) and the 'index' member of each array entry.
 kvm will fill in the 'data' member.
 kvm will fill in the 'data' member.
 
 
-4.18 KVM_SET_MSRS
+4.19 KVM_SET_MSRS
 
 
 Capability: basic
 Capability: basic
 Architectures: x86
 Architectures: x86
@@ -418,7 +418,7 @@ Application code should set the 'nmsrs' member (which indicates the
 size of the entries array), and the 'index' and 'data' members of each
 size of the entries array), and the 'index' and 'data' members of each
 array entry.
 array entry.
 
 
-4.19 KVM_SET_CPUID
+4.20 KVM_SET_CPUID
 
 
 Capability: basic
 Capability: basic
 Architectures: x86
 Architectures: x86
@@ -446,7 +446,7 @@ struct kvm_cpuid {
 	struct kvm_cpuid_entry entries[0];
 	struct kvm_cpuid_entry entries[0];
 };
 };
 
 
-4.20 KVM_SET_SIGNAL_MASK
+4.21 KVM_SET_SIGNAL_MASK
 
 
 Capability: basic
 Capability: basic
 Architectures: x86
 Architectures: x86
@@ -468,7 +468,7 @@ struct kvm_signal_mask {
 	__u8  sigset[0];
 	__u8  sigset[0];
 };
 };
 
 
-4.21 KVM_GET_FPU
+4.22 KVM_GET_FPU
 
 
 Capability: basic
 Capability: basic
 Architectures: x86
 Architectures: x86
@@ -493,7 +493,7 @@ struct kvm_fpu {
 	__u32 pad2;
 	__u32 pad2;
 };
 };
 
 
-4.22 KVM_SET_FPU
+4.23 KVM_SET_FPU
 
 
 Capability: basic
 Capability: basic
 Architectures: x86
 Architectures: x86
@@ -518,7 +518,7 @@ struct kvm_fpu {
 	__u32 pad2;
 	__u32 pad2;
 };
 };
 
 
-4.23 KVM_CREATE_IRQCHIP
+4.24 KVM_CREATE_IRQCHIP
 
 
 Capability: KVM_CAP_IRQCHIP
 Capability: KVM_CAP_IRQCHIP
 Architectures: x86, ia64
 Architectures: x86, ia64
@@ -531,7 +531,7 @@ ioapic, a virtual PIC (two PICs, nested), and sets up future vcpus to have a
 local APIC.  IRQ routing for GSIs 0-15 is set to both PIC and IOAPIC; GSI 16-23
 local APIC.  IRQ routing for GSIs 0-15 is set to both PIC and IOAPIC; GSI 16-23
 only go to the IOAPIC.  On ia64, a IOSAPIC is created.
 only go to the IOAPIC.  On ia64, a IOSAPIC is created.
 
 
-4.24 KVM_IRQ_LINE
+4.25 KVM_IRQ_LINE
 
 
 Capability: KVM_CAP_IRQCHIP
 Capability: KVM_CAP_IRQCHIP
 Architectures: x86, ia64
 Architectures: x86, ia64
@@ -552,7 +552,7 @@ struct kvm_irq_level {
 	__u32 level;           /* 0 or 1 */
 	__u32 level;           /* 0 or 1 */
 };
 };
 
 
-4.25 KVM_GET_IRQCHIP
+4.26 KVM_GET_IRQCHIP
 
 
 Capability: KVM_CAP_IRQCHIP
 Capability: KVM_CAP_IRQCHIP
 Architectures: x86, ia64
 Architectures: x86, ia64
@@ -573,7 +573,7 @@ struct kvm_irqchip {
 	} chip;
 	} chip;
 };
 };
 
 
-4.26 KVM_SET_IRQCHIP
+4.27 KVM_SET_IRQCHIP
 
 
 Capability: KVM_CAP_IRQCHIP
 Capability: KVM_CAP_IRQCHIP
 Architectures: x86, ia64
 Architectures: x86, ia64
@@ -594,7 +594,7 @@ struct kvm_irqchip {
 	} chip;
 	} chip;
 };
 };
 
 
-4.27 KVM_XEN_HVM_CONFIG
+4.28 KVM_XEN_HVM_CONFIG
 
 
 Capability: KVM_CAP_XEN_HVM
 Capability: KVM_CAP_XEN_HVM
 Architectures: x86
 Architectures: x86
@@ -618,7 +618,7 @@ struct kvm_xen_hvm_config {
 	__u8 pad2[30];
 	__u8 pad2[30];
 };
 };
 
 
-4.27 KVM_GET_CLOCK
+4.29 KVM_GET_CLOCK
 
 
 Capability: KVM_CAP_ADJUST_CLOCK
 Capability: KVM_CAP_ADJUST_CLOCK
 Architectures: x86
 Architectures: x86
@@ -636,7 +636,7 @@ struct kvm_clock_data {
 	__u32 pad[9];
 	__u32 pad[9];
 };
 };
 
 
-4.28 KVM_SET_CLOCK
+4.30 KVM_SET_CLOCK
 
 
 Capability: KVM_CAP_ADJUST_CLOCK
 Capability: KVM_CAP_ADJUST_CLOCK
 Architectures: x86
 Architectures: x86
@@ -654,7 +654,7 @@ struct kvm_clock_data {
 	__u32 pad[9];
 	__u32 pad[9];
 };
 };
 
 
-4.29 KVM_GET_VCPU_EVENTS
+4.31 KVM_GET_VCPU_EVENTS
 
 
 Capability: KVM_CAP_VCPU_EVENTS
 Capability: KVM_CAP_VCPU_EVENTS
 Extended by: KVM_CAP_INTR_SHADOW
 Extended by: KVM_CAP_INTR_SHADOW
@@ -693,7 +693,7 @@ struct kvm_vcpu_events {
 KVM_VCPUEVENT_VALID_SHADOW may be set in the flags field to signal that
 KVM_VCPUEVENT_VALID_SHADOW may be set in the flags field to signal that
 interrupt.shadow contains a valid state. Otherwise, this field is undefined.
 interrupt.shadow contains a valid state. Otherwise, this field is undefined.
 
 
-4.30 KVM_SET_VCPU_EVENTS
+4.32 KVM_SET_VCPU_EVENTS
 
 
 Capability: KVM_CAP_VCPU_EVENTS
 Capability: KVM_CAP_VCPU_EVENTS
 Extended by: KVM_CAP_INTR_SHADOW
 Extended by: KVM_CAP_INTR_SHADOW
@@ -719,7 +719,7 @@ If KVM_CAP_INTR_SHADOW is available, KVM_VCPUEVENT_VALID_SHADOW can be set in
 the flags field to signal that interrupt.shadow contains a valid state and
 the flags field to signal that interrupt.shadow contains a valid state and
 shall be written into the VCPU.
 shall be written into the VCPU.
 
 
-4.32 KVM_GET_DEBUGREGS
+4.33 KVM_GET_DEBUGREGS
 
 
 Capability: KVM_CAP_DEBUGREGS
 Capability: KVM_CAP_DEBUGREGS
 Architectures: x86
 Architectures: x86
@@ -737,7 +737,7 @@ struct kvm_debugregs {
 	__u64 reserved[9];
 	__u64 reserved[9];
 };
 };
 
 
-4.33 KVM_SET_DEBUGREGS
+4.34 KVM_SET_DEBUGREGS
 
 
 Capability: KVM_CAP_DEBUGREGS
 Capability: KVM_CAP_DEBUGREGS
 Architectures: x86
 Architectures: x86
@@ -750,7 +750,7 @@ Writes debug registers into the vcpu.
 See KVM_GET_DEBUGREGS for the data structure. The flags field is unused
 See KVM_GET_DEBUGREGS for the data structure. The flags field is unused
 yet and must be cleared on entry.
 yet and must be cleared on entry.
 
 
-4.34 KVM_SET_USER_MEMORY_REGION
+4.35 KVM_SET_USER_MEMORY_REGION
 
 
 Capability: KVM_CAP_USER_MEM
 Capability: KVM_CAP_USER_MEM
 Architectures: all
 Architectures: all
@@ -796,7 +796,7 @@ It is recommended to use this API instead of the KVM_SET_MEMORY_REGION ioctl.
 The KVM_SET_MEMORY_REGION does not allow fine grained control over memory
 The KVM_SET_MEMORY_REGION does not allow fine grained control over memory
 allocation and is deprecated.
 allocation and is deprecated.
 
 
-4.35 KVM_SET_TSS_ADDR
+4.36 KVM_SET_TSS_ADDR
 
 
 Capability: KVM_CAP_SET_TSS_ADDR
 Capability: KVM_CAP_SET_TSS_ADDR
 Architectures: x86
 Architectures: x86
@@ -814,7 +814,7 @@ This ioctl is required on Intel-based hosts.  This is needed on Intel hardware
 because of a quirk in the virtualization implementation (see the internals
 because of a quirk in the virtualization implementation (see the internals
 documentation when it pops into existence).
 documentation when it pops into existence).
 
 
-4.36 KVM_ENABLE_CAP
+4.37 KVM_ENABLE_CAP
 
 
 Capability: KVM_CAP_ENABLE_CAP
 Capability: KVM_CAP_ENABLE_CAP
 Architectures: ppc
 Architectures: ppc
@@ -849,7 +849,7 @@ function properly, this is the place to put them.
        __u8  pad[64];
        __u8  pad[64];
 };
 };
 
 
-4.37 KVM_GET_MP_STATE
+4.38 KVM_GET_MP_STATE
 
 
 Capability: KVM_CAP_MP_STATE
 Capability: KVM_CAP_MP_STATE
 Architectures: x86, ia64
 Architectures: x86, ia64
@@ -879,7 +879,7 @@ Possible values are:
 This ioctl is only useful after KVM_CREATE_IRQCHIP.  Without an in-kernel
 This ioctl is only useful after KVM_CREATE_IRQCHIP.  Without an in-kernel
 irqchip, the multiprocessing state must be maintained by userspace.
 irqchip, the multiprocessing state must be maintained by userspace.
 
 
-4.38 KVM_SET_MP_STATE
+4.39 KVM_SET_MP_STATE
 
 
 Capability: KVM_CAP_MP_STATE
 Capability: KVM_CAP_MP_STATE
 Architectures: x86, ia64
 Architectures: x86, ia64
@@ -893,7 +893,7 @@ arguments.
 This ioctl is only useful after KVM_CREATE_IRQCHIP.  Without an in-kernel
 This ioctl is only useful after KVM_CREATE_IRQCHIP.  Without an in-kernel
 irqchip, the multiprocessing state must be maintained by userspace.
 irqchip, the multiprocessing state must be maintained by userspace.
 
 
-4.39 KVM_SET_IDENTITY_MAP_ADDR
+4.40 KVM_SET_IDENTITY_MAP_ADDR
 
 
 Capability: KVM_CAP_SET_IDENTITY_MAP_ADDR
 Capability: KVM_CAP_SET_IDENTITY_MAP_ADDR
 Architectures: x86
 Architectures: x86
@@ -911,7 +911,7 @@ This ioctl is required on Intel-based hosts.  This is needed on Intel hardware
 because of a quirk in the virtualization implementation (see the internals
 because of a quirk in the virtualization implementation (see the internals
 documentation when it pops into existence).
 documentation when it pops into existence).
 
 
-4.40 KVM_SET_BOOT_CPU_ID
+4.41 KVM_SET_BOOT_CPU_ID
 
 
 Capability: KVM_CAP_SET_BOOT_CPU_ID
 Capability: KVM_CAP_SET_BOOT_CPU_ID
 Architectures: x86, ia64
 Architectures: x86, ia64
@@ -923,7 +923,7 @@ Define which vcpu is the Bootstrap Processor (BSP).  Values are the same
 as the vcpu id in KVM_CREATE_VCPU.  If this ioctl is not called, the default
 as the vcpu id in KVM_CREATE_VCPU.  If this ioctl is not called, the default
 is vcpu 0.
 is vcpu 0.
 
 
-4.41 KVM_GET_XSAVE
+4.42 KVM_GET_XSAVE
 
 
 Capability: KVM_CAP_XSAVE
 Capability: KVM_CAP_XSAVE
 Architectures: x86
 Architectures: x86
@@ -937,7 +937,7 @@ struct kvm_xsave {
 
 
 This ioctl would copy current vcpu's xsave struct to the userspace.
 This ioctl would copy current vcpu's xsave struct to the userspace.
 
 
-4.42 KVM_SET_XSAVE
+4.43 KVM_SET_XSAVE
 
 
 Capability: KVM_CAP_XSAVE
 Capability: KVM_CAP_XSAVE
 Architectures: x86
 Architectures: x86
@@ -951,7 +951,7 @@ struct kvm_xsave {
 
 
 This ioctl would copy userspace's xsave struct to the kernel.
 This ioctl would copy userspace's xsave struct to the kernel.
 
 
-4.43 KVM_GET_XCRS
+4.44 KVM_GET_XCRS
 
 
 Capability: KVM_CAP_XCRS
 Capability: KVM_CAP_XCRS
 Architectures: x86
 Architectures: x86
@@ -974,7 +974,7 @@ struct kvm_xcrs {
 
 
 This ioctl would copy current vcpu's xcrs to the userspace.
 This ioctl would copy current vcpu's xcrs to the userspace.
 
 
-4.44 KVM_SET_XCRS
+4.45 KVM_SET_XCRS
 
 
 Capability: KVM_CAP_XCRS
 Capability: KVM_CAP_XCRS
 Architectures: x86
 Architectures: x86
@@ -997,7 +997,7 @@ struct kvm_xcrs {
 
 
 This ioctl would set vcpu's xcr to the value userspace specified.
 This ioctl would set vcpu's xcr to the value userspace specified.
 
 
-4.45 KVM_GET_SUPPORTED_CPUID
+4.46 KVM_GET_SUPPORTED_CPUID
 
 
 Capability: KVM_CAP_EXT_CPUID
 Capability: KVM_CAP_EXT_CPUID
 Architectures: x86
 Architectures: x86
@@ -1062,7 +1062,7 @@ emulate them efficiently. The fields in each entry are defined as follows:
    eax, ebx, ecx, edx: the values returned by the cpuid instruction for
    eax, ebx, ecx, edx: the values returned by the cpuid instruction for
          this function/index combination
          this function/index combination
 
 
-4.46 KVM_PPC_GET_PVINFO
+4.47 KVM_PPC_GET_PVINFO
 
 
 Capability: KVM_CAP_PPC_GET_PVINFO
 Capability: KVM_CAP_PPC_GET_PVINFO
 Architectures: ppc
 Architectures: ppc
@@ -1085,7 +1085,7 @@ of 4 instructions that make up a hypercall.
 If any additional field gets added to this structure later on, a bit for that
 If any additional field gets added to this structure later on, a bit for that
 additional piece of information will be set in the flags bitmap.
 additional piece of information will be set in the flags bitmap.
 
 
-4.47 KVM_ASSIGN_PCI_DEVICE
+4.48 KVM_ASSIGN_PCI_DEVICE
 
 
 Capability: KVM_CAP_DEVICE_ASSIGNMENT
 Capability: KVM_CAP_DEVICE_ASSIGNMENT
 Architectures: x86 ia64
 Architectures: x86 ia64
@@ -1113,7 +1113,7 @@ following flags are specified:
 /* Depends on KVM_CAP_IOMMU */
 /* Depends on KVM_CAP_IOMMU */
 #define KVM_DEV_ASSIGN_ENABLE_IOMMU	(1 << 0)
 #define KVM_DEV_ASSIGN_ENABLE_IOMMU	(1 << 0)
 
 
-4.48 KVM_DEASSIGN_PCI_DEVICE
+4.49 KVM_DEASSIGN_PCI_DEVICE
 
 
 Capability: KVM_CAP_DEVICE_DEASSIGNMENT
 Capability: KVM_CAP_DEVICE_DEASSIGNMENT
 Architectures: x86 ia64
 Architectures: x86 ia64
@@ -1126,7 +1126,7 @@ Ends PCI device assignment, releasing all associated resources.
 See KVM_CAP_DEVICE_ASSIGNMENT for the data structure. Only assigned_dev_id is
 See KVM_CAP_DEVICE_ASSIGNMENT for the data structure. Only assigned_dev_id is
 used in kvm_assigned_pci_dev to identify the device.
 used in kvm_assigned_pci_dev to identify the device.
 
 
-4.49 KVM_ASSIGN_DEV_IRQ
+4.50 KVM_ASSIGN_DEV_IRQ
 
 
 Capability: KVM_CAP_ASSIGN_DEV_IRQ
 Capability: KVM_CAP_ASSIGN_DEV_IRQ
 Architectures: x86 ia64
 Architectures: x86 ia64
@@ -1164,7 +1164,7 @@ The following flags are defined:
 It is not valid to specify multiple types per host or guest IRQ. However, the
 It is not valid to specify multiple types per host or guest IRQ. However, the
 IRQ type of host and guest can differ or can even be null.
 IRQ type of host and guest can differ or can even be null.
 
 
-4.50 KVM_DEASSIGN_DEV_IRQ
+4.51 KVM_DEASSIGN_DEV_IRQ
 
 
 Capability: KVM_CAP_ASSIGN_DEV_IRQ
 Capability: KVM_CAP_ASSIGN_DEV_IRQ
 Architectures: x86 ia64
 Architectures: x86 ia64
@@ -1178,7 +1178,7 @@ See KVM_ASSIGN_DEV_IRQ for the data structure. The target device is specified
 by assigned_dev_id, flags must correspond to the IRQ type specified on
 by assigned_dev_id, flags must correspond to the IRQ type specified on
 KVM_ASSIGN_DEV_IRQ. Partial deassignment of host or guest IRQ is allowed.
 KVM_ASSIGN_DEV_IRQ. Partial deassignment of host or guest IRQ is allowed.
 
 
-4.51 KVM_SET_GSI_ROUTING
+4.52 KVM_SET_GSI_ROUTING
 
 
 Capability: KVM_CAP_IRQ_ROUTING
 Capability: KVM_CAP_IRQ_ROUTING
 Architectures: x86 ia64
 Architectures: x86 ia64
@@ -1226,7 +1226,7 @@ struct kvm_irq_routing_msi {
 	__u32 pad;
 	__u32 pad;
 };
 };
 
 
-4.52 KVM_ASSIGN_SET_MSIX_NR
+4.53 KVM_ASSIGN_SET_MSIX_NR
 
 
 Capability: KVM_CAP_DEVICE_MSIX
 Capability: KVM_CAP_DEVICE_MSIX
 Architectures: x86 ia64
 Architectures: x86 ia64
@@ -1245,7 +1245,7 @@ struct kvm_assigned_msix_nr {
 
 
 #define KVM_MAX_MSIX_PER_DEV		256
 #define KVM_MAX_MSIX_PER_DEV		256
 
 
-4.53 KVM_ASSIGN_SET_MSIX_ENTRY
+4.54 KVM_ASSIGN_SET_MSIX_ENTRY
 
 
 Capability: KVM_CAP_DEVICE_MSIX
 Capability: KVM_CAP_DEVICE_MSIX
 Architectures: x86 ia64
 Architectures: x86 ia64

+ 1 - 1
Documentation/sysctl/kernel.txt

@@ -367,7 +367,7 @@ the different loglevels.
 
 
 - console_loglevel: messages with a higher priority than
 - console_loglevel: messages with a higher priority than
   this will be printed to the console
   this will be printed to the console
-- default_message_level: messages without an explicit priority
+- default_message_loglevel: messages without an explicit priority
   will be printed with this priority
   will be printed with this priority
 - minimum_console_loglevel: minimum (highest) value to which
 - minimum_console_loglevel: minimum (highest) value to which
   console_loglevel can be set
   console_loglevel can be set

+ 1 - 2
Documentation/vm/unevictable-lru.txt

@@ -84,8 +84,7 @@ indicate that the page is being managed on the unevictable list.
 
 
 The PG_unevictable flag is analogous to, and mutually exclusive with, the
 The PG_unevictable flag is analogous to, and mutually exclusive with, the
 PG_active flag in that it indicates on which LRU list a page resides when
 PG_active flag in that it indicates on which LRU list a page resides when
-PG_lru is set.  The unevictable list is compile-time configurable based on the
-UNEVICTABLE_LRU Kconfig option.
+PG_lru is set.
 
 
 The Unevictable LRU infrastructure maintains unevictable pages on an additional
 The Unevictable LRU infrastructure maintains unevictable pages on an additional
 LRU list for a few reasons:
 LRU list for a few reasons:

+ 8 - 7
MAINTAINERS

@@ -2376,7 +2376,7 @@ F:	include/linux/edac_mce.h
 
 
 EDAC-I82975X
 EDAC-I82975X
 M:	Ranganathan Desikan <ravi@jetztechnologies.com>
 M:	Ranganathan Desikan <ravi@jetztechnologies.com>
-M:	"Arvind R." <arvind@jetztechnologies.com>
+M:	"Arvind R." <arvino55@gmail.com>
 L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
 L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
 W:	bluesmoke.sourceforge.net
 W:	bluesmoke.sourceforge.net
 S:	Maintained
 S:	Maintained
@@ -3472,6 +3472,7 @@ F:	net/ipx/
 IRDA SUBSYSTEM
 IRDA SUBSYSTEM
 M:	Samuel Ortiz <samuel@sortiz.org>
 M:	Samuel Ortiz <samuel@sortiz.org>
 L:	irda-users@lists.sourceforge.net (subscribers-only)
 L:	irda-users@lists.sourceforge.net (subscribers-only)
+L:	netdev@vger.kernel.org
 W:	http://irda.sourceforge.net/
 W:	http://irda.sourceforge.net/
 S:	Maintained
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/sameo/irda-2.6.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/sameo/irda-2.6.git
@@ -3909,6 +3910,12 @@ L:	linux-security-module@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/chrisw/lsm-2.6.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/chrisw/lsm-2.6.git
 S:	Supported
 S:	Supported
 
 
+LIS3LV02D ACCELEROMETER DRIVER
+M:	Eric Piel <eric.piel@tremplin-utc.net>
+S:	Maintained
+F:	Documentation/hwmon/lis3lv02d
+F:	drivers/hwmon/lis3lv02d.*
+
 LLC (802.2)
 LLC (802.2)
 M:	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 M:	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 S:	Maintained
 S:	Maintained
@@ -3916,12 +3923,6 @@ F:	include/linux/llc.h
 F:	include/net/llc*
 F:	include/net/llc*
 F:	net/llc/
 F:	net/llc/
 
 
-LIS3LV02D ACCELEROMETER DRIVER
-M:	Eric Piel <eric.piel@tremplin-utc.net>
-S:	Maintained
-F:	Documentation/hwmon/lis3lv02d
-F:	drivers/hwmon/lis3lv02d.*
-
 LM73 HARDWARE MONITOR DRIVER
 LM73 HARDWARE MONITOR DRIVER
 M:	Guillaume Ligneul <guillaume.ligneul@gmail.com>
 M:	Guillaume Ligneul <guillaume.ligneul@gmail.com>
 L:	lm-sensors@lm-sensors.org
 L:	lm-sensors@lm-sensors.org

+ 1 - 1
Makefile

@@ -666,7 +666,7 @@ export MODLIB
 #  INSTALL_MOD_STRIP, if defined, will cause modules to be
 #  INSTALL_MOD_STRIP, if defined, will cause modules to be
 #  stripped after they are installed.  If INSTALL_MOD_STRIP is '1', then
 #  stripped after they are installed.  If INSTALL_MOD_STRIP is '1', then
 #  the default option --strip-debug will be used.  Otherwise,
 #  the default option --strip-debug will be used.  Otherwise,
-#  INSTALL_MOD_STRIP will used as the options to the strip command.
+#  INSTALL_MOD_STRIP value will be used as the options to the strip command.
 
 
 ifdef INSTALL_MOD_STRIP
 ifdef INSTALL_MOD_STRIP
 ifeq ($(INSTALL_MOD_STRIP),1)
 ifeq ($(INSTALL_MOD_STRIP),1)

+ 1 - 1
arch/alpha/include/asm/cacheflush.h

@@ -63,7 +63,7 @@ extern void flush_icache_user_range(struct vm_area_struct *vma,
 		struct page *page, unsigned long addr, int len);
 		struct page *page, unsigned long addr, int len);
 #endif
 #endif
 
 
-/* This is used only in do_no_page and do_swap_page.  */
+/* This is used only in __do_fault and do_swap_page.  */
 #define flush_icache_page(vma, page) \
 #define flush_icache_page(vma, page) \
   flush_icache_user_range((vma), (page), 0, 0)
   flush_icache_user_range((vma), (page), 0, 0)
 
 

+ 1 - 1
arch/arm/mach-ux500/mbox-db5500.c

@@ -498,7 +498,7 @@ struct mbox *mbox_setup(u8 mbox_id, mbox_recv_cb_t *mbox_cb, void *priv)
 #endif
 #endif
 
 
 	dev_info(&(mbox->pdev->dev),
 	dev_info(&(mbox->pdev->dev),
-		 "Mailbox driver with index %d initated!\n", mbox_id);
+		 "Mailbox driver with index %d initiated!\n", mbox_id);
 
 
 exit:
 exit:
 	return mbox;
 	return mbox;

+ 1 - 1
arch/arm/plat-omap/Kconfig

@@ -54,7 +54,7 @@ config OMAP_SMARTREFLEX
 	  user must write 1 to
 	  user must write 1 to
 		/debug/voltage/vdd_<X>/smartreflex/autocomp,
 		/debug/voltage/vdd_<X>/smartreflex/autocomp,
 	  where X is mpu or core for OMAP3.
 	  where X is mpu or core for OMAP3.
-	  Optionallly autocompensation can be enabled in the kernel
+	  Optionally autocompensation can be enabled in the kernel
 	  by default during system init via the enable_on_init flag
 	  by default during system init via the enable_on_init flag
 	  which an be passed as platform data to the smartreflex driver.
 	  which an be passed as platform data to the smartreflex driver.
 
 

+ 1 - 1
arch/avr32/mm/cache.c

@@ -113,7 +113,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
 }
 }
 
 
 /*
 /*
- * This one is called from do_no_page(), do_swap_page() and install_page().
+ * This one is called from __do_fault() and do_swap_page().
  */
  */
 void flush_icache_page(struct vm_area_struct *vma, struct page *page)
 void flush_icache_page(struct vm_area_struct *vma, struct page *page)
 {
 {

+ 1 - 1
arch/cris/arch-v10/mm/init.c

@@ -241,7 +241,7 @@ flush_etrax_cacherange(void *startadr, int length)
 }
 }
 
 
 /* Due to a bug in Etrax100(LX) all versions, receiving DMA buffers
 /* Due to a bug in Etrax100(LX) all versions, receiving DMA buffers
- * will occationally corrupt certain CPU writes if the DMA buffers
+ * will occasionally corrupt certain CPU writes if the DMA buffers
  * happen to be hot in the cache.
  * happen to be hot in the cache.
  * 
  * 
  * As a workaround, we have to flush the relevant parts of the cache
  * As a workaround, we have to flush the relevant parts of the cache

+ 1 - 1
arch/ia64/include/asm/perfmon.h

@@ -7,7 +7,7 @@
 #define _ASM_IA64_PERFMON_H
 #define _ASM_IA64_PERFMON_H
 
 
 /*
 /*
- * perfmon comamnds supported on all CPU models
+ * perfmon commands supported on all CPU models
  */
  */
 #define PFM_WRITE_PMCS		0x01
 #define PFM_WRITE_PMCS		0x01
 #define PFM_WRITE_PMDS		0x02
 #define PFM_WRITE_PMDS		0x02

+ 1 - 1
arch/x86/kernel/apic/io_apic.c

@@ -3983,7 +3983,7 @@ int mp_find_ioapic_pin(int ioapic, u32 gsi)
 static __init int bad_ioapic(unsigned long address)
 static __init int bad_ioapic(unsigned long address)
 {
 {
 	if (nr_ioapics >= MAX_IO_APICS) {
 	if (nr_ioapics >= MAX_IO_APICS) {
-		printk(KERN_WARNING "WARING: Max # of I/O APICs (%d) exceeded "
+		printk(KERN_WARNING "WARNING: Max # of I/O APICs (%d) exceeded "
 		       "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics);
 		       "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics);
 		return 1;
 		return 1;
 	}
 	}

+ 1 - 1
arch/x86/oprofile/op_model_p4.c

@@ -50,7 +50,7 @@ static inline void setup_num_counters(void)
 #endif
 #endif
 }
 }
 
 
-static int inline addr_increment(void)
+static inline int addr_increment(void)
 {
 {
 #ifdef CONFIG_SMP
 #ifdef CONFIG_SMP
 	return smp_num_siblings == 2 ? 2 : 1;
 	return smp_num_siblings == 2 ? 2 : 1;

+ 1 - 1
arch/xtensa/configs/s6105_defconfig

@@ -598,7 +598,7 @@ CONFIG_DEBUG_NOMMU_REGIONS=y
 # CONFIG_CONTEXT_SWITCH_TRACER is not set
 # CONFIG_CONTEXT_SWITCH_TRACER is not set
 # CONFIG_BOOT_TRACER is not set
 # CONFIG_BOOT_TRACER is not set
 # CONFIG_TRACE_BRANCH_PROFILING is not set
 # CONFIG_TRACE_BRANCH_PROFILING is not set
-# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
+# CONFIG_DYNAMIC_DEBUG is not set
 # CONFIG_SAMPLES is not set
 # CONFIG_SAMPLES is not set
 
 
 #
 #

+ 1 - 1
drivers/atm/firestream.c

@@ -1031,7 +1031,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
 		/* We now use the "submit_command" function to submit commands to
 		/* We now use the "submit_command" function to submit commands to
 		   the firestream. There is a define up near the definition of
 		   the firestream. There is a define up near the definition of
 		   that routine that switches this routine between immediate write
 		   that routine that switches this routine between immediate write
-		   to the immediate comamnd registers and queuing the commands in
+		   to the immediate command registers and queuing the commands in
 		   the HPTXQ for execution. This last technique might be more
 		   the HPTXQ for execution. This last technique might be more
 		   efficient if we know we're going to submit a whole lot of
 		   efficient if we know we're going to submit a whole lot of
 		   commands in one go, but this driver is not setup to be able to
 		   commands in one go, but this driver is not setup to be able to

+ 1 - 1
drivers/block/smart1,2.h

@@ -95,7 +95,7 @@ static unsigned long smart4_completed(ctlr_info_t *h)
  /*
  /*
  *  This hardware returns interrupt pending at a different place and 
  *  This hardware returns interrupt pending at a different place and 
  *  it does not tell us if the fifo is empty, we will have check  
  *  it does not tell us if the fifo is empty, we will have check  
- *  that by getting a 0 back from the comamnd_completed call. 
+ *  that by getting a 0 back from the command_completed call. 
  */
  */
 static unsigned long smart4_intr_pending(ctlr_info_t *h)
 static unsigned long smart4_intr_pending(ctlr_info_t *h)
 {
 {

+ 2 - 2
drivers/bluetooth/btusb.c

@@ -433,7 +433,7 @@ static void btusb_isoc_complete(struct urb *urb)
 	}
 	}
 }
 }
 
 
-static void inline __fill_isoc_descriptor(struct urb *urb, int len, int mtu)
+static inline void __fill_isoc_descriptor(struct urb *urb, int len, int mtu)
 {
 {
 	int i, offset = 0;
 	int i, offset = 0;
 
 
@@ -780,7 +780,7 @@ static void btusb_notify(struct hci_dev *hdev, unsigned int evt)
 	}
 	}
 }
 }
 
 
-static int inline __set_isoc_interface(struct hci_dev *hdev, int altsetting)
+static inline int __set_isoc_interface(struct hci_dev *hdev, int altsetting)
 {
 {
 	struct btusb_data *data = hdev->driver_data;
 	struct btusb_data *data = hdev->driver_data;
 	struct usb_interface *intf = data->isoc;
 	struct usb_interface *intf = data->isoc;

+ 1 - 1
drivers/cpuidle/sysfs.c

@@ -300,7 +300,7 @@ static struct kobj_type ktype_state_cpuidle = {
 	.release = cpuidle_state_sysfs_release,
 	.release = cpuidle_state_sysfs_release,
 };
 };
 
 
-static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
+static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
 {
 {
 	kobject_put(&device->kobjs[i]->kobj);
 	kobject_put(&device->kobjs[i]->kobj);
 	wait_for_completion(&device->kobjs[i]->kobj_unregister);
 	wait_for_completion(&device->kobjs[i]->kobj_unregister);

+ 2 - 2
drivers/dma/coh901318.c

@@ -849,7 +849,7 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id)
 
 
 				/* Must clear TC interrupt before calling
 				/* Must clear TC interrupt before calling
 				 * dma_tc_handle
 				 * dma_tc_handle
-				 * in case tc_handle initate a new dma job
+				 * in case tc_handle initiate a new dma job
 				 */
 				 */
 				__set_bit(i, virtbase + COH901318_TC_INT_CLEAR1);
 				__set_bit(i, virtbase + COH901318_TC_INT_CLEAR1);
 
 
@@ -894,7 +894,7 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id)
 				}
 				}
 				/* Must clear TC interrupt before calling
 				/* Must clear TC interrupt before calling
 				 * dma_tc_handle
 				 * dma_tc_handle
-				 * in case tc_handle initate a new dma job
+				 * in case tc_handle initiate a new dma job
 				 */
 				 */
 				__set_bit(i, virtbase + COH901318_TC_INT_CLEAR2);
 				__set_bit(i, virtbase + COH901318_TC_INT_CLEAR2);
 
 

+ 1 - 1
drivers/dma/shdma.c

@@ -750,7 +750,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
 		return;
 		return;
 	}
 	}
 
 
-	/* Find the first not transferred desciptor */
+	/* Find the first not transferred descriptor */
 	list_for_each_entry(desc, &sh_chan->ld_queue, node)
 	list_for_each_entry(desc, &sh_chan->ld_queue, node)
 		if (desc->mark == DESC_SUBMITTED) {
 		if (desc->mark == DESC_SUBMITTED) {
 			dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
 			dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",

+ 1 - 1
drivers/dma/timb_dma.c

@@ -629,7 +629,7 @@ static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		desc_node)
 		desc_node)
 		list_move(&td_desc->desc_node, &td_chan->free_list);
 		list_move(&td_desc->desc_node, &td_chan->free_list);
 
 
-	/* now tear down the runnning */
+	/* now tear down the running */
 	__td_finish(td_chan);
 	__td_finish(td_chan);
 	spin_unlock_bh(&td_chan->lock);
 	spin_unlock_bh(&td_chan->lock);
 
 

+ 1 - 1
drivers/edac/i7300_edac.c

@@ -162,7 +162,7 @@ static struct edac_pci_ctl_info *i7300_pci;
 #define AMBPRESENT_0	0x64
 #define AMBPRESENT_0	0x64
 #define AMBPRESENT_1	0x66
 #define AMBPRESENT_1	0x66
 
 
-const static u16 mtr_regs[MAX_SLOTS] = {
+static const u16 mtr_regs[MAX_SLOTS] = {
 	0x80, 0x84, 0x88, 0x8c,
 	0x80, 0x84, 0x88, 0x8c,
 	0x82, 0x86, 0x8a, 0x8e
 	0x82, 0x86, 0x8a, 0x8e
 };
 };

+ 44 - 25
drivers/edac/i82975x_edac.c

@@ -160,8 +160,8 @@ NOTE: Only ONE of the three must be enabled
 					 * 3:2  Rank 1 architecture
 					 * 3:2  Rank 1 architecture
 					 * 1:0  Rank 0 architecture
 					 * 1:0  Rank 0 architecture
 					 *
 					 *
-					 * 00 => x16 devices; i.e 4 banks
-					 * 01 => x8  devices; i.e 8 banks
+					 * 00 => 4 banks
+					 * 01 => 8 banks
 					 */
 					 */
 #define I82975X_C0BNKARC	0x10e
 #define I82975X_C0BNKARC	0x10e
 #define I82975X_C1BNKARC	0x18e
 #define I82975X_C1BNKARC	0x18e
@@ -278,6 +278,7 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
 		struct i82975x_error_info *info, int handle_errors)
 		struct i82975x_error_info *info, int handle_errors)
 {
 {
 	int row, multi_chan, chan;
 	int row, multi_chan, chan;
+	unsigned long offst, page;
 
 
 	multi_chan = mci->csrows[0].nr_channels - 1;
 	multi_chan = mci->csrows[0].nr_channels - 1;
 
 
@@ -292,17 +293,19 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
 		info->errsts = info->errsts2;
 		info->errsts = info->errsts2;
 	}
 	}
 
 
-	chan = info->eap & 1;
-	info->eap >>= 1;
-	if (info->xeap )
-		info->eap |= 0x80000000;
-	info->eap >>= PAGE_SHIFT;
-	row = edac_mc_find_csrow_by_page(mci, info->eap);
+	page = (unsigned long) info->eap;
+	if (info->xeap & 1)
+		page |= 0x100000000ul;
+	chan = page & 1;
+	page >>= 1;
+	offst = page & ((1 << PAGE_SHIFT) - 1);
+	page >>= PAGE_SHIFT;
+	row = edac_mc_find_csrow_by_page(mci, page);
 
 
 	if (info->errsts & 0x0002)
 	if (info->errsts & 0x0002)
-		edac_mc_handle_ue(mci, info->eap, 0, row, "i82975x UE");
+		edac_mc_handle_ue(mci, page, offst , row, "i82975x UE");
 	else
 	else
-		edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
+		edac_mc_handle_ce(mci, page, offst, info->derrsyn, row,
 				multi_chan ? chan : 0,
 				multi_chan ? chan : 0,
 				"i82975x CE");
 				"i82975x CE");
 
 
@@ -344,11 +347,7 @@ static int dual_channel_active(void __iomem *mch_window)
 static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank)
 static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank)
 {
 {
 	/*
 	/*
-	 * ASUS P5W DH either does not program this register or programs
-	 * it wrong!
-	 * ECC is possible on i92975x ONLY with DEV_X8 which should mean 'val'
-	 * for each rank should be 01b - the LSB of the word should be 0x55;
-	 * but it reads 0!
+	 * ECC is possible on i92975x ONLY with DEV_X8
 	 */
 	 */
 	return DEV_X8;
 	return DEV_X8;
 }
 }
@@ -356,11 +355,15 @@ static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank)
 static void i82975x_init_csrows(struct mem_ctl_info *mci,
 static void i82975x_init_csrows(struct mem_ctl_info *mci,
 		struct pci_dev *pdev, void __iomem *mch_window)
 		struct pci_dev *pdev, void __iomem *mch_window)
 {
 {
+	static const char *labels[4] = {
+							"DIMM A1", "DIMM A2",
+							"DIMM B1", "DIMM B2"
+						};
 	struct csrow_info *csrow;
 	struct csrow_info *csrow;
 	unsigned long last_cumul_size;
 	unsigned long last_cumul_size;
 	u8 value;
 	u8 value;
 	u32 cumul_size;
 	u32 cumul_size;
-	int index;
+	int index, chan;
 
 
 	last_cumul_size = 0;
 	last_cumul_size = 0;
 
 
@@ -369,11 +372,7 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
 	 * The dram row boundary (DRB) reg values are boundary address
 	 * The dram row boundary (DRB) reg values are boundary address
 	 * for each DRAM row with a granularity of 32 or 64MB (single/dual
 	 * for each DRAM row with a granularity of 32 or 64MB (single/dual
 	 * channel operation).  DRB regs are cumulative; therefore DRB7 will
 	 * channel operation).  DRB regs are cumulative; therefore DRB7 will
-	 * contain the total memory contained in all eight rows.
-	 *
-	 * FIXME:
-	 *  EDAC currently works for Dual-channel Interleaved configuration.
-	 *  Other configurations, which the chip supports, need fixing/testing.
+	 * contain the total memory contained in all rows.
 	 *
 	 *
 	 */
 	 */
 
 
@@ -384,8 +383,26 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
 					((index >= 4) ? 0x80 : 0));
 					((index >= 4) ? 0x80 : 0));
 		cumul_size = value;
 		cumul_size = value;
 		cumul_size <<= (I82975X_DRB_SHIFT - PAGE_SHIFT);
 		cumul_size <<= (I82975X_DRB_SHIFT - PAGE_SHIFT);
+		/*
+		 * Adjust cumul_size w.r.t number of channels
+		 *
+		 */
+		if (csrow->nr_channels > 1)
+			cumul_size <<= 1;
 		debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
 		debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
 			cumul_size);
 			cumul_size);
+
+		/*
+		 * Initialise dram labels
+		 * index values:
+		 *   [0-7] for single-channel; i.e. csrow->nr_channels = 1
+		 *   [0-3] for dual-channel; i.e. csrow->nr_channels = 2
+		 */
+		for (chan = 0; chan < csrow->nr_channels; chan++)
+			strncpy(csrow->channels[chan].label,
+					labels[(index >> 1) + (chan * 2)],
+					EDAC_MC_LABEL_LEN);
+
 		if (cumul_size == last_cumul_size)
 		if (cumul_size == last_cumul_size)
 			continue;	/* not populated */
 			continue;	/* not populated */
 
 
@@ -393,8 +410,8 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
 		csrow->last_page = cumul_size - 1;
 		csrow->last_page = cumul_size - 1;
 		csrow->nr_pages = cumul_size - last_cumul_size;
 		csrow->nr_pages = cumul_size - last_cumul_size;
 		last_cumul_size = cumul_size;
 		last_cumul_size = cumul_size;
-		csrow->grain = 1 << 7;	/* I82975X_EAP has 128B resolution */
-		csrow->mtype = MEM_DDR; /* i82975x supports only DDR2 */
+		csrow->grain = 1 << 6;	/* I82975X_EAP has 64B resolution */
+		csrow->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
 		csrow->dtype = i82975x_dram_type(mch_window, index);
 		csrow->dtype = i82975x_dram_type(mch_window, index);
 		csrow->edac_mode = EDAC_SECDED; /* only supported */
 		csrow->edac_mode = EDAC_SECDED; /* only supported */
 	}
 	}
@@ -515,18 +532,20 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
 
 
 	debugf3("%s(): init mci\n", __func__);
 	debugf3("%s(): init mci\n", __func__);
 	mci->dev = &pdev->dev;
 	mci->dev = &pdev->dev;
-	mci->mtype_cap = MEM_FLAG_DDR;
+	mci->mtype_cap = MEM_FLAG_DDR2;
 	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
 	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
 	mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
 	mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
 	mci->mod_name = EDAC_MOD_STR;
 	mci->mod_name = EDAC_MOD_STR;
 	mci->mod_ver = I82975X_REVISION;
 	mci->mod_ver = I82975X_REVISION;
 	mci->ctl_name = i82975x_devs[dev_idx].ctl_name;
 	mci->ctl_name = i82975x_devs[dev_idx].ctl_name;
+	mci->dev_name = pci_name(pdev);
 	mci->edac_check = i82975x_check;
 	mci->edac_check = i82975x_check;
 	mci->ctl_page_to_phys = NULL;
 	mci->ctl_page_to_phys = NULL;
 	debugf3("%s(): init pvt\n", __func__);
 	debugf3("%s(): init pvt\n", __func__);
 	pvt = (struct i82975x_pvt *) mci->pvt_info;
 	pvt = (struct i82975x_pvt *) mci->pvt_info;
 	pvt->mch_window = mch_window;
 	pvt->mch_window = mch_window;
 	i82975x_init_csrows(mci, pdev, mch_window);
 	i82975x_init_csrows(mci, pdev, mch_window);
+	mci->scrub_mode = SCRUB_HW_SRC;
 	i82975x_get_error_info(mci, &discard);  /* clear counters */
 	i82975x_get_error_info(mci, &discard);  /* clear counters */
 
 
 	/* finalize this instance of memory controller with edac core */
 	/* finalize this instance of memory controller with edac core */
@@ -664,7 +683,7 @@ module_init(i82975x_init);
 module_exit(i82975x_exit);
 module_exit(i82975x_exit);
 
 
 MODULE_LICENSE("GPL");
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Arvind R. <arvind@acarlab.com>");
+MODULE_AUTHOR("Arvind R. <arvino55@gmail.com>");
 MODULE_DESCRIPTION("MC support for Intel 82975 memory hub controllers");
 MODULE_DESCRIPTION("MC support for Intel 82975 memory hub controllers");
 
 
 module_param(edac_op_state, int, 0444);
 module_param(edac_op_state, int, 0444);

+ 3 - 1
drivers/firmware/dcdbas.c

@@ -268,8 +268,10 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd)
 	}
 	}
 
 
 	/* generate SMI */
 	/* generate SMI */
+	/* inb to force posted write through and make SMI happen now */
 	asm volatile (
 	asm volatile (
-		"outb %b0,%w1"
+		"outb %b0,%w1\n"
+		"inb %w1"
 		: /* no output args */
 		: /* no output args */
 		: "a" (smi_cmd->command_code),
 		: "a" (smi_cmd->command_code),
 		  "d" (smi_cmd->command_address),
 		  "d" (smi_cmd->command_address),

+ 1 - 3
drivers/gpu/drm/drm_sman.c

@@ -59,9 +59,7 @@ drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
 {
 {
 	int ret = 0;
 	int ret = 0;
 
 
-	sman->mm = (struct drm_sman_mm *) kcalloc(num_managers,
-						  sizeof(*sman->mm),
-						  GFP_KERNEL);
+	sman->mm = kcalloc(num_managers, sizeof(*sman->mm), GFP_KERNEL);
 	if (!sman->mm) {
 	if (!sman->mm) {
 		ret = -ENOMEM;
 		ret = -ENOMEM;
 		goto out;
 		goto out;

+ 1 - 1
drivers/gpu/drm/radeon/evergreen.c

@@ -2987,7 +2987,7 @@ int evergreen_resume(struct radeon_device *rdev)
 
 
 	r = r600_ib_test(rdev);
 	r = r600_ib_test(rdev);
 	if (r) {
 	if (r) {
-		DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
 		return r;
 		return r;
 	}
 	}
 
 

+ 5 - 5
drivers/gpu/drm/radeon/r100.c

@@ -3617,7 +3617,7 @@ int r100_ib_test(struct radeon_device *rdev)
 	if (i < rdev->usec_timeout) {
 	if (i < rdev->usec_timeout) {
 		DRM_INFO("ib test succeeded in %u usecs\n", i);
 		DRM_INFO("ib test succeeded in %u usecs\n", i);
 	} else {
 	} else {
-		DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n",
+		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
 			  scratch, tmp);
 			  scratch, tmp);
 		r = -EINVAL;
 		r = -EINVAL;
 	}
 	}
@@ -3637,13 +3637,13 @@ int r100_ib_init(struct radeon_device *rdev)
 
 
 	r = radeon_ib_pool_init(rdev);
 	r = radeon_ib_pool_init(rdev);
 	if (r) {
 	if (r) {
-		dev_err(rdev->dev, "failled initializing IB pool (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing IB pool (%d).\n", r);
 		r100_ib_fini(rdev);
 		r100_ib_fini(rdev);
 		return r;
 		return r;
 	}
 	}
 	r = r100_ib_test(rdev);
 	r = r100_ib_test(rdev);
 	if (r) {
 	if (r) {
-		dev_err(rdev->dev, "failled testing IB (%d).\n", r);
+		dev_err(rdev->dev, "failed testing IB (%d).\n", r);
 		r100_ib_fini(rdev);
 		r100_ib_fini(rdev);
 		return r;
 		return r;
 	}
 	}
@@ -3799,12 +3799,12 @@ static int r100_startup(struct radeon_device *rdev)
 	/* 1M ring buffer */
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
 	if (r) {
-		dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
 		return r;
 		return r;
 	}
 	}
 	r = r100_ib_init(rdev);
 	r = r100_ib_init(rdev);
 	if (r) {
 	if (r) {
-		dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
 		return r;
 		return r;
 	}
 	}
 	return 0;
 	return 0;

+ 2 - 2
drivers/gpu/drm/radeon/r300.c

@@ -1401,12 +1401,12 @@ static int r300_startup(struct radeon_device *rdev)
 	/* 1M ring buffer */
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
 	if (r) {
-		dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
 		return r;
 		return r;
 	}
 	}
 	r = r100_ib_init(rdev);
 	r = r100_ib_init(rdev);
 	if (r) {
 	if (r) {
-		dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
 		return r;
 		return r;
 	}
 	}
 	return 0;
 	return 0;

+ 2 - 2
drivers/gpu/drm/radeon/r420.c

@@ -260,13 +260,13 @@ static int r420_startup(struct radeon_device *rdev)
 	/* 1M ring buffer */
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
 	if (r) {
-		dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
 		return r;
 		return r;
 	}
 	}
 	r420_cp_errata_init(rdev);
 	r420_cp_errata_init(rdev);
 	r = r100_ib_init(rdev);
 	r = r100_ib_init(rdev);
 	if (r) {
 	if (r) {
-		dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
 		return r;
 		return r;
 	}
 	}
 	return 0;
 	return 0;

+ 2 - 2
drivers/gpu/drm/radeon/r520.c

@@ -193,12 +193,12 @@ static int r520_startup(struct radeon_device *rdev)
 	/* 1M ring buffer */
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
 	if (r) {
-		dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
 		return r;
 		return r;
 	}
 	}
 	r = r100_ib_init(rdev);
 	r = r100_ib_init(rdev);
 	if (r) {
 	if (r) {
-		dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
 		return r;
 		return r;
 	}
 	}
 	return 0;
 	return 0;

+ 1 - 1
drivers/gpu/drm/radeon/r600.c

@@ -2464,7 +2464,7 @@ int r600_resume(struct radeon_device *rdev)
 
 
 	r = r600_ib_test(rdev);
 	r = r600_ib_test(rdev);
 	if (r) {
 	if (r) {
-		DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
 		return r;
 		return r;
 	}
 	}
 
 

+ 1 - 1
drivers/gpu/drm/radeon/radeon_ring.c

@@ -151,7 +151,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 	/* 64 dwords should be enough for fence too */
 	/* 64 dwords should be enough for fence too */
 	r = radeon_ring_lock(rdev, 64);
 	r = radeon_ring_lock(rdev, 64);
 	if (r) {
 	if (r) {
-		DRM_ERROR("radeon: scheduling IB failled (%d).\n", r);
+		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
 		return r;
 		return r;
 	}
 	}
 	radeon_ring_ib_execute(rdev, ib);
 	radeon_ring_ib_execute(rdev, ib);

+ 2 - 2
drivers/gpu/drm/radeon/rs400.c

@@ -412,12 +412,12 @@ static int rs400_startup(struct radeon_device *rdev)
 	/* 1M ring buffer */
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
 	if (r) {
-		dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
 		return r;
 		return r;
 	}
 	}
 	r = r100_ib_init(rdev);
 	r = r100_ib_init(rdev);
 	if (r) {
 	if (r) {
-		dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
 		return r;
 		return r;
 	}
 	}
 	return 0;
 	return 0;

+ 2 - 2
drivers/gpu/drm/radeon/rs600.c

@@ -865,12 +865,12 @@ static int rs600_startup(struct radeon_device *rdev)
 	/* 1M ring buffer */
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
 	if (r) {
-		dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
 		return r;
 		return r;
 	}
 	}
 	r = r100_ib_init(rdev);
 	r = r100_ib_init(rdev);
 	if (r) {
 	if (r) {
-		dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
 		return r;
 		return r;
 	}
 	}
 
 

+ 2 - 2
drivers/gpu/drm/radeon/rs690.c

@@ -627,12 +627,12 @@ static int rs690_startup(struct radeon_device *rdev)
 	/* 1M ring buffer */
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
 	if (r) {
-		dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
 		return r;
 		return r;
 	}
 	}
 	r = r100_ib_init(rdev);
 	r = r100_ib_init(rdev);
 	if (r) {
 	if (r) {
-		dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
 		return r;
 		return r;
 	}
 	}
 
 

+ 2 - 2
drivers/gpu/drm/radeon/rv515.c

@@ -398,12 +398,12 @@ static int rv515_startup(struct radeon_device *rdev)
 	/* 1M ring buffer */
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
 	if (r) {
-		dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
 		return r;
 		return r;
 	}
 	}
 	r = r100_ib_init(rdev);
 	r = r100_ib_init(rdev);
 	if (r) {
 	if (r) {
-		dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
 		return r;
 		return r;
 	}
 	}
 	return 0;
 	return 0;

+ 1 - 1
drivers/gpu/drm/radeon/rv770.c

@@ -1209,7 +1209,7 @@ int rv770_resume(struct radeon_device *rdev)
 
 
 	r = r600_ib_test(rdev);
 	r = r600_ib_test(rdev);
 	if (r) {
 	if (r) {
-		DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
 		return r;
 		return r;
 	}
 	}
 
 

+ 3 - 3
drivers/isdn/mISDN/hwchannel.c

@@ -206,7 +206,7 @@ recv_Bchannel(struct bchannel *bch, unsigned int id)
 	hh->id = id;
 	hh->id = id;
 	if (bch->rcount >= 64) {
 	if (bch->rcount >= 64) {
 		printk(KERN_WARNING "B-channel %p receive queue overflow, "
 		printk(KERN_WARNING "B-channel %p receive queue overflow, "
-			"fushing!\n", bch);
+			"flushing!\n", bch);
 		skb_queue_purge(&bch->rqueue);
 		skb_queue_purge(&bch->rqueue);
 		bch->rcount = 0;
 		bch->rcount = 0;
 		return;
 		return;
@@ -231,7 +231,7 @@ recv_Bchannel_skb(struct bchannel *bch, struct sk_buff *skb)
 {
 {
 	if (bch->rcount >= 64) {
 	if (bch->rcount >= 64) {
 		printk(KERN_WARNING "B-channel %p receive queue overflow, "
 		printk(KERN_WARNING "B-channel %p receive queue overflow, "
-			"fushing!\n", bch);
+			"flushing!\n", bch);
 		skb_queue_purge(&bch->rqueue);
 		skb_queue_purge(&bch->rqueue);
 		bch->rcount = 0;
 		bch->rcount = 0;
 	}
 	}
@@ -279,7 +279,7 @@ confirm_Bsend(struct bchannel *bch)
 
 
 	if (bch->rcount >= 64) {
 	if (bch->rcount >= 64) {
 		printk(KERN_WARNING "B-channel %p receive queue overflow, "
 		printk(KERN_WARNING "B-channel %p receive queue overflow, "
-			"fushing!\n", bch);
+			"flushing!\n", bch);
 		skb_queue_purge(&bch->rqueue);
 		skb_queue_purge(&bch->rqueue);
 		bch->rcount = 0;
 		bch->rcount = 0;
 	}
 	}

+ 1 - 2
drivers/message/i2o/i2o_config.c

@@ -1044,8 +1044,7 @@ static long i2o_cfg_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
 
 
 static int cfg_open(struct inode *inode, struct file *file)
 static int cfg_open(struct inode *inode, struct file *file)
 {
 {
-	struct i2o_cfg_info *tmp =
-	    (struct i2o_cfg_info *)kmalloc(sizeof(struct i2o_cfg_info),
+	struct i2o_cfg_info *tmp = kmalloc(sizeof(struct i2o_cfg_info),
 					   GFP_KERNEL);
 					   GFP_KERNEL);
 	unsigned long flags;
 	unsigned long flags;
 
 

+ 2 - 3
drivers/mtd/nand/mxc_nand.c

@@ -722,9 +722,8 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
 		/*
 		/*
 		 * MXC NANDFC can only perform full page+spare or
 		 * MXC NANDFC can only perform full page+spare or
 		 * spare-only read/write.  When the upper layers
 		 * spare-only read/write.  When the upper layers
-		 * layers perform a read/write buf operation,
-		 * we will used the saved column address to index into
-		 * the full page.
+		 * perform a read/write buf operation, the saved column
+		  * address is used to index into the full page.
 		 */
 		 */
 		host->send_addr(host, 0, page_addr == -1);
 		host->send_addr(host, 0, page_addr == -1);
 		if (mtd->writesize > 512)
 		if (mtd->writesize > 512)

+ 2 - 2
drivers/net/atl1c/atl1c.h

@@ -265,7 +265,7 @@ struct atl1c_recv_ret_status {
 	__le32	word3;
 	__le32	word3;
 };
 };
 
 
-/* RFD desciptor */
+/* RFD descriptor */
 struct atl1c_rx_free_desc {
 struct atl1c_rx_free_desc {
 	__le64	buffer_addr;
 	__le64	buffer_addr;
 };
 };
@@ -531,7 +531,7 @@ struct atl1c_rfd_ring {
 	struct atl1c_buffer *buffer_info;
 	struct atl1c_buffer *buffer_info;
 };
 };
 
 
-/* receive return desciptor (rrd) ring */
+/* receive return descriptor (rrd) ring */
 struct atl1c_rrd_ring {
 struct atl1c_rrd_ring {
 	void *desc;		/* descriptor ring virtual address */
 	void *desc;		/* descriptor ring virtual address */
 	dma_addr_t dma;		/* descriptor ring physical address */
 	dma_addr_t dma;		/* descriptor ring physical address */

+ 1 - 1
drivers/net/qla3xxx.c

@@ -2460,7 +2460,7 @@ map_error:
  * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
  * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
  * in the IOCB plus a chain of outbound address lists (OAL) that
  * in the IOCB plus a chain of outbound address lists (OAL) that
  * each contain 5 ALPs.  The last ALP of the IOCB (3rd) or OAL (5th)
  * each contain 5 ALPs.  The last ALP of the IOCB (3rd) or OAL (5th)
- * will used to point to an OAL when more ALP entries are required.
+ * will be used to point to an OAL when more ALP entries are required.
  * The IOCB is always the top of the chain followed by one or more
  * The IOCB is always the top of the chain followed by one or more
  * OALs (when necessary).
  * OALs (when necessary).
  */
  */

+ 1 - 1
drivers/net/sungem.h

@@ -843,7 +843,7 @@ struct gem_txd {
 
 
 /* GEM requires that RX descriptors are provided four at a time,
 /* GEM requires that RX descriptors are provided four at a time,
  * aligned.  Also, the RX ring may not wrap around.  This means that
  * aligned.  Also, the RX ring may not wrap around.  This means that
- * there will be at least 4 unused desciptor entries in the middle
+ * there will be at least 4 unused descriptor entries in the middle
  * of the RX ring at all times.
  * of the RX ring at all times.
  *
  *
  * Similar to HME, GEM assumes that it can write garbage bytes before
  * Similar to HME, GEM assumes that it can write garbage bytes before

+ 0 - 1
drivers/platform/x86/acer-wmi.c

@@ -39,7 +39,6 @@
 #include <linux/slab.h>
 #include <linux/slab.h>
 #include <linux/input.h>
 #include <linux/input.h>
 #include <linux/input/sparse-keymap.h>
 #include <linux/input/sparse-keymap.h>
-#include <linux/dmi.h>
 
 
 #include <acpi/acpi_drivers.h>
 #include <acpi/acpi_drivers.h>
 
 

+ 1 - 1
drivers/scsi/aic7xxx/aic79xx.h

@@ -672,7 +672,7 @@ struct scb_data {
 /************************ Target Mode Definitions *****************************/
 /************************ Target Mode Definitions *****************************/
 
 
 /*
 /*
- * Connection desciptor for select-in requests in target mode.
+ * Connection descriptor for select-in requests in target mode.
  */
  */
 struct target_cmd {
 struct target_cmd {
 	uint8_t scsiid;		/* Our ID and the initiator's ID */
 	uint8_t scsiid;		/* Our ID and the initiator's ID */

+ 1 - 1
drivers/scsi/aic7xxx/aic7xxx.h

@@ -618,7 +618,7 @@ struct scb_data {
 /************************ Target Mode Definitions *****************************/
 /************************ Target Mode Definitions *****************************/
 
 
 /*
 /*
- * Connection desciptor for select-in requests in target mode.
+ * Connection descriptor for select-in requests in target mode.
  */
  */
 struct target_cmd {
 struct target_cmd {
 	uint8_t scsiid;		/* Our ID and the initiator's ID */
 	uint8_t scsiid;		/* Our ID and the initiator's ID */

+ 1 - 1
drivers/scsi/aic7xxx/aic7xxx_core.c

@@ -4780,7 +4780,7 @@ ahc_init_scbdata(struct ahc_softc *ahc)
 	SLIST_INIT(&scb_data->sg_maps);
 	SLIST_INIT(&scb_data->sg_maps);
 
 
 	/* Allocate SCB resources */
 	/* Allocate SCB resources */
-	scb_data->scbarray = (struct scb *)kmalloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC, GFP_ATOMIC);
+	scb_data->scbarray = kmalloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC, GFP_ATOMIC);
 	if (scb_data->scbarray == NULL)
 	if (scb_data->scbarray == NULL)
 		return (ENOMEM);
 		return (ENOMEM);
 	memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC);
 	memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC);

+ 2 - 2
drivers/scsi/megaraid.c

@@ -1412,7 +1412,7 @@ megaraid_isr_memmapped(int irq, void *devp)
  * @nstatus - number of completed commands
  * @nstatus - number of completed commands
  * @status - status of the last command completed
  * @status - status of the last command completed
  *
  *
- * Complete the comamnds and call the scsi mid-layer callback hooks.
+ * Complete the commands and call the scsi mid-layer callback hooks.
  */
  */
 static void
 static void
 mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
 mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
@@ -4296,7 +4296,7 @@ mega_support_cluster(adapter_t *adapter)
  * @adapter - pointer to our soft state
  * @adapter - pointer to our soft state
  * @dma_handle - DMA address of the buffer
  * @dma_handle - DMA address of the buffer
  *
  *
- * Issue internal comamnds while interrupts are available.
+ * Issue internal commands while interrupts are available.
  * We only issue direct mailbox commands from within the driver. ioctl()
  * We only issue direct mailbox commands from within the driver. ioctl()
  * interface using these routines can issue passthru commands.
  * interface using these routines can issue passthru commands.
  */
  */

+ 1 - 1
drivers/scsi/megaraid/megaraid_sas_base.c

@@ -890,7 +890,7 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
  * @instance:				Adapter soft state
  * @instance:				Adapter soft state
  * @cmd_to_abort:			Previously issued cmd to be aborted
  * @cmd_to_abort:			Previously issued cmd to be aborted
  *
  *
- * MFI firmware can abort previously issued AEN comamnd (automatic event
+ * MFI firmware can abort previously issued AEN command (automatic event
  * notification). The megasas_issue_blocked_abort_cmd() issues such abort
  * notification). The megasas_issue_blocked_abort_cmd() issues such abort
  * cmd and waits for return status.
  * cmd and waits for return status.
  * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
  * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs

+ 4 - 6
drivers/scsi/osst.c

@@ -1484,7 +1484,7 @@ static int osst_read_back_buffer_and_rewrite(struct osst_tape * STp, struct osst
 	int			dbg              = debugging;
 	int			dbg              = debugging;
 #endif
 #endif
 
 
-	if ((buffer = (unsigned char *)vmalloc((nframes + 1) * OS_DATA_SIZE)) == NULL)
+	if ((buffer = vmalloc((nframes + 1) * OS_DATA_SIZE)) == NULL)
 		return (-EIO);
 		return (-EIO);
 
 
 	printk(KERN_INFO "%s:I: Reading back %d frames from drive buffer%s\n",
 	printk(KERN_INFO "%s:I: Reading back %d frames from drive buffer%s\n",
@@ -2296,7 +2296,7 @@ static int osst_write_header(struct osst_tape * STp, struct osst_request ** aSRp
 	if (STp->raw) return 0;
 	if (STp->raw) return 0;
 
 
 	if (STp->header_cache == NULL) {
 	if (STp->header_cache == NULL) {
-		if ((STp->header_cache = (os_header_t *)vmalloc(sizeof(os_header_t))) == NULL) {
+		if ((STp->header_cache = vmalloc(sizeof(os_header_t))) == NULL) {
 			printk(KERN_ERR "%s:E: Failed to allocate header cache\n", name);
 			printk(KERN_ERR "%s:E: Failed to allocate header cache\n", name);
 			return (-ENOMEM);
 			return (-ENOMEM);
 		}
 		}
@@ -2484,7 +2484,7 @@ static int __osst_analyze_headers(struct osst_tape * STp, struct osst_request **
 				   name, ppos, update_frame_cntr);
 				   name, ppos, update_frame_cntr);
 #endif
 #endif
 		if (STp->header_cache == NULL) {
 		if (STp->header_cache == NULL) {
-			if ((STp->header_cache = (os_header_t *)vmalloc(sizeof(os_header_t))) == NULL) {
+			if ((STp->header_cache = vmalloc(sizeof(os_header_t))) == NULL) {
 				printk(KERN_ERR "%s:E: Failed to allocate header cache\n", name);
 				printk(KERN_ERR "%s:E: Failed to allocate header cache\n", name);
 				return 0;
 				return 0;
 			}
 			}
@@ -5851,9 +5851,7 @@ static int osst_probe(struct device *dev)
 	/* if this is the first attach, build the infrastructure */
 	/* if this is the first attach, build the infrastructure */
 	write_lock(&os_scsi_tapes_lock);
 	write_lock(&os_scsi_tapes_lock);
 	if (os_scsi_tapes == NULL) {
 	if (os_scsi_tapes == NULL) {
-		os_scsi_tapes =
-			(struct osst_tape **)kmalloc(osst_max_dev * sizeof(struct osst_tape *),
-				   GFP_ATOMIC);
+		os_scsi_tapes = kmalloc(osst_max_dev * sizeof(struct osst_tape *), GFP_ATOMIC);
 		if (os_scsi_tapes == NULL) {
 		if (os_scsi_tapes == NULL) {
 			write_unlock(&os_scsi_tapes_lock);
 			write_unlock(&os_scsi_tapes_lock);
 			printk(KERN_ERR "osst :E: Unable to allocate array for OnStream SCSI tapes.\n");
 			printk(KERN_ERR "osst :E: Unable to allocate array for OnStream SCSI tapes.\n");

+ 1 - 1
drivers/scsi/qla4xxx/ql4_isr.c

@@ -1027,7 +1027,7 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
 					((ddb_entry->default_time2wait +
 					((ddb_entry->default_time2wait +
 					  4) * HZ);
 					  4) * HZ);
 
 
-				DEBUG2(printk("scsi%ld: ddb [%d] initate"
+				DEBUG2(printk("scsi%ld: ddb [%d] initiate"
 					      " RELOGIN after %d seconds\n",
 					      " RELOGIN after %d seconds\n",
 					      ha->host_no,
 					      ha->host_no,
 					      ddb_entry->fw_ddb_index,
 					      ddb_entry->fw_ddb_index,

+ 1 - 1
drivers/scsi/qla4xxx/ql4_os.c

@@ -812,7 +812,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
 					);
 					);
 				start_dpc++;
 				start_dpc++;
 				DEBUG(printk("scsi%ld:%d:%d: ddb [%d] "
 				DEBUG(printk("scsi%ld:%d:%d: ddb [%d] "
-					     "initate relogin after"
+					     "initiate relogin after"
 					     " %d seconds\n",
 					     " %d seconds\n",
 					     ha->host_no, ddb_entry->bus,
 					     ha->host_no, ddb_entry->bus,
 					     ddb_entry->target,
 					     ddb_entry->target,

+ 0 - 1
drivers/target/target_core_hba.c

@@ -37,7 +37,6 @@
 
 
 #include <target/target_core_base.h>
 #include <target/target_core_base.h>
 #include <target/target_core_device.h>
 #include <target/target_core_device.h>
-#include <target/target_core_device.h>
 #include <target/target_core_tpg.h>
 #include <target/target_core_tpg.h>
 #include <target/target_core_transport.h>
 #include <target/target_core_transport.h>
 
 

+ 1 - 1
drivers/tty/hvc/hvcs.c

@@ -292,7 +292,7 @@ struct hvcs_struct {
 	/*
 	/*
 	 * Any variable below the kref is valid before a tty is connected and
 	 * Any variable below the kref is valid before a tty is connected and
 	 * stays valid after the tty is disconnected.  These shouldn't be
 	 * stays valid after the tty is disconnected.  These shouldn't be
-	 * whacked until the koject refcount reaches zero though some entries
+	 * whacked until the kobject refcount reaches zero though some entries
 	 * may be changed via sysfs initiatives.
 	 * may be changed via sysfs initiatives.
 	 */
 	 */
 	struct kref kref; /* ref count & hvcs_struct lifetime */
 	struct kref kref; /* ref count & hvcs_struct lifetime */

+ 0 - 1
drivers/tty/serial/pch_uart.c

@@ -15,7 +15,6 @@
  *Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
  *Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
  */
  */
 #include <linux/serial_reg.h>
 #include <linux/serial_reg.h>
-#include <linux/pci.h>
 #include <linux/module.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/pci.h>
 #include <linux/serial_core.h>
 #include <linux/serial_core.h>

+ 1 - 1
drivers/watchdog/sbc_epx_c3.c

@@ -220,7 +220,7 @@ module_exit(watchdog_exit);
 MODULE_AUTHOR("Calin A. Culianu <calin@ajvar.org>");
 MODULE_AUTHOR("Calin A. Culianu <calin@ajvar.org>");
 MODULE_DESCRIPTION("Hardware Watchdog Device for Winsystems EPX-C3 SBC.  "
 MODULE_DESCRIPTION("Hardware Watchdog Device for Winsystems EPX-C3 SBC.  "
 	"Note that there is no way to probe for this device -- "
 	"Note that there is no way to probe for this device -- "
-	"so only use it if you are *sure* you are runnning on this specific "
+	"so only use it if you are *sure* you are running on this specific "
 	"SBC system from Winsystems!  It writes to IO ports 0x1ee and 0x1ef!");
 	"SBC system from Winsystems!  It writes to IO ports 0x1ee and 0x1ef!");
 MODULE_LICENSE("GPL");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
 MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);

+ 1 - 1
fs/btrfs/disk-io.c

@@ -2493,7 +2493,7 @@ int close_ctree(struct btrfs_root *root)
 	 * ERROR state on disk.
 	 * ERROR state on disk.
 	 *
 	 *
 	 * 2. when btrfs flips readonly just in btrfs_commit_super,
 	 * 2. when btrfs flips readonly just in btrfs_commit_super,
-	 * and in such case, btrfs cannnot write sb via btrfs_commit_super,
+	 * and in such case, btrfs cannot write sb via btrfs_commit_super,
 	 * and since fs_state has been set BTRFS_SUPER_FLAG_ERROR flag,
 	 * and since fs_state has been set BTRFS_SUPER_FLAG_ERROR flag,
 	 * btrfs will cleanup all FS resources first and write sb then.
 	 * btrfs will cleanup all FS resources first and write sb then.
 	 */
 	 */

+ 2 - 2
fs/dcache.c

@@ -1808,7 +1808,7 @@ struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
 	 * false-negative result. d_lookup() protects against concurrent
 	 * false-negative result. d_lookup() protects against concurrent
 	 * renames using rename_lock seqlock.
 	 * renames using rename_lock seqlock.
 	 *
 	 *
-	 * See Documentation/vfs/dcache-locking.txt for more details.
+	 * See Documentation/filesystems/path-lookup.txt for more details.
 	 */
 	 */
 	hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) {
 	hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) {
 		struct inode *i;
 		struct inode *i;
@@ -1928,7 +1928,7 @@ struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
 	 * false-negative result. d_lookup() protects against concurrent
 	 * false-negative result. d_lookup() protects against concurrent
 	 * renames using rename_lock seqlock.
 	 * renames using rename_lock seqlock.
 	 *
 	 *
-	 * See Documentation/vfs/dcache-locking.txt for more details.
+	 * See Documentation/filesystems/path-lookup.txt for more details.
 	 */
 	 */
 	rcu_read_lock();
 	rcu_read_lock();
 	
 	

+ 3 - 3
fs/direct-io.c

@@ -645,11 +645,11 @@ static int dio_send_cur_page(struct dio *dio)
 		/*
 		/*
 		 * See whether this new request is contiguous with the old.
 		 * See whether this new request is contiguous with the old.
 		 *
 		 *
-		 * Btrfs cannot handl having logically non-contiguous requests
-		 * submitted.  For exmple if you have
+		 * Btrfs cannot handle having logically non-contiguous requests
+		 * submitted.  For example if you have
 		 *
 		 *
 		 * Logical:  [0-4095][HOLE][8192-12287]
 		 * Logical:  [0-4095][HOLE][8192-12287]
-		 * Phyiscal: [0-4095]      [4096-8181]
+		 * Physical: [0-4095]      [4096-8191]
 		 *
 		 *
 		 * We cannot submit those pages together as one BIO.  So if our
 		 * We cannot submit those pages together as one BIO.  So if our
 		 * current logical offset in the file does not equal what would
 		 * current logical offset in the file does not equal what would

+ 6 - 6
fs/eventpoll.c

@@ -62,7 +62,7 @@
  * This mutex is acquired by ep_free() during the epoll file
  * This mutex is acquired by ep_free() during the epoll file
  * cleanup path and it is also acquired by eventpoll_release_file()
  * cleanup path and it is also acquired by eventpoll_release_file()
  * if a file has been pushed inside an epoll set and it is then
  * if a file has been pushed inside an epoll set and it is then
- * close()d without a previous call toepoll_ctl(EPOLL_CTL_DEL).
+ * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
  * It is also acquired when inserting an epoll fd onto another epoll
  * It is also acquired when inserting an epoll fd onto another epoll
  * fd. We do this so that we walk the epoll tree and ensure that this
  * fd. We do this so that we walk the epoll tree and ensure that this
  * insertion does not create a cycle of epoll file descriptors, which
  * insertion does not create a cycle of epoll file descriptors, which
@@ -152,11 +152,11 @@ struct epitem {
 
 
 /*
 /*
  * This structure is stored inside the "private_data" member of the file
  * This structure is stored inside the "private_data" member of the file
- * structure and rapresent the main data sructure for the eventpoll
+ * structure and represents the main data structure for the eventpoll
  * interface.
  * interface.
  */
  */
 struct eventpoll {
 struct eventpoll {
-	/* Protect the this structure access */
+	/* Protect the access to this structure */
 	spinlock_t lock;
 	spinlock_t lock;
 
 
 	/*
 	/*
@@ -793,7 +793,7 @@ static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
 
 
 /*
 /*
  * This is the callback that is passed to the wait queue wakeup
  * This is the callback that is passed to the wait queue wakeup
- * machanism. It is called by the stored file descriptors when they
+ * mechanism. It is called by the stored file descriptors when they
  * have events to report.
  * have events to report.
  */
  */
 static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
 static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
@@ -824,9 +824,9 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
 		goto out_unlock;
 		goto out_unlock;
 
 
 	/*
 	/*
-	 * If we are trasfering events to userspace, we can hold no locks
+	 * If we are transferring events to userspace, we can hold no locks
 	 * (because we're accessing user memory, and because of linux f_op->poll()
 	 * (because we're accessing user memory, and because of linux f_op->poll()
-	 * semantics). All the events that happens during that period of time are
+	 * semantics). All the events that happen during that period of time are
 	 * chained in ep->ovflist and requeued later on.
 	 * chained in ep->ovflist and requeued later on.
 	 */
 	 */
 	if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {
 	if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {

+ 2 - 2
fs/ext4/extents.c

@@ -131,7 +131,7 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
 		 * fragmenting the file system's free space.  Maybe we
 		 * fragmenting the file system's free space.  Maybe we
 		 * should have some hueristics or some way to allow
 		 * should have some hueristics or some way to allow
 		 * userspace to pass a hint to file system,
 		 * userspace to pass a hint to file system,
-		 * especiially if the latter case turns out to be
+		 * especially if the latter case turns out to be
 		 * common.
 		 * common.
 		 */
 		 */
 		ex = path[depth].p_ext;
 		ex = path[depth].p_ext;
@@ -2844,7 +2844,7 @@ fix_extent_len:
  * ext4_get_blocks_dio_write() when DIO to write
  * ext4_get_blocks_dio_write() when DIO to write
  * to an uninitialized extent.
  * to an uninitialized extent.
  *
  *
- * Writing to an uninitized extent may result in splitting the uninitialized
+ * Writing to an uninitialized extent may result in splitting the uninitialized
  * extent into multiple /initialized uninitialized extents (up to three)
  * extent into multiple /initialized uninitialized extents (up to three)
  * There are three possibilities:
  * There are three possibilities:
  *   a> There is no split required: Entire extent should be uninitialized
  *   a> There is no split required: Entire extent should be uninitialized

+ 1 - 1
fs/fuse/cuse.c

@@ -458,7 +458,7 @@ static void cuse_fc_release(struct fuse_conn *fc)
  * @file: file struct being opened
  * @file: file struct being opened
  *
  *
  * Userland CUSE server can create a CUSE device by opening /dev/cuse
  * Userland CUSE server can create a CUSE device by opening /dev/cuse
- * and replying to the initilaization request kernel sends.  This
+ * and replying to the initialization request kernel sends.  This
  * function is responsible for handling CUSE device initialization.
  * function is responsible for handling CUSE device initialization.
  * Because the fd opened by this function is used during
  * Because the fd opened by this function is used during
  * initialization, this function only creates cuse_conn and sends
  * initialization, this function only creates cuse_conn and sends

+ 1 - 1
fs/notify/fanotify/fanotify_user.c

@@ -876,7 +876,7 @@ SYSCALL_ALIAS(sys_fanotify_mark, SyS_fanotify_mark);
 #endif
 #endif
 
 
 /*
 /*
- * fanotify_user_setup - Our initialization function.  Note that we cannnot return
+ * fanotify_user_setup - Our initialization function.  Note that we cannot return
  * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
  * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
  * must result in panic().
  * must result in panic().
  */
  */

+ 1 - 1
fs/notify/inotify/inotify_user.c

@@ -841,7 +841,7 @@ out:
 }
 }
 
 
 /*
 /*
- * inotify_user_setup - Our initialization function.  Note that we cannnot return
+ * inotify_user_setup - Our initialization function.  Note that we cannot return
  * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
  * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
  * must result in panic().
  * must result in panic().
  */
  */

+ 1 - 1
fs/ocfs2/dir.c

@@ -354,7 +354,7 @@ static inline int ocfs2_match(int len,
 /*
 /*
  * Returns 0 if not found, -1 on failure, and 1 on success
  * Returns 0 if not found, -1 on failure, and 1 on success
  */
  */
-static int inline ocfs2_search_dirblock(struct buffer_head *bh,
+static inline int ocfs2_search_dirblock(struct buffer_head *bh,
 					struct inode *dir,
 					struct inode *dir,
 					const char *name, int namelen,
 					const char *name, int namelen,
 					unsigned long offset,
 					unsigned long offset,

+ 2 - 2
include/asm-generic/user.h

@@ -1,8 +1,8 @@
 #ifndef __ASM_GENERIC_USER_H
 #ifndef __ASM_GENERIC_USER_H
 #define __ASM_GENERIC_USER_H
 #define __ASM_GENERIC_USER_H
 /*
 /*
- * This file may define a 'struct user' structure. However, it it only
- * used for a.out file, which are not supported on new architectures.
+ * This file may define a 'struct user' structure. However, it is only
+ * used for a.out files, which are not supported on new architectures.
  */
  */
 
 
 #endif	/* __ASM_GENERIC_USER_H */
 #endif	/* __ASM_GENERIC_USER_H */

+ 1 - 1
include/linux/mmzone.h

@@ -472,7 +472,7 @@ static inline int zone_is_oom_locked(const struct zone *zone)
 #ifdef CONFIG_NUMA
 #ifdef CONFIG_NUMA
 
 
 /*
 /*
- * The NUMA zonelists are doubled becausse we need zonelists that restrict the
+ * The NUMA zonelists are doubled because we need zonelists that restrict the
  * allocations to a single node for GFP_THISNODE.
  * allocations to a single node for GFP_THISNODE.
  *
  *
  * [0]	: Zonelist with fallback
  * [0]	: Zonelist with fallback

+ 3 - 3
init/Kconfig

@@ -745,9 +745,9 @@ config BLK_CGROUP
 
 
 	This option only enables generic Block IO controller infrastructure.
 	This option only enables generic Block IO controller infrastructure.
 	One needs to also enable actual IO controlling logic/policy. For
 	One needs to also enable actual IO controlling logic/policy. For
-	enabling proportional weight division of disk bandwidth in CFQ seti
-	CONFIG_CFQ_GROUP_IOSCHED=y and for enabling throttling policy set
-	CONFIG_BLK_THROTTLE=y.
+	enabling proportional weight division of disk bandwidth in CFQ, set
+	CONFIG_CFQ_GROUP_IOSCHED=y; for enabling throttling policy, set
+	CONFIG_BLK_DEV_THROTTLING=y.
 
 
 	See Documentation/cgroups/blkio-controller.txt for more information.
 	See Documentation/cgroups/blkio-controller.txt for more information.
 
 

+ 1 - 1
kernel/trace/ring_buffer.c

@@ -668,7 +668,7 @@ static struct list_head *rb_list_head(struct list_head *list)
  * the reader page). But if the next page is a header page,
  * the reader page). But if the next page is a header page,
  * its flags will be non zero.
  * its flags will be non zero.
  */
  */
-static int inline
+static inline int
 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
 		struct buffer_page *page, struct list_head *list)
 		struct buffer_page *page, struct list_head *list)
 {
 {

+ 3 - 3
mm/memory.c

@@ -2172,10 +2172,10 @@ EXPORT_SYMBOL_GPL(apply_to_page_range);
  * handle_pte_fault chooses page fault handler according to an entry
  * handle_pte_fault chooses page fault handler according to an entry
  * which was read non-atomically.  Before making any commitment, on
  * which was read non-atomically.  Before making any commitment, on
  * those architectures or configurations (e.g. i386 with PAE) which
  * those architectures or configurations (e.g. i386 with PAE) which
- * might give a mix of unmatched parts, do_swap_page and do_file_page
+ * might give a mix of unmatched parts, do_swap_page and do_nonlinear_fault
  * must check under lock before unmapping the pte and proceeding
  * must check under lock before unmapping the pte and proceeding
  * (but do_wp_page is only called after already making such a check;
  * (but do_wp_page is only called after already making such a check;
- * and do_anonymous_page and do_no_page can safely check later on).
+ * and do_anonymous_page can safely check later on).
  */
  */
 static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
 static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
 				pte_t *page_table, pte_t orig_pte)
 				pte_t *page_table, pte_t orig_pte)
@@ -2371,7 +2371,7 @@ reuse:
 		 * bit after it clear all dirty ptes, but before a racing
 		 * bit after it clear all dirty ptes, but before a racing
 		 * do_wp_page installs a dirty pte.
 		 * do_wp_page installs a dirty pte.
 		 *
 		 *
-		 * do_no_page is protected similarly.
+		 * __do_fault is protected similarly.
 		 */
 		 */
 		if (!page_mkwrite) {
 		if (!page_mkwrite) {
 			wait_on_page_locked(dirty_page);
 			wait_on_page_locked(dirty_page);

+ 1 - 1
mm/mempolicy.c

@@ -993,7 +993,7 @@ int do_migrate_pages(struct mm_struct *mm,
 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
 	 * that not only moved, but what's better, moved to an empty slot
 	 * that not only moved, but what's better, moved to an empty slot
 	 * (d is not set in tmp), then we break out then, with that pair.
 	 * (d is not set in tmp), then we break out then, with that pair.
-	 * Otherwise when we finish scannng from_tmp, we at least have the
+	 * Otherwise when we finish scanning from_tmp, we at least have the
 	 * most recent <s, d> pair that moved.  If we get all the way through
 	 * most recent <s, d> pair that moved.  If we get all the way through
 	 * the scan of tmp without finding any node that moved, much less
 	 * the scan of tmp without finding any node that moved, much less
 	 * moved to an empty node, then there is nothing left worth migrating.
 	 * moved to an empty node, then there is nothing left worth migrating.

+ 1 - 1
mm/shmem.c

@@ -779,7 +779,7 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
 			 * If truncating down to a partial page, then
 			 * If truncating down to a partial page, then
 			 * if that page is already allocated, hold it
 			 * if that page is already allocated, hold it
 			 * in memory until the truncation is over, so
 			 * in memory until the truncation is over, so
-			 * truncate_partial_page cannnot miss it were
+			 * truncate_partial_page cannot miss it were
 			 * it assigned to swap.
 			 * it assigned to swap.
 			 */
 			 */
 			if (newsize & (PAGE_CACHE_SIZE-1)) {
 			if (newsize & (PAGE_CACHE_SIZE-1)) {

+ 2 - 2
net/core/dev_addr_lists.c

@@ -357,8 +357,8 @@ EXPORT_SYMBOL(dev_addr_add_multiple);
 /**
 /**
  *	dev_addr_del_multiple - Delete device addresses by another device
  *	dev_addr_del_multiple - Delete device addresses by another device
  *	@to_dev: device where the addresses will be deleted
  *	@to_dev: device where the addresses will be deleted
- *	@from_dev: device by which addresses the addresses will be deleted
- *	@addr_type: address type - 0 means type will used from from_dev
+ *	@from_dev: device supplying the addresses to be deleted
+ *	@addr_type: address type - 0 means type will be used from from_dev
  *
  *
  *	Deletes addresses in to device by the list of addresses in from device.
  *	Deletes addresses in to device by the list of addresses in from device.
  *
  *

+ 1 - 1
net/ipv6/inet6_hashtables.c

@@ -124,7 +124,7 @@ out:
 }
 }
 EXPORT_SYMBOL(__inet6_lookup_established);
 EXPORT_SYMBOL(__inet6_lookup_established);
 
 
-static int inline compute_score(struct sock *sk, struct net *net,
+static inline int compute_score(struct sock *sk, struct net *net,
 				const unsigned short hnum,
 				const unsigned short hnum,
 				const struct in6_addr *daddr,
 				const struct in6_addr *daddr,
 				const int dif)
 				const int dif)

+ 1 - 1
net/mac80211/tx.c

@@ -169,7 +169,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
 	return cpu_to_le16(dur);
 	return cpu_to_le16(dur);
 }
 }
 
 
-static int inline is_ieee80211_device(struct ieee80211_local *local,
+static inline int is_ieee80211_device(struct ieee80211_local *local,
 				      struct net_device *dev)
 				      struct net_device *dev)
 {
 {
 	return local == wdev_priv(dev->ieee80211_ptr);
 	return local == wdev_priv(dev->ieee80211_ptr);

+ 2 - 2
sound/pci/au88x0/au88x0.h

@@ -211,7 +211,7 @@ static void vortex_adbdma_startfifo(vortex_t * vortex, int adbdma);
 //static void vortex_adbdma_stopfifo(vortex_t *vortex, int adbdma);
 //static void vortex_adbdma_stopfifo(vortex_t *vortex, int adbdma);
 static void vortex_adbdma_pausefifo(vortex_t * vortex, int adbdma);
 static void vortex_adbdma_pausefifo(vortex_t * vortex, int adbdma);
 static void vortex_adbdma_resumefifo(vortex_t * vortex, int adbdma);
 static void vortex_adbdma_resumefifo(vortex_t * vortex, int adbdma);
-static int inline vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma);
+static inline int vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma);
 static void vortex_adbdma_resetup(vortex_t *vortex, int adbdma);
 static void vortex_adbdma_resetup(vortex_t *vortex, int adbdma);
 
 
 #ifndef CHIP_AU8810
 #ifndef CHIP_AU8810
@@ -219,7 +219,7 @@ static void vortex_wtdma_startfifo(vortex_t * vortex, int wtdma);
 static void vortex_wtdma_stopfifo(vortex_t * vortex, int wtdma);
 static void vortex_wtdma_stopfifo(vortex_t * vortex, int wtdma);
 static void vortex_wtdma_pausefifo(vortex_t * vortex, int wtdma);
 static void vortex_wtdma_pausefifo(vortex_t * vortex, int wtdma);
 static void vortex_wtdma_resumefifo(vortex_t * vortex, int wtdma);
 static void vortex_wtdma_resumefifo(vortex_t * vortex, int wtdma);
-static int inline vortex_wtdma_getlinearpos(vortex_t * vortex, int wtdma);
+static inline int vortex_wtdma_getlinearpos(vortex_t * vortex, int wtdma);
 #endif
 #endif
 
 
 /* global stuff. */
 /* global stuff. */

+ 2 - 2
sound/pci/au88x0/au88x0_core.c

@@ -1249,7 +1249,7 @@ static void vortex_adbdma_resetup(vortex_t *vortex, int adbdma) {
 	}
 	}
 }
 }
 
 
-static int inline vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma)
+static inline int vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma)
 {
 {
 	stream_t *dma = &vortex->dma_adb[adbdma];
 	stream_t *dma = &vortex->dma_adb[adbdma];
 	int temp, page, delta;
 	int temp, page, delta;
@@ -1506,7 +1506,7 @@ static int vortex_wtdma_getcursubuffer(vortex_t * vortex, int wtdma)
 		 POS_SHIFT) & POS_MASK);
 		 POS_SHIFT) & POS_MASK);
 }
 }
 #endif
 #endif
-static int inline vortex_wtdma_getlinearpos(vortex_t * vortex, int wtdma)
+static inline int vortex_wtdma_getlinearpos(vortex_t * vortex, int wtdma)
 {
 {
 	stream_t *dma = &vortex->dma_wt[wtdma];
 	stream_t *dma = &vortex->dma_wt[wtdma];
 	int temp;
 	int temp;