Merge git://git.kernel.org/pub/scm/linux/kernel/git/bunk/trivial

* git://git.kernel.org/pub/scm/linux/kernel/git/bunk/trivial: (48 commits)
  Documentation: fix minor kernel-doc warnings
  BUG_ON() Conversion in drivers/net/
  BUG_ON() Conversion in drivers/s390/net/lcs.c
  BUG_ON() Conversion in mm/slab.c
  BUG_ON() Conversion in mm/highmem.c
  BUG_ON() Conversion in kernel/signal.c
  BUG_ON() Conversion in kernel/signal.c
  BUG_ON() Conversion in kernel/ptrace.c
  BUG_ON() Conversion in ipc/shm.c
  BUG_ON() Conversion in fs/freevxfs/
  BUG_ON() Conversion in fs/udf/
  BUG_ON() Conversion in fs/sysv/
  BUG_ON() Conversion in fs/inode.c
  BUG_ON() Conversion in fs/fcntl.c
  BUG_ON() Conversion in fs/dquot.c
  BUG_ON() Conversion in md/raid10.c
  BUG_ON() Conversion in md/raid6main.c
  BUG_ON() Conversion in md/raid5.c
  Fix minor documentation typo
  BFP->BPF in Documentation/networking/tuntap.txt
  ...
Linus Torvalds 19 years ago
parent
commit
63589ed078
68 changed files with 262 additions and 369 deletions
  1. Documentation/DocBook/Makefile (+1 -1)
  2. Documentation/DocBook/kernel-api.tmpl (+0 -1)
  3. Documentation/acpi-hotkey.txt (+1 -1)
  4. Documentation/fujitsu/frv/kernel-ABI.txt (+110 -82)
  5. Documentation/kernel-parameters.txt (+14 -20)
  6. Documentation/networking/packet_mmap.txt (+1 -1)
  7. Documentation/networking/tuntap.txt (+1 -1)
  8. arch/i386/kernel/crash.c (+1 -1)
  9. block/ll_rw_blk.c (+1 -1)
 10. drivers/md/dm-target.c (+1 -2)
 11. drivers/md/raid1.c (+2 -4)
 12. drivers/md/raid10.c (+2 -4)
 13. drivers/md/raid5.c (+12 -22)
 14. drivers/md/raid6main.c (+10 -19)
 15. drivers/mtd/chips/Kconfig (+0 -21)
 16. drivers/net/8139cp.c (+4 -8)
 17. drivers/net/arcnet/arcnet.c (+1 -2)
 18. drivers/net/b44.c (+1 -2)
 19. drivers/net/chelsio/sge.c (+1 -2)
 20. drivers/net/e1000/e1000_main.c (+1 -2)
 21. drivers/net/eql.c (+1 -2)
 22. drivers/net/irda/sa1100_ir.c (+1 -2)
 23. drivers/net/ne2k-pci.c (+1 -3)
 24. drivers/net/ns83820.c (+1 -2)
 25. drivers/net/starfire.c (+1 -2)
 26. drivers/net/tg3.c (+5 -10)
 27. drivers/net/tokenring/abyss.c (+1 -2)
 28. drivers/net/tokenring/madgemc.c (+1 -2)
 29. drivers/net/wireless/ipw2200.c (+3 -6)
 30. drivers/net/yellowfin.c (+1 -2)
 31. drivers/s390/block/dasd_erp.c (+3 -5)
 32. drivers/s390/char/sclp_rw.c (+1 -1)
 33. drivers/s390/char/tape_block.c (+4 -9)
 34. drivers/s390/net/lcs.c (+5 -8)
 35. drivers/scsi/aic7xxx/Kconfig.aic7xxx (+1 -1)
 36. drivers/serial/jsm/jsm.h (+1 -1)
 37. drivers/serial/jsm/jsm_driver.c (+1 -1)
 38. drivers/serial/jsm/jsm_neo.c (+1 -1)
 39. fs/direct-io.c (+1 -2)
 40. fs/dquot.c (+2 -4)
 41. fs/exec.c (+1 -1)
 42. fs/fcntl.c (+1 -2)
 43. fs/freevxfs/vxfs_olt.c (+3 -6)
 44. fs/hfsplus/bnode.c (+2 -4)
 45. fs/hfsplus/btree.c (+1 -2)
 46. fs/inode.c (+5 -10)
 47. fs/jffs2/background.c (+1 -2)
 48. fs/smbfs/file.c (+2 -4)
 49. fs/sysfs/dir.c (+1 -1)
 50. fs/sysfs/inode.c (+1 -2)
 51. fs/sysv/dir.c (+2 -4)
 52. fs/udf/inode.c (+2 -4)
 53. include/linux/fs.h (+1 -1)
 54. include/linux/hrtimer.h (+1 -1)
 55. ipc/shm.c (+7 -8)
 56. ipc/util.c (+2 -4)
 57. kernel/power/Kconfig (+1 -1)
 58. kernel/printk.c (+2 -4)
 59. kernel/ptrace.c (+1 -2)
 60. kernel/signal.c (+2 -4)
 61. kernel/time.c (+4 -4)
 62. kernel/timer.c (+1 -2)
 63. mm/highmem.c (+5 -10)
 64. mm/mmap.c (+3 -6)
 65. mm/page-writeback.c (+1 -1)
 66. mm/slab.c (+6 -12)
 67. mm/swap_state.c (+1 -2)
 68. mm/vmalloc.c (+1 -2)

+ 1 - 1
Documentation/DocBook/Makefile

@@ -2,7 +2,7 @@
 # This makefile is used to generate the kernel documentation,
 # primarily based on in-line comments in various source files.
 # See Documentation/kernel-doc-nano-HOWTO.txt for instruction in how
-# to ducument the SRC - and how to read it.
+# to document the SRC - and how to read it.
 # To add a new book the only step required is to add the book to the
 # list of DOCBOOKS.
 

+ 0 - 1
Documentation/DocBook/kernel-api.tmpl

@@ -322,7 +322,6 @@ X!Earch/i386/kernel/mca.c
   <chapter id="sysfs">
      <title>The Filesystem for Exporting Kernel Objects</title>
 !Efs/sysfs/file.c
-!Efs/sysfs/dir.c
 !Efs/sysfs/symlink.c
 !Efs/sysfs/bin.c
   </chapter>

+ 1 - 1
Documentation/acpi-hotkey.txt

@@ -30,7 +30,7 @@ specific hotkey(event))
 echo "event_num:event_type:event_argument" > 
 	/proc/acpi/hotkey/action.
 The result of the execution of this aml method is 
-attached to /proc/acpi/hotkey/poll_method, which is dnyamically
+attached to /proc/acpi/hotkey/poll_method, which is dynamically
 created.  Please use command "cat /proc/acpi/hotkey/polling_method" 
 to retrieve it.
 

+ 110 - 82
Documentation/fujitsu/frv/kernel-ABI.txt

@@ -1,17 +1,19 @@
-				 =================================
-				 INTERNAL KERNEL ABI FOR FR-V ARCH
-				 =================================
-
-The internal FRV kernel ABI is not quite the same as the userspace ABI. A number of the registers
-are used for special purposed, and the ABI is not consistent between modules vs core, and MMU vs
-no-MMU.
-
-This partly stems from the fact that FRV CPUs do not have a separate supervisor stack pointer, and
-most of them do not have any scratch registers, thus requiring at least one general purpose
-register to be clobbered in such an event. Also, within the kernel core, it is possible to simply
-jump or call directly between functions using a relative offset. This cannot be extended to modules
-for the displacement is likely to be too far. Thus in modules the address of a function to call
-must be calculated in a register and then used, requiring two extra instructions.
+			=================================
+			INTERNAL KERNEL ABI FOR FR-V ARCH
+			=================================
+
+The internal FRV kernel ABI is not quite the same as the userspace ABI. A
+number of the registers are used for special purposed, and the ABI is not
+consistent between modules vs core, and MMU vs no-MMU.
+
+This partly stems from the fact that FRV CPUs do not have a separate
+supervisor stack pointer, and most of them do not have any scratch
+registers, thus requiring at least one general purpose register to be
+clobbered in such an event. Also, within the kernel core, it is possible to
+simply jump or call directly between functions using a relative offset.
+This cannot be extended to modules for the displacement is likely to be too
+far. Thus in modules the address of a function to call must be calculated
+in a register and then used, requiring two extra instructions.
 
 This document has the following sections:
 
@@ -39,7 +41,8 @@ When a system call is made, the following registers are effective:
 CPU OPERATING MODES
 ===================
 
-The FR-V CPU has three basic operating modes. In order of increasing capability:
+The FR-V CPU has three basic operating modes. In order of increasing
+capability:
 
   (1) User mode.
 
@@ -47,42 +50,46 @@ The FR-V CPU has three basic operating modes. In order of increasing capability:
 
   (2) Kernel mode.
 
-      Normal kernel mode. There are many additional control registers available that may be
-      accessed in this mode, in addition to all the stuff available to user mode. This has two
-      submodes:
+      Normal kernel mode. There are many additional control registers
+      available that may be accessed in this mode, in addition to all the
+      stuff available to user mode. This has two submodes:
 
       (a) Exceptions enabled (PSR.T == 1).
 
-      	  Exceptions will invoke the appropriate normal kernel mode handler. On entry to the
-      	  handler, the PSR.T bit will be cleared.
+	  Exceptions will invoke the appropriate normal kernel mode
+	  handler. On entry to the handler, the PSR.T bit will be cleared.
 
       (b) Exceptions disabled (PSR.T == 0).
 
-      	  No exceptions or interrupts may happen. Any mandatory exceptions will cause the CPU to
-      	  halt unless the CPU is told to jump into debug mode instead.
+	  No exceptions or interrupts may happen. Any mandatory exceptions
+	  will cause the CPU to halt unless the CPU is told to jump into
+	  debug mode instead.
 
   (3) Debug mode.
 
-      No exceptions may happen in this mode. Memory protection and management exceptions will be
-      flagged for later consideration, but the exception handler won't be invoked. Debugging traps
-      such as hardware breakpoints and watchpoints will be ignored. This mode is entered only by
-      debugging events obtained from the other two modes.
+      No exceptions may happen in this mode. Memory protection and
+      management exceptions will be flagged for later consideration, but
+      the exception handler won't be invoked. Debugging traps such as
+      hardware breakpoints and watchpoints will be ignored. This mode is
+      entered only by debugging events obtained from the other two modes.
 
-      All kernel mode registers may be accessed, plus a few extra debugging specific registers.
+      All kernel mode registers may be accessed, plus a few extra debugging
+      specific registers.
 
 
 =================================
 INTERNAL KERNEL-MODE REGISTER ABI
 =================================
 
-There are a number of permanent register assignments that are set up by entry.S in the exception
-prologue. Note that there is a complete set of exception prologues for each of user->kernel
-transition and kernel->kernel transition. There are also user->debug and kernel->debug mode
-transition prologues.
+There are a number of permanent register assignments that are set up by
+entry.S in the exception prologue. Note that there is a complete set of
+exception prologues for each of user->kernel transition and kernel->kernel
+transition. There are also user->debug and kernel->debug mode transition
+prologues.
 
 
 	REGISTER	FLAVOUR	USE
-	===============	=======	====================================================
+	===============	=======	==============================================
 	GR1			Supervisor stack pointer
 	GR15			Current thread info pointer
 	GR16			GP-Rel base register for small data
@@ -92,10 +99,12 @@ transition prologues.
 	GR31		NOMMU	Destroyed by debug mode entry
 	GR31		MMU	Destroyed by TLB miss kernel mode entry
 	CCR.ICC2		Virtual interrupt disablement tracking
-	CCCR.CC3		Cleared by exception prologue (atomic op emulation)
+	CCCR.CC3		Cleared by exception prologue 
+				(atomic op emulation)
 	SCR0		MMU	See mmu-layout.txt.
 	SCR1		MMU	See mmu-layout.txt.
-	SCR2		MMU	Save for EAR0 (destroyed by icache insns in debug mode)
+	SCR2		MMU	Save for EAR0 (destroyed by icache insns 
+					       in debug mode)
 	SCR3		MMU	Save for GR31 during debug exceptions
 	DAMR/IAMR	NOMMU	Fixed memory protection layout.
 	DAMR/IAMR	MMU	See mmu-layout.txt.
@@ -104,18 +113,21 @@ transition prologues.
 Certain registers are also used or modified across function calls:
 
 	REGISTER	CALL				RETURN
-	===============	===============================	===============================
+	===============	===============================	======================
 	GR0		Fixed Zero			-
 	GR2		Function call frame pointer
 	GR3		Special				Preserved
 	GR3-GR7		-				Clobbered
-	GR8		Function call arg #1		Return value (or clobbered)
-	GR9		Function call arg #2		Return value MSW (or clobbered)
+	GR8		Function call arg #1		Return value 
+							(or clobbered)
+	GR9		Function call arg #2		Return value MSW 
+							(or clobbered)
 	GR10-GR13	Function call arg #3-#6		Clobbered
 	GR14		-				Clobbered
 	GR15-GR16	Special				Preserved
 	GR17-GR27	-				Preserved
-	GR28-GR31	Special				Only accessed explicitly
+	GR28-GR31	Special				Only accessed 
+							explicitly
 	LR		Return address after CALL	Clobbered
 	CCR/CCCR	-				Mostly Clobbered
 
@@ -124,46 +136,53 @@ Certain registers are also used or modified across function calls:
 INTERNAL DEBUG-MODE REGISTER ABI
 ================================
 
-This is the same as the kernel-mode register ABI for functions calls. The difference is that in
-debug-mode there's a different stack and a different exception frame. Almost all the global
-registers from kernel-mode (including the stack pointer) may be changed.
+This is the same as the kernel-mode register ABI for functions calls. The
+difference is that in debug-mode there's a different stack and a different
+exception frame. Almost all the global registers from kernel-mode
+(including the stack pointer) may be changed.
 
 	REGISTER	FLAVOUR	USE
-	===============	=======	====================================================
+	===============	=======	==============================================
 	GR1			Debug stack pointer
 	GR16			GP-Rel base register for small data
-	GR31			Current debug exception frame pointer (__debug_frame)
+	GR31			Current debug exception frame pointer 
+				(__debug_frame)
 	SCR3		MMU	Saved value of GR31
 
 
-Note that debug mode is able to interfere with the kernel's emulated atomic ops, so it must be
-exceedingly careful not to do any that would interact with the main kernel in this regard. Hence
-the debug mode code (gdbstub) is almost completely self-contained. The only external code used is
-the sprintf family of functions.
+Note that debug mode is able to interfere with the kernel's emulated atomic
+ops, so it must be exceedingly careful not to do any that would interact
+with the main kernel in this regard. Hence the debug mode code (gdbstub) is
+almost completely self-contained. The only external code used is the
+sprintf family of functions.
 
-Futhermore, break.S is so complicated because single-step mode does not switch off on entry to an
-exception. That means unless manually disabled, single-stepping will blithely go on stepping into
-things like interrupts. See gdbstub.txt for more information.
+Futhermore, break.S is so complicated because single-step mode does not
+switch off on entry to an exception. That means unless manually disabled,
+single-stepping will blithely go on stepping into things like interrupts.
+See gdbstub.txt for more information.
 
 
 ==========================
 VIRTUAL INTERRUPT HANDLING
 ==========================
 
-Because accesses to the PSR is so slow, and to disable interrupts we have to access it twice (once
-to read and once to write), we don't actually disable interrupts at all if we don't have to. What
-we do instead is use the ICC2 condition code flags to note virtual disablement, such that if we
-then do take an interrupt, we note the flag, really disable interrupts, set another flag and resume
-execution at the point the interrupt happened. Setting condition flags as a side effect of an
-arithmetic or logical instruction is really fast. This use of the ICC2 only occurs within the
+Because accesses to the PSR is so slow, and to disable interrupts we have
+to access it twice (once to read and once to write), we don't actually
+disable interrupts at all if we don't have to. What we do instead is use
+the ICC2 condition code flags to note virtual disablement, such that if we
+then do take an interrupt, we note the flag, really disable interrupts, set
+another flag and resume execution at the point the interrupt happened.
+Setting condition flags as a side effect of an arithmetic or logical
+instruction is really fast. This use of the ICC2 only occurs within the
 kernel - it does not affect userspace.
 kernel - it does not affect userspace.
 
 The flags we use are:
 
  (*) CCR.ICC2.Z [Zero flag]
 
-     modified by logical instructions without affecting the Carry flag.
+     Set to virtually disable interrupts, clear when interrupts are
+     virtually enabled. Can be modified by logical instructions without
+     affecting the Carry flag.
 
 
  (*) CCR.ICC2.C [Carry flag]
  (*) CCR.ICC2.C [Carry flag]
 
 
@@ -176,8 +195,9 @@ What happens is this:
 
 
 	ICC2.Z is 0, ICC2.C is 1.
 	ICC2.Z is 0, ICC2.C is 1.
 
 
- (2) An interrupt occurs. The exception prologue examines ICC2.Z and determines that nothing needs
-     doing. This is done simply with an unlikely BEQ instruction.
+ (2) An interrupt occurs. The exception prologue examines ICC2.Z and
+     determines that nothing needs doing. This is done simply with an
+     unlikely BEQ instruction.
 
 
  (3) The interrupts are disabled (local_irq_disable)
  (3) The interrupts are disabled (local_irq_disable)
 
 
@@ -187,48 +207,56 @@ What happens is this:
 
 
 	ICC2.Z would be set to 0.
 	ICC2.Z would be set to 0.
 
 
-     A TIHI #2 instruction (trap #2 if condition HI - Z==0 && C==0) would be used to trap if
-     interrupts were now virtually enabled, but physically disabled - which they're not, so the
-     trap isn't taken. The kernel would then be back to state (1).
+     A TIHI #2 instruction (trap #2 if condition HI - Z==0 && C==0) would
+     be used to trap if interrupts were now virtually enabled, but
+     physically disabled - which they're not, so the trap isn't taken. The
+     kernel would then be back to state (1).
 
 
- (5) An interrupt occurs. The exception prologue examines ICC2.Z and determines that the interrupt
-     shouldn't actually have happened. It jumps aside, and there disabled interrupts by setting
-     PSR.PIL to 14 and then it clears ICC2.C.
+ (5) An interrupt occurs. The exception prologue examines ICC2.Z and
+     determines that the interrupt shouldn't actually have happened. It
+     jumps aside, and there disabled interrupts by setting PSR.PIL to 14
+     and then it clears ICC2.C.
 
 
  (6) If interrupts were then saved and disabled again (local_irq_save):
  (6) If interrupts were then saved and disabled again (local_irq_save):
 
 
-	ICC2.Z would be shifted into the save variable and masked off (giving a 1).
+	ICC2.Z would be shifted into the save variable and masked off 
+	(giving a 1).
 
 
-	ICC2.Z would then be set to 1 (thus unchanged), and ICC2.C would be unaffected (ie: 0).
+	ICC2.Z would then be set to 1 (thus unchanged), and ICC2.C would be
+	unaffected (ie: 0).
 
 
  (7) If interrupts were then restored from state (6) (local_irq_restore):
  (7) If interrupts were then restored from state (6) (local_irq_restore):
 
 
-	ICC2.Z would be set to indicate the result of XOR'ing the saved value (ie: 1) with 1, which
-	gives a result of 0 - thus leaving ICC2.Z set.
+	ICC2.Z would be set to indicate the result of XOR'ing the saved
+	value (ie: 1) with 1, which gives a result of 0 - thus leaving
+	ICC2.Z set.
 
 
 	ICC2.C would remain unaffected (ie: 0).
 	ICC2.C would remain unaffected (ie: 0).
 
 
-     A TIHI #2 instruction would be used to again assay the current state, but this would do
-     nothing as Z==1.
+     A TIHI #2 instruction would be used to again assay the current state,
+     but this would do nothing as Z==1.
 
 
  (8) If interrupts were then enabled (local_irq_enable):
  (8) If interrupts were then enabled (local_irq_enable):
 
 
-	ICC2.Z would be cleared. ICC2.C would be left unaffected. Both flags would now be 0.
+	ICC2.Z would be cleared. ICC2.C would be left unaffected. Both
+	flags would now be 0.
 
 
-     A TIHI #2 instruction again issued to assay the current state would then trap as both Z==0
-     [interrupts virtually enabled] and C==0 [interrupts really disabled] would then be true.
+     A TIHI #2 instruction again issued to assay the current state would
+     then trap as both Z==0 [interrupts virtually enabled] and C==0
+     [interrupts really disabled] would then be true.
 
 
- (9) The trap #2 handler would simply enable hardware interrupts (set PSR.PIL to 0), set ICC2.C to
-     1 and return.
+ (9) The trap #2 handler would simply enable hardware interrupts 
+     (set PSR.PIL to 0), set ICC2.C to 1 and return.
 
 
 (10) Immediately upon returning, the pending interrupt would be taken.
 (10) Immediately upon returning, the pending interrupt would be taken.
 
 
-(11) The interrupt handler would take the path of actually processing the interrupt (ICC2.Z is
-     clear, BEQ fails as per step (2)).
+(11) The interrupt handler would take the path of actually processing the
+     interrupt (ICC2.Z is clear, BEQ fails as per step (2)).
 
 
-(12) The interrupt handler would then set ICC2.C to 1 since hardware interrupts are definitely
-     enabled - or else the kernel wouldn't be here.
+(12) The interrupt handler would then set ICC2.C to 1 since hardware
+     interrupts are definitely enabled - or else the kernel wouldn't be here.
 
 
 (13) On return from the interrupt handler, things would be back to state (1).
 (13) On return from the interrupt handler, things would be back to state (1).
 
 
-This trap (#2) is only available in kernel mode. In user mode it will result in SIGILL.
+This trap (#2) is only available in kernel mode. In user mode it will
+result in SIGILL.

+ 14 - 20
Documentation/kernel-parameters.txt

@@ -1,4 +1,4 @@
-February 2003             Kernel Parameters                     v2.5.59
+                          Kernel Parameters
                           ~~~~~~~~~~~~~~~~~
 
 The following is a consolidated list of the kernel parameters as implemented
@@ -17,9 +17,17 @@ are specified on the kernel command line with the module name plus
 
 	usbcore.blinkenlights=1
 
-The text in square brackets at the beginning of the description states the
-restrictions on the kernel for the said kernel parameter to be valid. The
-restrictions referred to are that the relevant option is valid if:
+This document may not be entirely up to date and comprehensive. The command
+"modinfo -p ${modulename}" shows a current list of all parameters of a loadable
+module. Loadable modules, after being loaded into the running kernel, also
+reveal their parameters in /sys/module/${modulename}/parameters/. Some of these
+parameters may be changed at runtime by the command
+"echo -n ${value} > /sys/module/${modulename}/parameters/${parm}".
+
+The parameters listed below are only valid if certain kernel build options were
+enabled and if respective hardware is present. The text in square brackets at
+the beginning of each description states the restrictions within which a
+parameter is applicable:
 
 	ACPI	ACPI support is enabled.
 	ALSA	ALSA sound support is enabled.
@@ -1046,10 +1054,10 @@ running once the system is up.
 	noltlbs		[PPC] Do not use large page/tlb entries for kernel
 			lowmem mapping on PPC40x.
 
-	nomce		[IA-32] Machine Check Exception
-
 	nomca		[IA-64] Disable machine check abort handling
 
+	nomce		[IA-32] Machine Check Exception
+
 	noresidual	[PPC] Don't use residual data on PReP machines.
 
 	noresume	[SWSUSP] Disables resume and restores original swap
@@ -1682,20 +1690,6 @@ running once the system is up.
 
 
 ______________________________________________________________________
-Changelog:
-
-2000-06-??	Mr. Unknown
-	The last known update (for 2.4.0) - the changelog was not kept before.
-
-2002-11-24	Petr Baudis <pasky@ucw.cz>
-		Randy Dunlap <randy.dunlap@verizon.net>
-	Update for 2.5.49, description for most of the options introduced,
-	references to other documentation (C files, READMEs, ..), added S390,
-	PPC, SPARC, MTD, ALSA and OSS category. Minor corrections and
-	reformatting.
-
-2005-10-19	Randy Dunlap <rdunlap@xenotime.net>
-	Lots of typos, whitespace, some reformatting.
 
 TODO:
 

+ 1 - 1
Documentation/networking/packet_mmap.txt

@@ -254,7 +254,7 @@ and, the number of frames be
 
 	<block number> * <block size> / <frame size>
 
-Suposse the following parameters, which apply for 2.6 kernel and an
+Suppose the following parameters, which apply for 2.6 kernel and an
 i386 architecture:
 
 	<size-max> = 131072 bytes

+ 1 - 1
Documentation/networking/tuntap.txt

@@ -138,7 +138,7 @@ This means that you have to read/write IP packets when you are using tun and
 ethernet frames when using tap.
 
 5. What is the difference between BPF and TUN/TAP driver?
-BFP is an advanced packet filter. It can be attached to existing
+BPF is an advanced packet filter. It can be attached to existing
 network interface. It does not provide a virtual network interface.
 A TUN/TAP driver does provide a virtual network interface and it is possible
 to attach BPF to this interface.

+ 1 - 1
arch/i386/kernel/crash.c

@@ -69,7 +69,7 @@ static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
 	 * for the data I pass, and I need tags
 	 * on the data to indicate what information I have
 	 * squirrelled away.  ELF notes happen to provide
-	 * all of that that no need to invent something new.
+	 * all of that, so there is no need to invent something new.
 	 */
 	buf = (u32*)per_cpu_ptr(crash_notes, cpu);
 	if (!buf)

+ 1 - 1
block/ll_rw_blk.c

@@ -1740,7 +1740,7 @@ EXPORT_SYMBOL(blk_run_queue);
 
 /**
  * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed
- * @q:    the request queue to be released
+ * @kobj:    the kobj belonging of the request queue to be released
  *
  * Description:
  *     blk_cleanup_queue is the pair to blk_init_queue() or

+ 1 - 2
drivers/md/dm-target.c

@@ -78,8 +78,7 @@ void dm_put_target_type(struct target_type *t)
 	if (--ti->use == 0)
 		module_put(ti->tt.module);
 
-	if (ti->use < 0)
-		BUG();
+	BUG_ON(ti->use < 0);
 	up_read(&_lock);
 
 	return;

+ 2 - 4
drivers/md/raid1.c

@@ -1558,8 +1558,7 @@ static int init_resync(conf_t *conf)
 	int buffs;
 
 	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
-	if (conf->r1buf_pool)
-		BUG();
+	BUG_ON(conf->r1buf_pool);
 	conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
 					  conf->poolinfo);
 	if (!conf->r1buf_pool)
@@ -1732,8 +1731,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 			    !conf->fullsync &&
 			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
 				break;
-			if (sync_blocks < (PAGE_SIZE>>9))
-				BUG();
+			BUG_ON(sync_blocks < (PAGE_SIZE>>9));
 			if (len > (sync_blocks<<9))
 				len = sync_blocks<<9;
 		}

+ 2 - 4
drivers/md/raid10.c

@@ -1117,8 +1117,7 @@ static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
 	for (i=0; i<conf->copies; i++)
 		if (r10_bio->devs[i].bio == bio)
 			break;
-	if (i == conf->copies)
-		BUG();
+	BUG_ON(i == conf->copies);
 	update_head_pos(i, r10_bio);
 	d = r10_bio->devs[i].devnum;
 
@@ -1518,8 +1517,7 @@ static int init_resync(conf_t *conf)
 	int buffs;
 
 	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
-	if (conf->r10buf_pool)
-		BUG();
+	BUG_ON(conf->r10buf_pool);
 	conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
 	if (!conf->r10buf_pool)
 		return -ENOMEM;

+ 12 - 22
drivers/md/raid5.c

@@ -73,10 +73,8 @@ static void print_raid5_conf (raid5_conf_t *conf);
 static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
 {
 	if (atomic_dec_and_test(&sh->count)) {
-		if (!list_empty(&sh->lru))
-			BUG();
-		if (atomic_read(&conf->active_stripes)==0)
-			BUG();
+		BUG_ON(!list_empty(&sh->lru));
+		BUG_ON(atomic_read(&conf->active_stripes)==0);
 		if (test_bit(STRIPE_HANDLE, &sh->state)) {
 			if (test_bit(STRIPE_DELAYED, &sh->state))
 				list_add_tail(&sh->lru, &conf->delayed_list);
@@ -184,10 +182,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int
 	raid5_conf_t *conf = sh->raid_conf;
 	int i;
 
-	if (atomic_read(&sh->count) != 0)
-		BUG();
-	if (test_bit(STRIPE_HANDLE, &sh->state))
-		BUG();
+	BUG_ON(atomic_read(&sh->count) != 0);
+	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
 	
 	CHECK_DEVLOCK();
 	PRINTK("init_stripe called, stripe %llu\n", 
@@ -269,8 +265,7 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector
 				init_stripe(sh, sector, pd_idx, disks);
 		} else {
 			if (atomic_read(&sh->count)) {
-				if (!list_empty(&sh->lru))
-					BUG();
+			  BUG_ON(!list_empty(&sh->lru));
 			} else {
 				if (!test_bit(STRIPE_HANDLE, &sh->state))
 					atomic_inc(&conf->active_stripes);
@@ -465,8 +460,7 @@ static int drop_one_stripe(raid5_conf_t *conf)
 	spin_unlock_irq(&conf->device_lock);
 	if (!sh)
 		return 0;
-	if (atomic_read(&sh->count))
-		BUG();
+	BUG_ON(atomic_read(&sh->count));
 	shrink_buffers(sh, conf->pool_size);
 	kmem_cache_free(conf->slab_cache, sh);
 	atomic_dec(&conf->active_stripes);
@@ -882,8 +876,7 @@ static void compute_parity(struct stripe_head *sh, int method)
 	ptr[0] = page_address(sh->dev[pd_idx].page);
 	switch(method) {
 	case READ_MODIFY_WRITE:
-		if (!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags))
-			BUG();
+		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags));
 		for (i=disks ; i-- ;) {
 			if (i==pd_idx)
 				continue;
@@ -896,7 +889,7 @@ static void compute_parity(struct stripe_head *sh, int method)
 				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
 					wake_up(&conf->wait_for_overlap);
 
-				if (sh->dev[i].written) BUG();
+				BUG_ON(sh->dev[i].written);
 				sh->dev[i].written = chosen;
 				check_xor();
 			}
@@ -912,7 +905,7 @@ static void compute_parity(struct stripe_head *sh, int method)
 				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
 					wake_up(&conf->wait_for_overlap);
 
-				if (sh->dev[i].written) BUG();
+				BUG_ON(sh->dev[i].written);
 				sh->dev[i].written = chosen;
 			}
 		break;
@@ -995,8 +988,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 	if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
 		goto overlap;
 
-	if (*bip && bi->bi_next && (*bip) != bi->bi_next)
-		BUG();
+	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
 	if (*bip)
 		bi->bi_next = *bip;
 	*bip = bi;
@@ -1430,8 +1422,7 @@ static void handle_stripe(struct stripe_head *sh)
 		set_bit(STRIPE_HANDLE, &sh->state);
 		if (failed == 0) {
 			char *pagea;
-			if (uptodate != disks)
-				BUG();
+			BUG_ON(uptodate != disks);
 			compute_parity(sh, CHECK_PARITY);
 			uptodate--;
 			pagea = page_address(sh->dev[sh->pd_idx].page);
@@ -2096,8 +2087,7 @@ static void raid5d (mddev_t *mddev)
 
 		list_del_init(first);
 		atomic_inc(&sh->count);
-		if (atomic_read(&sh->count)!= 1)
-			BUG();
+		BUG_ON(atomic_read(&sh->count)!= 1);
 		spin_unlock_irq(&conf->device_lock);
 		
 		handled++;

+ 10 - 19
drivers/md/raid6main.c

@@ -91,10 +91,8 @@ static void print_raid6_conf (raid6_conf_t *conf);
 static void __release_stripe(raid6_conf_t *conf, struct stripe_head *sh)
 {
 	if (atomic_dec_and_test(&sh->count)) {
-		if (!list_empty(&sh->lru))
-			BUG();
-		if (atomic_read(&conf->active_stripes)==0)
-			BUG();
+		BUG_ON(!list_empty(&sh->lru));
+		BUG_ON(atomic_read(&conf->active_stripes)==0);
 		if (test_bit(STRIPE_HANDLE, &sh->state)) {
 			if (test_bit(STRIPE_DELAYED, &sh->state))
 				list_add_tail(&sh->lru, &conf->delayed_list);
@@ -202,10 +200,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx)
 	raid6_conf_t *conf = sh->raid_conf;
 	int disks = conf->raid_disks, i;
 
-	if (atomic_read(&sh->count) != 0)
-		BUG();
-	if (test_bit(STRIPE_HANDLE, &sh->state))
-		BUG();
+	BUG_ON(atomic_read(&sh->count) != 0);
+	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
 
 	CHECK_DEVLOCK();
 	PRINTK("init_stripe called, stripe %llu\n",
@@ -284,13 +280,11 @@ static struct stripe_head *get_active_stripe(raid6_conf_t *conf, sector_t sector
 				init_stripe(sh, sector, pd_idx);
 		} else {
 			if (atomic_read(&sh->count)) {
-				if (!list_empty(&sh->lru))
-					BUG();
+				BUG_ON(!list_empty(&sh->lru));
 			} else {
 				if (!test_bit(STRIPE_HANDLE, &sh->state))
 					atomic_inc(&conf->active_stripes);
-				if (list_empty(&sh->lru))
-					BUG();
+				BUG_ON(list_empty(&sh->lru));
 				list_del_init(&sh->lru);
 			}
 		}
@@ -353,8 +347,7 @@ static int drop_one_stripe(raid6_conf_t *conf)
 	spin_unlock_irq(&conf->device_lock);
 	if (!sh)
 		return 0;
-	if (atomic_read(&sh->count))
-		BUG();
+	BUG_ON(atomic_read(&sh->count));
 	shrink_buffers(sh, conf->raid_disks);
 	kmem_cache_free(conf->slab_cache, sh);
 	atomic_dec(&conf->active_stripes);
@@ -780,7 +773,7 @@ static void compute_parity(struct stripe_head *sh, int method)
 				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
 					wake_up(&conf->wait_for_overlap);
 
-				if (sh->dev[i].written) BUG();
+				BUG_ON(sh->dev[i].written);
 				sh->dev[i].written = chosen;
 			}
 		break;
@@ -970,8 +963,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 	if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
 		goto overlap;
 
-	if (*bip && bi->bi_next && (*bip) != bi->bi_next)
-		BUG();
+	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
 	if (*bip)
 		bi->bi_next = *bip;
 	*bip = bi;
@@ -1906,8 +1898,7 @@ static void raid6d (mddev_t *mddev)
 
 		list_del_init(first);
 		atomic_inc(&sh->count);
-		if (atomic_read(&sh->count)!= 1)
-			BUG();
+		BUG_ON(atomic_read(&sh->count)!= 1);
 		spin_unlock_irq(&conf->device_lock);
 
 		handled++;

+ 0 - 21
drivers/mtd/chips/Kconfig

@@ -200,27 +200,6 @@ config MTD_CFI_AMDSTD
 	  provides support for one of those command sets, used on chips
 	  including the AMD Am29LV320.
 
-config MTD_CFI_AMDSTD_RETRY
-	int "Retry failed commands (erase/program)"
-	depends on MTD_CFI_AMDSTD
-	default "0"
-	help
-	  Some chips, when attached to a shared bus, don't properly filter
-	  bus traffic that is destined to other devices.  This broken
-	  behavior causes erase and program sequences to be aborted when
-	  the sequences are mixed with traffic for other devices.
-
-	  SST49LF040 (and related) chips are know to be broken.
-
-config MTD_CFI_AMDSTD_RETRY_MAX
-	int "Max retries of failed commands (erase/program)"
-	depends on MTD_CFI_AMDSTD_RETRY
-	default "0"
-	help
-	  If you have an SST49LF040 (or related chip) then this value should
-	  be set to at least 1.  This can also be adjusted at driver load
-	  time with the retry_cmd_max module parameter.
-
 config MTD_CFI_STAA
 	tristate "Support for ST (Advanced Architecture) flash chips"
 	depends on MTD_GEN_PROBE

+ 4 - 8
drivers/net/8139cp.c

@@ -539,8 +539,7 @@ rx_status_loop:
 		unsigned buflen;
 
 		skb = cp->rx_skb[rx_tail].skb;
-		if (!skb)
-			BUG();
+		BUG_ON(!skb);
 
 		desc = &cp->rx_ring[rx_tail];
 		status = le32_to_cpu(desc->opts1);
@@ -723,8 +722,7 @@ static void cp_tx (struct cp_private *cp)
 			break;
 
 		skb = cp->tx_skb[tx_tail].skb;
-		if (!skb)
-			BUG();
+		BUG_ON(!skb);
 
 		pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping,
 				 cp->tx_skb[tx_tail].len, PCI_DMA_TODEVICE);
@@ -1550,8 +1548,7 @@ static void cp_get_ethtool_stats (struct net_device *dev,
 	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
 	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
 	tmp_stats[i++] = cp->cp_stats.rx_frags;
-	if (i != CP_NUM_STATS)
-		BUG();
+	BUG_ON(i != CP_NUM_STATS);
 
 	pci_free_consistent(cp->pdev, sizeof(*nic_stats), nic_stats, dma);
 }
@@ -1856,8 +1853,7 @@ static void cp_remove_one (struct pci_dev *pdev)
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct cp_private *cp = netdev_priv(dev);
 
-	if (!dev)
-		BUG();
+	BUG_ON(!dev);
 	unregister_netdev(dev);
 	iounmap(cp->regs);
 	if (cp->wol_enabled) pci_set_power_state (pdev, PCI_D0);

+ 1 - 2
drivers/net/arcnet/arcnet.c

@@ -765,8 +765,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 	BUGMSG(D_DURING, "in arcnet_interrupt\n");
 	
 	lp = dev->priv;
-	if (!lp)
-		BUG();
+	BUG_ON(!lp);
 		
 	spin_lock(&lp->lock);
 

+ 1 - 2
drivers/net/b44.c

@@ -608,8 +608,7 @@ static void b44_tx(struct b44 *bp)
 		struct ring_info *rp = &bp->tx_buffers[cons];
 		struct sk_buff *skb = rp->skb;
 
-		if (unlikely(skb == NULL))
-			BUG();
+		BUG_ON(skb == NULL);
 
 		pci_unmap_single(bp->pdev,
 				 pci_unmap_addr(rp, mapping),

+ 1 - 2
drivers/net/chelsio/sge.c

@@ -1093,8 +1093,7 @@ static int process_responses(struct adapter *adapter, int budget)
 		if (likely(e->DataValid)) {
 			struct freelQ *fl = &sge->freelQ[e->FreelistQid];
 
-			if (unlikely(!e->Sop || !e->Eop))
-				BUG();
+			BUG_ON(!e->Sop || !e->Eop);
 			if (unlikely(e->Offload))
 				unexpected_offload(adapter, fl);
 			else

+ 1 - 2
drivers/net/e1000/e1000_main.c

@@ -3308,8 +3308,7 @@ e1000_clean(struct net_device *poll_dev, int *budget)
 
 	while (poll_dev != &adapter->polling_netdev[i]) {
 		i++;
-		if (unlikely(i == adapter->num_rx_queues))
-			BUG();
+		BUG_ON(i == adapter->num_rx_queues);
 	}
 
 	if (likely(adapter->num_tx_queues == 1)) {

+ 1 - 2
drivers/net/eql.c

@@ -203,8 +203,7 @@ static int eql_open(struct net_device *dev)
 	printk(KERN_INFO "%s: remember to turn off Van-Jacobson compression on "
 	       "your slave devices.\n", dev->name);
 
-	if (!list_empty(&eql->queue.all_slaves))
-		BUG();
+	BUG_ON(!list_empty(&eql->queue.all_slaves));
 
 	eql->min_slaves = 1;
 	eql->max_slaves = EQL_DEFAULT_MAX_SLAVES; /* 4 usually... */

+ 1 - 2
drivers/net/irda/sa1100_ir.c

@@ -695,8 +695,7 @@ static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
 		/*
 		 * We must not be transmitting...
 		 */
-		if (si->txskb)
-			BUG();
+		BUG_ON(si->txskb);
 
 		netif_stop_queue(dev);
 

+ 1 - 3
drivers/net/ne2k-pci.c

@@ -645,9 +645,7 @@ static void __devexit ne2k_pci_remove_one (struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 
-	if (!dev)
-		BUG();
-
+	BUG_ON(!dev);
 	unregister_netdev(dev);
 	release_region(dev->base_addr, NE_IO_EXTENT);
 	free_netdev(dev);

+ 1 - 2
drivers/net/ns83820.c

@@ -568,8 +568,7 @@ static inline int ns83820_add_rx_skb(struct ns83820 *dev, struct sk_buff *skb)
 #endif
 
 	sg = dev->rx_info.descs + (next_empty * DESC_SIZE);
-	if (unlikely(NULL != dev->rx_info.skbs[next_empty]))
-		BUG();
+	BUG_ON(NULL != dev->rx_info.skbs[next_empty]);
 	dev->rx_info.skbs[next_empty] = skb;
 
 	dev->rx_info.next_empty = (next_empty + 1) % NR_RX_DESC;

+ 1 - 2
drivers/net/starfire.c

@@ -2122,8 +2122,7 @@ static void __devexit starfire_remove_one (struct pci_dev *pdev)
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct netdev_private *np = netdev_priv(dev);
 
-	if (!dev)
-		BUG();
+	BUG_ON(!dev);
 
 	unregister_netdev(dev);
 

+ 5 - 10
drivers/net/tg3.c

@@ -2959,9 +2959,7 @@ static void tg3_tx(struct tg3 *tp)
 		struct sk_buff *skb = ri->skb;
 		int i;
 
-		if (unlikely(skb == NULL))
-			BUG();
-
+		BUG_ON(skb == NULL);
 		pci_unmap_single(tp->pdev,
 				 pci_unmap_addr(ri, mapping),
 				 skb_headlen(skb),
@@ -2972,12 +2970,10 @@ static void tg3_tx(struct tg3 *tp)
 		sw_idx = NEXT_TX(sw_idx);
 
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-			if (unlikely(sw_idx == hw_idx))
-				BUG();
+			BUG_ON(sw_idx == hw_idx);
 
 			ri = &tp->tx_buffers[sw_idx];
-			if (unlikely(ri->skb != NULL))
-				BUG();
+			BUG_ON(ri->skb != NULL);
 
 			pci_unmap_page(tp->pdev,
 				       pci_unmap_addr(ri, mapping),
@@ -4928,9 +4924,8 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
 {
 	int i;
 
-	if (offset == TX_CPU_BASE &&
-	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
-		BUG();
+	BUG_ON(offset == TX_CPU_BASE &&
+	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
 
 	if (offset == RX_CPU_BASE) {
 		for (i = 0; i < 10000; i++) {

+ 1 - 2
drivers/net/tokenring/abyss.c

@@ -438,8 +438,7 @@ static void __devexit abyss_detach (struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 	
-	if (!dev)
-		BUG();
+	BUG_ON(!dev);
 	unregister_netdev(dev);
 	release_region(dev->base_addr-0x10, ABYSS_IO_EXTENT);
 	free_irq(dev->irq, dev);

+ 1 - 2
drivers/net/tokenring/madgemc.c

@@ -735,8 +735,7 @@ static int __devexit madgemc_remove(struct device *device)
 	struct net_local *tp;
         struct card_info *card;
 
-	if (!dev)
-		BUG();
+	BUG_ON(!dev);
 
 	tp = dev->priv;
 	card = tp->tmspriv;

+ 3 - 6
drivers/net/wireless/ipw2200.c

@@ -5573,8 +5573,7 @@ static void ipw_adhoc_create(struct ipw_priv *priv,
 	case IEEE80211_52GHZ_BAND:
 		network->mode = IEEE_A;
 		i = ieee80211_channel_to_index(priv->ieee, priv->channel);
-		if (i == -1)
-			BUG();
+		BUG_ON(i == -1);
 		if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
 			IPW_WARNING("Overriding invalid channel\n");
 			priv->channel = geo->a[0].channel;
@@ -5587,8 +5586,7 @@ static void ipw_adhoc_create(struct ipw_priv *priv,
 		else
 			network->mode = IEEE_B;
 		i = ieee80211_channel_to_index(priv->ieee, priv->channel);
-		if (i == -1)
-			BUG();
+		BUG_ON(i == -1);
 		if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
 			IPW_WARNING("Overriding invalid channel\n");
 			priv->channel = geo->bg[0].channel;
@@ -6715,8 +6713,7 @@ static int ipw_qos_association(struct ipw_priv *priv,
 
 	switch (priv->ieee->iw_mode) {
 	case IW_MODE_ADHOC:
-		if (!(network->capability & WLAN_CAPABILITY_IBSS))
-			BUG();
+		BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
 
 		qos_data = &ibss_data;
 		break;

+ 1 - 2
drivers/net/yellowfin.c

@@ -1441,8 +1441,7 @@ static void __devexit yellowfin_remove_one (struct pci_dev *pdev)
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct yellowfin_private *np;
 
-	if (!dev)
-		BUG();
+	BUG_ON(!dev);
 	np = netdev_priv(dev);
 
         pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status, 

+ 3 - 5
drivers/s390/block/dasd_erp.c

@@ -32,9 +32,8 @@ dasd_alloc_erp_request(char *magic, int cplength, int datasize,
 	int size;
 
 	/* Sanity checks */
-	if ( magic == NULL || datasize > PAGE_SIZE ||
-	     (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
-		BUG();
+	BUG_ON( magic == NULL || datasize > PAGE_SIZE ||
+	     (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
 
 	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
 	if (cplength > 0)
@@ -125,8 +124,7 @@ dasd_default_erp_postaction(struct dasd_ccw_req * cqr)
 	struct dasd_device *device;
 	int success;
 
-	if (cqr->refers == NULL || cqr->function == NULL)
-		BUG();
+	BUG_ON(cqr->refers == NULL || cqr->function == NULL);
 
 	device = cqr->device;
 	success = cqr->status == DASD_CQR_DONE;

+ 1 - 1
drivers/s390/char/sclp_rw.c

@@ -24,7 +24,7 @@
 
 /*
  * The room for the SCCB (only for writing) is not equal to a pages size
- * (as it is specified as the maximum size in the the SCLP ducumentation)
+ * (as it is specified as the maximum size in the the SCLP documentation)
  * because of the additional data structure described above.
 */
 #define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer))

+ 4 - 9
drivers/s390/char/tape_block.c

@@ -198,9 +198,7 @@ tapeblock_request_fn(request_queue_t *queue)
 
 	device = (struct tape_device *) queue->queuedata;
 	DBF_LH(6, "tapeblock_request_fn(device=%p)\n", device);
-	if (device == NULL)
-		BUG();
-
+	BUG_ON(device == NULL);
 	tapeblock_trigger_requeue(device);
 }
 
@@ -307,8 +305,7 @@ tapeblock_revalidate_disk(struct gendisk *disk)
 	int			rc;
 
 	device = (struct tape_device *) disk->private_data;
-	if (!device)
-		BUG();
+	BUG_ON(!device);
 
 	if (!device->blk_data.medium_changed)
 		return 0;
@@ -440,11 +437,9 @@ tapeblock_ioctl(
 
 	rc     = 0;
 	disk   = inode->i_bdev->bd_disk;
-	if (!disk)
-		BUG();
+	BUG_ON(!disk);
 	device = disk->private_data;
-	if (!device)
-		BUG();
+	BUG_ON(!device);
 	minor  = iminor(inode);
 
 	DBF_LH(6, "tapeblock_ioctl(0x%0x)\n", command);

+ 5 - 8
drivers/s390/net/lcs.c

@@ -675,9 +675,8 @@ lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
 	int index, rc;
 
 	LCS_DBF_TEXT(5, trace, "rdybuff");
-	if (buffer->state != BUF_STATE_LOCKED &&
-	    buffer->state != BUF_STATE_PROCESSED)
-		BUG();
+	BUG_ON(buffer->state != BUF_STATE_LOCKED &&
+		buffer->state != BUF_STATE_PROCESSED);
 	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
 	buffer->state = BUF_STATE_READY;
 	index = buffer - channel->iob;
@@ -701,8 +700,7 @@ __lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
 	int index, prev, next;
 
 	LCS_DBF_TEXT(5, trace, "prcsbuff");
-	if (buffer->state != BUF_STATE_READY)
-		BUG();
+	BUG_ON(buffer->state != BUF_STATE_READY);
 	buffer->state = BUF_STATE_PROCESSED;
 	index = buffer - channel->iob;
 	prev = (index - 1) & (LCS_NUM_BUFFS - 1);
@@ -734,9 +732,8 @@ lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
 	unsigned long flags;
 
 	LCS_DBF_TEXT(5, trace, "relbuff");
-	if (buffer->state != BUF_STATE_LOCKED &&
-	    buffer->state != BUF_STATE_PROCESSED)
-		BUG();
+	BUG_ON(buffer->state != BUF_STATE_LOCKED &&
+		buffer->state != BUF_STATE_PROCESSED);
 	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
 	buffer->state = BUF_STATE_EMPTY;
 	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

+ 1 - 1
drivers/scsi/aic7xxx/Kconfig.aic7xxx

@@ -86,7 +86,7 @@ config AIC7XXX_DEBUG_MASK
         default "0"
         help
 	Bit mask of debug options that is only valid if the
-	CONFIG_AIC7XXX_DEBUG_ENBLE option is enabled.  The bits in this mask
+	CONFIG_AIC7XXX_DEBUG_ENABLE option is enabled.  The bits in this mask
 	are defined in the drivers/scsi/aic7xxx/aic7xxx.h - search for the
 	variable ahc_debug in that file to find them.
 

+ 1 - 1
drivers/serial/jsm/jsm.h

@@ -20,7 +20,7 @@
  *
  * Contact Information:
  * Scott H Kilau <Scott_Kilau@digi.com>
- * Wendy Xiong   <wendyx@us.ltcfwd.linux.ibm.com>
+ * Wendy Xiong   <wendyx@us.ibm.com>
  *
  ***********************************************************************/
 

+ 1 - 1
drivers/serial/jsm/jsm_driver.c

@@ -20,7 +20,7 @@
  *
  * Contact Information:
  * Scott H Kilau <Scott_Kilau@digi.com>
- * Wendy Xiong   <wendyx@us.ltcfwd.linux.ibm.com>
+ * Wendy Xiong   <wendyx@us.ibm.com>
  *
  *
  ***********************************************************************/

+ 1 - 1
drivers/serial/jsm/jsm_neo.c

@@ -20,7 +20,7 @@
  *
  * Contact Information:
  * Scott H Kilau <Scott_Kilau@digi.com>
- * Wendy Xiong   <wendyx@us.ltcfwd.linux.ibm.com>
+ * Wendy Xiong   <wendyx@us.ibm.com>
  *
  ***********************************************************************/
 #include <linux/delay.h>	/* For udelay */

+ 1 - 2
fs/direct-io.c

@@ -929,8 +929,7 @@ do_holes:
 			block_in_page += this_chunk_blocks;
 			dio->blocks_available -= this_chunk_blocks;
 next_block:
-			if (dio->block_in_file > dio->final_block_in_request)
-				BUG();
+			BUG_ON(dio->block_in_file > dio->final_block_in_request);
 			if (dio->block_in_file == dio->final_block_in_request)
 				break;
 		}

+ 2 - 4
fs/dquot.c

@@ -590,8 +590,7 @@ we_slept:
 	atomic_dec(&dquot->dq_count);
 #ifdef __DQUOT_PARANOIA
 	/* sanity check */
-	if (!list_empty(&dquot->dq_free))
-		BUG();
+	BUG_ON(!list_empty(&dquot->dq_free));
 #endif
 	put_dquot_last(dquot);
 	spin_unlock(&dq_list_lock);
@@ -666,8 +665,7 @@ we_slept:
 		return NODQUOT;
 	}
 #ifdef __DQUOT_PARANOIA
-	if (!dquot->dq_sb)	/* Has somebody invalidated entry under us? */
-		BUG();
+	BUG_ON(!dquot->dq_sb);	/* Has somebody invalidated entry under us? */
 #endif
 
 	return dquot;

+ 1 - 1
fs/exec.c

@@ -561,7 +561,7 @@ static int exec_mmap(struct mm_struct *mm)
 	arch_pick_mmap_layout(mm);
 	if (old_mm) {
 		up_read(&old_mm->mmap_sem);
-		if (active_mm != old_mm) BUG();
+		BUG_ON(active_mm != old_mm);
 		mmput(old_mm);
 		return 0;
 	}

+ 1 - 2
fs/fcntl.c

@@ -453,8 +453,7 @@ static void send_sigio_to_task(struct task_struct *p,
 			/* Make sure we are called with one of the POLL_*
 			   reasons, otherwise we could leak kernel stack into
 			   userspace.  */
-			if ((reason & __SI_MASK) != __SI_POLL)
-				BUG();
+			BUG_ON((reason & __SI_MASK) != __SI_POLL);
 			if (reason - POLL_IN >= NSIGPOLL)
 				si.si_band  = ~0L;
 			else

+ 3 - 6
fs/freevxfs/vxfs_olt.c

@@ -42,24 +42,21 @@
 static inline void
 vxfs_get_fshead(struct vxfs_oltfshead *fshp, struct vxfs_sb_info *infp)
 {
-	if (infp->vsi_fshino)
-		BUG();
+	BUG_ON(infp->vsi_fshino);
 	infp->vsi_fshino = fshp->olt_fsino[0];
 }
 
 static inline void
 vxfs_get_ilist(struct vxfs_oltilist *ilistp, struct vxfs_sb_info *infp)
 {
-	if (infp->vsi_iext)
-		BUG();
+	BUG_ON(infp->vsi_iext);
 	infp->vsi_iext = ilistp->olt_iext[0]; 
 }
 
 static inline u_long
 vxfs_oblock(struct super_block *sbp, daddr_t block, u_long bsize)
 {
-	if (sbp->s_blocksize % bsize)
-		BUG();
+	BUG_ON(sbp->s_blocksize % bsize);
 	return (block * (sbp->s_blocksize / bsize));
 }
 

+ 2 - 4
fs/hfsplus/bnode.c

@@ -466,8 +466,7 @@ void hfs_bnode_unhash(struct hfs_bnode *node)
 	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
 	     *p && *p != node; p = &(*p)->next_hash)
 		;
-	if (!*p)
-		BUG();
+	BUG_ON(!*p);
 	*p = node->next_hash;
 	node->tree->node_hash_cnt--;
 }
@@ -622,8 +621,7 @@ void hfs_bnode_put(struct hfs_bnode *node)
 
 		dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n",
 		       node->tree->cnid, node->this, atomic_read(&node->refcnt));
-		if (!atomic_read(&node->refcnt))
-			BUG();
+		BUG_ON(!atomic_read(&node->refcnt));
 		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
 			return;
 		for (i = 0; i < tree->pages_per_bnode; i++) {

+ 1 - 2
fs/hfsplus/btree.c

@@ -269,8 +269,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
 	u8 *data, byte, m;
 
 	dprint(DBG_BNODE_MOD, "btree_free_node: %u\n", node->this);
-	if (!node->this)
-		BUG();
+	BUG_ON(!node->this);
 	tree = node->tree;
 	nidx = node->this;
 	node = hfs_bnode_find(tree, 0);

+ 5 - 10
fs/inode.c

@@ -172,8 +172,7 @@ static struct inode *alloc_inode(struct super_block *sb)
 
 void destroy_inode(struct inode *inode) 
 {
-	if (inode_has_buffers(inode))
-		BUG();
+	BUG_ON(inode_has_buffers(inode));
 	security_inode_free(inode);
 	if (inode->i_sb->s_op->destroy_inode)
 		inode->i_sb->s_op->destroy_inode(inode);
@@ -249,12 +248,9 @@ void clear_inode(struct inode *inode)
 	might_sleep();
 	invalidate_inode_buffers(inode);
       
-	if (inode->i_data.nrpages)
-		BUG();
-	if (!(inode->i_state & I_FREEING))
-		BUG();
-	if (inode->i_state & I_CLEAR)
-		BUG();
+	BUG_ON(inode->i_data.nrpages);
+	BUG_ON(!(inode->i_state & I_FREEING));
+	BUG_ON(inode->i_state & I_CLEAR);
 	wait_on_inode(inode);
 	DQUOT_DROP(inode);
 	if (inode->i_sb && inode->i_sb->s_op->clear_inode)
@@ -1054,8 +1050,7 @@ void generic_delete_inode(struct inode *inode)
 	hlist_del_init(&inode->i_hash);
 	spin_unlock(&inode_lock);
 	wake_up_inode(inode);
-	if (inode->i_state != I_CLEAR)
-		BUG();
+	BUG_ON(inode->i_state != I_CLEAR);
 	destroy_inode(inode);
 }
 

+ 1 - 2
fs/jffs2/background.c

@@ -35,8 +35,7 @@ int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c)
 	pid_t pid;
 	int ret = 0;
 
-	if (c->gc_task)
-		BUG();
+	BUG_ON(c->gc_task);
 
 	init_completion(&c->gc_thread_start);
 	init_completion(&c->gc_thread_exit);

+ 2 - 4
fs/smbfs/file.c

@@ -178,11 +178,9 @@ smb_writepage(struct page *page, struct writeback_control *wbc)
 	unsigned offset = PAGE_CACHE_SIZE;
 	int err;
 
-	if (!mapping)
-		BUG();
+	BUG_ON(!mapping);
 	inode = mapping->host;
-	if (!inode)
-		BUG();
+	BUG_ON(!inode);
 
 	end_index = inode->i_size >> PAGE_CACHE_SHIFT;
 

+ 1 - 1
fs/sysfs/dir.c

@@ -50,7 +50,7 @@ static struct sysfs_dirent * sysfs_new_dirent(struct sysfs_dirent * parent_sd,
 	return sd;
 }
 
-/**
+/*
  *
  * Return -EEXIST if there is already a sysfs element with the same name for
  * the same parent.

+ 1 - 2
fs/sysfs/inode.c

@@ -175,8 +175,7 @@ const unsigned char * sysfs_get_name(struct sysfs_dirent *sd)
 	struct bin_attribute * bin_attr;
 	struct sysfs_symlink  * sl;
 
-	if (!sd || !sd->s_element)
-		BUG();
+	BUG_ON(!sd || !sd->s_element);
 
 	switch (sd->s_type) {
 		case SYSFS_DIR:

+ 2 - 4
fs/sysv/dir.c

@@ -253,8 +253,7 @@ int sysv_delete_entry(struct sysv_dir_entry *de, struct page *page)
 
 	lock_page(page);
 	err = mapping->a_ops->prepare_write(NULL, page, from, to);
-	if (err)
-		BUG();
+	BUG_ON(err);
 	de->inode = 0;
 	err = dir_commit_chunk(page, from, to);
 	dir_put_page(page);
@@ -353,8 +352,7 @@ void sysv_set_link(struct sysv_dir_entry *de, struct page *page,
 
 	lock_page(page);
 	err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
-	if (err)
-		BUG();
+	BUG_ON(err);
 	de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
 	err = dir_commit_chunk(page, from, to);
 	dir_put_page(page);

+ 2 - 4
fs/udf/inode.c

@@ -312,12 +312,10 @@ static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head
 	err = 0;
 
 	bh = inode_getblk(inode, block, &err, &phys, &new);
-	if (bh)
-		BUG();
+	BUG_ON(bh);
 	if (err)
 		goto abort;
-	if (!phys)
-		BUG();
+	BUG_ON(!phys);
 
 	if (new)
 		set_buffer_new(bh_result);

+ 1 - 1
include/linux/fs.h

@@ -864,7 +864,7 @@ struct super_block {
 	 */
 	struct mutex s_vfs_rename_mutex;	/* Kludge */
 
-	/* Granuality of c/m/atime in ns.
+	/* Granularity of c/m/atime in ns.
 	   Cannot be worse than a second */
 	u32		   s_time_gran;
 };

+ 1 - 1
include/linux/hrtimer.h

@@ -80,7 +80,7 @@ struct hrtimer_sleeper {
  * @first:		pointer to the timer node which expires first
  * @resolution:		the resolution of the clock, in nanoseconds
  * @get_time:		function to retrieve the current time of the clock
- * @get_sofirq_time:	function to retrieve the current time from the softirq
+ * @get_softirq_time:	function to retrieve the current time from the softirq
  * @curr_timer:		the timer which is executing a callback right now
  * @softirq_time:	the time when running the hrtimer queue in the softirq
  */

+ 7 - 8
ipc/shm.c

@@ -91,8 +91,8 @@ static inline int shm_addid(struct shmid_kernel *shp)
 static inline void shm_inc (int id) {
 	struct shmid_kernel *shp;
 
-	if(!(shp = shm_lock(id)))
-		BUG();
+	shp = shm_lock(id);
+	BUG_ON(!shp);
 	shp->shm_atim = get_seconds();
 	shp->shm_lprid = current->tgid;
 	shp->shm_nattch++;
@@ -142,8 +142,8 @@ static void shm_close (struct vm_area_struct *shmd)
 
 	mutex_lock(&shm_ids.mutex);
 	/* remove from the list of attaches of the shm segment */
-	if(!(shp = shm_lock(id)))
-		BUG();
+	shp = shm_lock(id);
+	BUG_ON(!shp);
 	shp->shm_lprid = current->tgid;
 	shp->shm_dtim = get_seconds();
 	shp->shm_nattch--;
@@ -283,8 +283,7 @@ asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
 		err = -EEXIST;
 	} else {
 		shp = shm_lock(id);
-		if(shp==NULL)
-			BUG();
+		BUG_ON(shp==NULL);
 		if (shp->shm_segsz < size)
 			err = -EINVAL;
 		else if (ipcperms(&shp->shm_perm, shmflg))
@@ -774,8 +773,8 @@ invalid:
 	up_write(&current->mm->mmap_sem);
 
 	mutex_lock(&shm_ids.mutex);
-	if(!(shp = shm_lock(shmid)))
-		BUG();
+	shp = shm_lock(shmid);
+	BUG_ON(!shp);
 	shp->shm_nattch--;
 	if(shp->shm_nattch == 0 &&
 	   shp->shm_perm.mode & SHM_DEST)

+ 2 - 4
ipc/util.c

@@ -266,8 +266,7 @@ struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id)
 {
 	struct kern_ipc_perm* p;
 	int lid = id % SEQ_MULTIPLIER;
-	if(lid >= ids->entries->size)
-		BUG();
+	BUG_ON(lid >= ids->entries->size);
 
 	/* 
 	 * do not need a rcu_dereference()() here to force ordering
@@ -275,8 +274,7 @@ struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id)
 	 */	
 	p = ids->entries->p[lid];
 	ids->entries->p[lid] = NULL;
-	if(p==NULL)
-		BUG();
+	BUG_ON(p==NULL);
 	ids->in_use--;
 
 	if (lid == ids->max_id) {

+ 1 - 1
kernel/power/Kconfig

@@ -41,7 +41,7 @@ config SOFTWARE_SUSPEND
 	depends on PM && SWAP && (X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP)
 	---help---
 	  Enable the possibility of suspending the machine.
-	  It doesn't need APM.
+	  It doesn't need ACPI or APM.
 	  You may suspend your machine by 'swsusp' or 'shutdown -z <time>' 
 	  (patch for sysvinit needed). 
 

+ 2 - 4
kernel/printk.c

@@ -360,8 +360,7 @@ static void call_console_drivers(unsigned long start, unsigned long end)
 	unsigned long cur_index, start_print;
 	static int msg_level = -1;
 
-	if (((long)(start - end)) > 0)
-		BUG();
+	BUG_ON(((long)(start - end)) > 0);
 
 	cur_index = start;
 	start_print = start;
@@ -708,8 +707,7 @@ int __init add_preferred_console(char *name, int idx, char *options)
  */
 void acquire_console_sem(void)
 {
-	if (in_interrupt())
-		BUG();
+	BUG_ON(in_interrupt());
 	down(&console_sem);
 	console_locked = 1;
 	console_may_schedule = 1;

+ 1 - 2
kernel/ptrace.c

@@ -30,8 +30,7 @@
  */
 void __ptrace_link(task_t *child, task_t *new_parent)
 {
-	if (!list_empty(&child->ptrace_list))
-		BUG();
+	BUG_ON(!list_empty(&child->ptrace_list));
 	if (child->parent == new_parent)
 		return;
 	list_add(&child->ptrace_list, &child->parent->ptrace_children);

+ 2 - 4
kernel/signal.c

@@ -769,8 +769,7 @@ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 {
 	int ret = 0;
 
-	if (!irqs_disabled())
-		BUG();
+	BUG_ON(!irqs_disabled());
 	assert_spin_locked(&t->sighand->siglock);
 
 	/* Short-circuit ignored signals.  */
@@ -1384,8 +1383,7 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 		 * the overrun count.  Other uses should not try to
 		 * send the signal multiple times.
 		 */
-		if (q->info.si_code != SI_TIMER)
-			BUG();
+		BUG_ON(q->info.si_code != SI_TIMER);
 		q->info.si_overrun++;
 		goto out;
 	} 

+ 4 - 4
kernel/time.c

@@ -410,7 +410,7 @@ EXPORT_SYMBOL(current_kernel_time);
  * current_fs_time - Return FS time
  * @sb: Superblock.
  *
- * Return the current time truncated to the time granuality supported by
+ * Return the current time truncated to the time granularity supported by
  * the fs.
  */
 struct timespec current_fs_time(struct super_block *sb)
@@ -421,11 +421,11 @@ struct timespec current_fs_time(struct super_block *sb)
 EXPORT_SYMBOL(current_fs_time);
 
 /**
- * timespec_trunc - Truncate timespec to a granuality
+ * timespec_trunc - Truncate timespec to a granularity
  * @t: Timespec
- * @gran: Granuality in ns.
+ * @gran: Granularity in ns.
  *
- * Truncate a timespec to a granuality. gran must be smaller than a second.
+ * Truncate a timespec to a granularity. gran must be smaller than a second.
 * Always rounds down.
  *
  * This function should be only used for timestamps returned by

+ 1 - 2
kernel/timer.c

@@ -1479,8 +1479,7 @@ register_time_interpolator(struct time_interpolator *ti)
 	unsigned long flags;
 
 	/* Sanity check */
-	if (ti->frequency == 0 || ti->mask == 0)
-		BUG();
+	BUG_ON(ti->frequency == 0 || ti->mask == 0);
 
 	ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
 	spin_lock(&time_interpolator_lock);

+ 5 - 10
mm/highmem.c

@@ -74,8 +74,7 @@ static void flush_all_zero_pkmaps(void)
 		pkmap_count[i] = 0;
 
 		/* sanity check */
-		if (pte_none(pkmap_page_table[i]))
-			BUG();
+		BUG_ON(pte_none(pkmap_page_table[i]));
 
 		/*
 		 * Don't need an atomic fetch-and-clear op here;
@@ -158,8 +157,7 @@ void fastcall *kmap_high(struct page *page)
 	if (!vaddr)
 		vaddr = map_new_virtual(page);
 	pkmap_count[PKMAP_NR(vaddr)]++;
-	if (pkmap_count[PKMAP_NR(vaddr)] < 2)
-		BUG();
+	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
 	spin_unlock(&kmap_lock);
 	return (void*) vaddr;
 }
@@ -174,8 +172,7 @@ void fastcall kunmap_high(struct page *page)
 
 	spin_lock(&kmap_lock);
 	vaddr = (unsigned long)page_address(page);
-	if (!vaddr)
-		BUG();
+	BUG_ON(!vaddr);
 	nr = PKMAP_NR(vaddr);
 
 	/*
@@ -220,8 +217,7 @@ static __init int init_emergency_pool(void)
 		return 0;
 
 	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
-	if (!page_pool)
-		BUG();
+	BUG_ON(!page_pool);
 	printk("highmem bounce pool size: %d pages\n", POOL_SIZE);
 
 	return 0;
@@ -264,8 +260,7 @@ int init_emergency_isa_pool(void)
 
 	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
 				       mempool_free_pages, (void *) 0);
-	if (!isa_page_pool)
-		BUG();
+	BUG_ON(!isa_page_pool);
 
 	printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
 	return 0;

+ 3 - 6
mm/mmap.c

@@ -294,8 +294,7 @@ void validate_mm(struct mm_struct *mm)
 	i = browse_rb(&mm->mm_rb);
 	if (i != mm->map_count)
 		printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
-	if (bug)
-		BUG();
+	BUG_ON(bug);
 }
 #else
 #define validate_mm(mm) do { } while (0)
@@ -432,8 +431,7 @@ __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
 	struct rb_node ** rb_link, * rb_parent;
 
 	__vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent);
-	if (__vma && __vma->vm_start < vma->vm_end)
-		BUG();
+	BUG_ON(__vma && __vma->vm_start < vma->vm_end);
 	__vma_link(mm, vma, prev, rb_link, rb_parent);
 	mm->map_count++;
 }
@@ -813,8 +811,7 @@ try_prev:
 	 * (e.g. stash info in next's anon_vma_node when assigning
 	 * an anon_vma, or when trying vma_merge).  Another time.
 	 */
-	if (find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma)
-		BUG();
+	BUG_ON(find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma);
 	if (!near)
 		goto none;
 

+ 1 - 1
mm/page-writeback.c

@@ -258,7 +258,7 @@ static void balance_dirty_pages(struct address_space *mapping)
 /**
  * balance_dirty_pages_ratelimited_nr - balance dirty memory state
  * @mapping: address_space which was dirtied
- * @nr_pages: number of pages which the caller has just dirtied
+ * @nr_pages_dirtied: number of pages which the caller has just dirtied
  *
  * Processes which are dirtying memory should call in here once for each page
  * which was newly dirtied.  The function will periodically check the system's

+ 6 - 12
mm/slab.c

@@ -1297,8 +1297,7 @@ void __init kmem_cache_init(void)
 		if (cache_cache.num)
 			break;
 	}
-	if (!cache_cache.num)
-		BUG();
+	BUG_ON(!cache_cache.num);
 	cache_cache.gfporder = order;
 	cache_cache.colour = left_over / cache_cache.colour_off;
 	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
@@ -1974,8 +1973,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 * Always checks flags, a caller might be expecting debug support which
 	 * isn't available.
 	 */
-	if (flags & ~CREATE_MASK)
-		BUG();
+	BUG_ON(flags & ~CREATE_MASK);
 
 	/*
 	 * Check that size is in terms of words.  This is needed to avoid
@@ -2206,8 +2204,7 @@ static int __node_shrink(struct kmem_cache *cachep, int node)
 
 		slabp = list_entry(l3->slabs_free.prev, struct slab, list);
 #if DEBUG
-		if (slabp->inuse)
-			BUG();
+		BUG_ON(slabp->inuse);
 #endif
 		list_del(&slabp->list);
 
@@ -2248,8 +2245,7 @@ static int __cache_shrink(struct kmem_cache *cachep)
  */
 int kmem_cache_shrink(struct kmem_cache *cachep)
 {
-	if (!cachep || in_interrupt())
-		BUG();
+	BUG_ON(!cachep || in_interrupt());
 
 	return __cache_shrink(cachep);
 }
@@ -2277,8 +2273,7 @@ int kmem_cache_destroy(struct kmem_cache *cachep)
 	int i;
 	struct kmem_list3 *l3;
 
-	if (!cachep || in_interrupt())
-		BUG();
+	BUG_ON(!cachep || in_interrupt());
 
 	/* Don't let CPUs to come and go */
 	lock_cpu_hotplug();
@@ -2477,8 +2472,7 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	 * Be lazy and only check for valid flags here,  keeping it out of the
 	 * critical path in kmem_cache_alloc().
 	 */
-	if (flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW))
-		BUG();
+	BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW));
 	if (flags & SLAB_NO_GROW)
 		return 0;
 

+ 1 - 2
mm/swap_state.c

@@ -148,8 +148,7 @@ int add_to_swap(struct page * page, gfp_t gfp_mask)
 	swp_entry_t entry;
 	int err;
 
-	if (!PageLocked(page))
-		BUG();
+	BUG_ON(!PageLocked(page));
 
 	for (;;) {
 		entry = get_swap_page();

+ 1 - 2
mm/vmalloc.c

@@ -321,8 +321,7 @@ void __vunmap(void *addr, int deallocate_pages)
 		int i;
 
 		for (i = 0; i < area->nr_pages; i++) {
-			if (unlikely(!area->pages[i]))
-				BUG();
+			BUG_ON(!area->pages[i]);
 			__free_page(area->pages[i]);
 		}