Merge 3.1-rc1 into usb-linus

Gives us a good starting point to base patches off of.

Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Greg Kroah-Hartman, 14 years ago
commit b6741d1fe9
100 changed files with 1585 additions and 790 deletions
  1. Documentation/ABI/testing/pstore (+6 -0)
  2. Documentation/ABI/testing/sysfs-platform-ideapad-laptop (+17 -0)
  3. Documentation/CodingStyle (+7 -16)
  4. Documentation/acpi/apei/einj.txt (+9 -2)
  5. Documentation/device-mapper/dm-crypt.txt (+20 -1)
  6. Documentation/device-mapper/dm-flakey.txt (+42 -6)
  7. Documentation/device-mapper/dm-raid.txt (+88 -50)
  8. Documentation/devicetree/bindings/gpio/gpio_keys.txt (+1 -1)
  9. Documentation/devicetree/bindings/input/fsl-mma8450.txt (+11 -0)
  10. Documentation/dmaengine.txt (+164 -70)
  11. Documentation/fault-injection/fault-injection.txt (+1 -2)
  12. Documentation/feature-removal-schedule.txt (+11 -9)
  13. Documentation/frv/booting.txt (+10 -3)
  14. Documentation/ioctl/ioctl-number.txt (+1 -0)
  15. Documentation/kernel-parameters.txt (+11 -0)
  16. Documentation/m68k/kernel-options.txt (+14 -0)
  17. Documentation/networking/bonding.txt (+1 -1)
  18. Documentation/power/runtime_pm.txt (+5 -5)
  19. MAINTAINERS (+12 -6)
  20. Makefile (+2 -2)
  21. arch/Kconfig (+3 -0)
  22. arch/alpha/Kconfig (+1 -0)
  23. arch/arm/kernel/armksyms.c (+0 -3)
  24. arch/arm/kernel/process.c (+3 -1)
  25. arch/arm/lib/Makefile (+1 -1)
  26. arch/arm/lib/sha1.S (+0 -211)
  27. arch/avr32/Kconfig (+1 -0)
  28. arch/cris/arch-v10/drivers/sync_serial.c (+3 -3)
  29. arch/cris/arch-v10/kernel/irq.c (+3 -0)
  30. arch/cris/include/asm/thread_info.h (+3 -3)
  31. arch/frv/Kconfig (+1 -0)
  32. arch/ia64/Kconfig (+5 -0)
  33. arch/ia64/include/asm/gpio.h (+55 -0)
  34. arch/ia64/kernel/efi.c (+1 -1)
  35. arch/m68k/Kconfig (+1 -0)
  36. arch/parisc/Kconfig (+1 -0)
  37. arch/parisc/include/asm/atomic.h (+2 -2)
  38. arch/parisc/include/asm/futex.h (+60 -6)
  39. arch/parisc/include/asm/unistd.h (+2 -1)
  40. arch/parisc/kernel/syscall_table.S (+1 -0)
  41. arch/powerpc/Kconfig (+1 -0)
  42. arch/s390/Kconfig (+5 -4)
  43. arch/s390/include/asm/ipl.h (+1 -0)
  44. arch/s390/include/asm/lowcore.h (+9 -2)
  45. arch/s390/include/asm/processor.h (+0 -2)
  46. arch/s390/include/asm/system.h (+1 -0)
  47. arch/s390/kernel/asm-offsets.c (+4 -6)
  48. arch/s390/kernel/base.S (+36 -0)
  49. arch/s390/kernel/compat_signal.c (+12 -31)
  50. arch/s390/kernel/entry.S (+28 -0)
  51. arch/s390/kernel/entry64.S (+20 -0)
  52. arch/s390/kernel/ipl.c (+44 -1)
  53. arch/s390/kernel/reipl64.S (+60 -20)
  54. arch/s390/kernel/setup.c (+24 -1)
  55. arch/s390/kernel/signal.c (+19 -42)
  56. arch/s390/kernel/smp.c (+16 -8)
  57. arch/s390/mm/maccess.c (+16 -0)
  58. arch/s390/mm/pgtable.c (+1 -0)
  59. arch/sh/Kconfig (+1 -0)
  60. arch/sh/kernel/idle.c (+4 -2)
  61. arch/sparc/Kconfig (+1 -0)
  62. arch/sparc/include/asm/Kbuild (+5 -0)
  63. arch/sparc/include/asm/bitops_64.h (+8 -41)
  64. arch/sparc/include/asm/div64.h (+0 -1)
  65. arch/sparc/include/asm/elf_64.h (+29 -36)
  66. arch/sparc/include/asm/hypervisor.h (+13 -0)
  67. arch/sparc/include/asm/irq_regs.h (+0 -1)
  68. arch/sparc/include/asm/local.h (+0 -6)
  69. arch/sparc/include/asm/local64.h (+0 -1)
  70. arch/sparc/include/asm/tsb.h (+24 -27)
  71. arch/sparc/kernel/cpu.c (+1 -0)
  72. arch/sparc/kernel/ds.c (+28 -2)
  73. arch/sparc/kernel/entry.h (+14 -0)
  74. arch/sparc/kernel/head_64.S (+1 -1)
  75. arch/sparc/kernel/hvapi.c (+6 -0)
  76. arch/sparc/kernel/hvcalls.S (+7 -0)
  77. arch/sparc/kernel/kernel.h (+15 -0)
  78. arch/sparc/kernel/ktlb.S (+12 -12)
  79. arch/sparc/kernel/mdesc.c (+23 -7)
  80. arch/sparc/kernel/setup_64.c (+186 -0)
  81. arch/sparc/kernel/sparc_ksyms_64.c (+11 -0)
  82. arch/sparc/kernel/sstate.c (+2 -7)
  83. arch/sparc/kernel/unaligned_64.c (+4 -11)
  84. arch/sparc/kernel/vmlinux.lds.S (+20 -1)
  85. arch/sparc/lib/Makefile (+2 -2)
  86. arch/sparc/lib/NG2page.S (+0 -61)
  87. arch/sparc/lib/NGpage.S (+75 -39)
  88. arch/sparc/lib/atomic32.c (+1 -1)
  89. arch/sparc/lib/ffs.S (+84 -0)
  90. arch/sparc/lib/hweight.S (+51 -0)
  91. arch/sparc/mm/init_64.c (+41 -1)
  92. arch/tile/Kconfig (+1 -0)
  93. arch/tile/include/asm/Kbuild (+38 -0)
  94. arch/tile/include/asm/bug.h (+0 -1)
  95. arch/tile/include/asm/bugs.h (+0 -1)
  96. arch/tile/include/asm/cputime.h (+0 -1)
  97. arch/tile/include/asm/device.h (+0 -1)
  98. arch/tile/include/asm/div64.h (+0 -1)
  99. arch/tile/include/asm/emergency-restart.h (+0 -1)
  100. arch/tile/include/asm/errno.h (+0 -1)

+ 6 - 0
Documentation/ABI/testing/pstore

@@ -39,3 +39,9 @@ Description:	Generic interface to platform dependent persistent storage.
 		multiple) files based on the record size of the underlying
 		persistent storage until at least this amount is reached.
 		Default is 10 Kbytes.
+
+		Pstore only supports one backend at a time. If multiple
+		backends are available, the preferred backend may be
+		set by passing the pstore.backend= argument to the kernel at
+		boot time.
+

+ 17 - 0
Documentation/ABI/testing/sysfs-platform-ideapad-laptop

@@ -4,3 +4,20 @@ KernelVersion:	2.6.37
 Contact:	"Ike Panhc <ike.pan@canonical.com>"
 Description:
 		Control the power of camera module. 1 means on, 0 means off.
+
+What:		/sys/devices/platform/ideapad/cfg
+Date:		Jun 2011
+KernelVersion:	3.1
+Contact:	"Ike Panhc <ike.pan@canonical.com>"
+Description:
+		Ideapad capability bits.
+		Bit 8-10: 1 - Intel graphic only
+		          2 - ATI graphic only
+		          3 - Nvidia graphic only
+		          4 - Intel and ATI graphic
+		          5 - Intel and Nvidia graphic
+		Bit 16: Bluetooth exist (1 for exist)
+		Bit 17: 3G exist (1 for exist)
+		Bit 18: Wifi exist (1 for exist)
+		Bit 19: Camera exist (1 for exist)
+

+ 7 - 16
Documentation/CodingStyle

@@ -80,22 +80,13 @@ available tools.
 The limit on the length of lines is 80 columns and this is a strongly
 preferred limit.

-Statements longer than 80 columns will be broken into sensible chunks.
-Descendants are always substantially shorter than the parent and are placed
-substantially to the right. The same applies to function headers with a long
-argument list. Long strings are as well broken into shorter strings. The
-only exception to this is where exceeding 80 columns significantly increases
-readability and does not hide information.
-
-void fun(int a, int b, int c)
-{
-	if (condition)
-		printk(KERN_WARNING "Warning this is a long printk with "
-						"3 parameters a: %u b: %u "
-						"c: %u \n", a, b, c);
-	else
-		next_statement;
-}
+Statements longer than 80 columns will be broken into sensible chunks, unless
+exceeding 80 columns significantly increases readability and does not hide
+information. Descendants are always substantially shorter than the parent and
+are placed substantially to the right. The same applies to function headers
+with a long argument list. However, never break user-visible strings such as
+printk messages, because that breaks the ability to grep for them.
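(Editorial illustration only, not part of this diff: under the wording above the
statement is still broken into chunks, but the user-visible format string stays
on one line so it remains greppable. The function and values below are made up.)

	#include <linux/printk.h>

	static void report_values(unsigned int a, unsigned int b, unsigned int c)
	{
		if (a > b)
			/* statement wrapped, format string left unbroken */
			pr_warn("warning: suspicious values a: %u b: %u c: %u\n",
				a, b, c);
	}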
+

 		Chapter 3: Placing Braces and Spaces


+ 9 - 2
Documentation/acpi/apei/einj.txt

@@ -48,12 +48,19 @@ directory apei/einj. The following files are provided.
 - param1
   This file is used to set the first error parameter value. Effect of
   parameter depends on error_type specified. For memory error, this is
-  physical memory address.
+  physical memory address.  Only available if param_extension module
+  parameter is specified.

 - param2
   This file is used to set the second error parameter value. Effect of
   parameter depends on error_type specified. For memory error, this is
-  physical memory address mask.
+  physical memory address mask.  Only available if param_extension
+  module parameter is specified.
+
+Injecting parameter support is a BIOS version specific extension, that
+is, it only works on some BIOS version.  If you want to use it, please
+make sure your BIOS version has the proper support and specify
+"param_extension=y" in module parameter.

 For more information about EINJ, please refer to ACPI specification
 version 4.0, section 17.5.

+ 20 - 1
Documentation/device-mapper/dm-crypt.txt

@@ -4,7 +4,8 @@ dm-crypt
 Device-Mapper's "crypt" target provides transparent encryption of block devices
 using the kernel crypto API.

-Parameters: <cipher> <key> <iv_offset> <device path> <offset>
+Parameters: <cipher> <key> <iv_offset> <device path> \
+	      <offset> [<#opt_params> <opt_params>]

 <cipher>
     Encryption cipher and an optional IV generation mode.
@@ -37,6 +38,24 @@ Parameters: <cipher> <key> <iv_offset> <device path> <offset>
 <offset>
     Starting sector within the device where the encrypted data begins.

+<#opt_params>
+    Number of optional parameters. If there are no optional parameters,
+    the optional paramaters section can be skipped or #opt_params can be zero.
+    Otherwise #opt_params is the number of following arguments.
+
+    Example of optional parameters section:
+        1 allow_discards
+
+allow_discards
+    Block discard requests (a.k.a. TRIM) are passed through the crypt device.
+    The default is to ignore discard requests.
+
+    WARNING: Assess the specific security risks carefully before enabling this
+    option.  For example, allowing discards on encrypted devices may lead to
+    the leak of information about the ciphertext device (filesystem type,
+    used space etc.) if the discarded blocks can be located easily on the
+    device later.
+
 Example scripts
 ===============
 LUKS (Linux Unified Key Setup) is now the preferred way to set up disk

+ 42 - 6
Documentation/device-mapper/dm-flakey.txt

@@ -1,17 +1,53 @@
 dm-flakey
 =========

-This target is the same as the linear target except that it returns I/O
-errors periodically.  It's been found useful in simulating failing
-devices for testing purposes.
+This target is the same as the linear target except that it exhibits
+unreliable behaviour periodically.  It's been found useful in simulating
+failing devices for testing purposes.

 Starting from the time the table is loaded, the device is available for
-<up interval> seconds, then returns errors for <down interval> seconds,
-and then this cycle repeats.
+<up interval> seconds, then exhibits unreliable behaviour for <down
+interval> seconds, and then this cycle repeats.

-Parameters: <dev path> <offset> <up interval> <down interval>
+Also, consider using this in combination with the dm-delay target too,
+which can delay reads and writes and/or send them to different
+underlying devices.
+
+Table parameters
+----------------
+  <dev path> <offset> <up interval> <down interval> \
+    [<num_features> [<feature arguments>]]
+
+Mandatory parameters:
     <dev path>: Full pathname to the underlying block-device, or a
                 "major:minor" device-number.
     <offset>: Starting sector within the device.
     <up interval>: Number of seconds device is available.
     <down interval>: Number of seconds device returns errors.
+
+Optional feature parameters:
+  If no feature parameters are present, during the periods of
+  unreliability, all I/O returns errors.
+
+  drop_writes:
+	All write I/O is silently ignored.
+	Read I/O is handled correctly.
+
+  corrupt_bio_byte <Nth_byte> <direction> <value> <flags>:
+	During <down interval>, replace <Nth_byte> of the data of
+	each matching bio with <value>.
+
+    <Nth_byte>: The offset of the byte to replace.
+		Counting starts at 1, to replace the first byte.
+    <direction>: Either 'r' to corrupt reads or 'w' to corrupt writes.
+		 'w' is incompatible with drop_writes.
+    <value>: The value (from 0-255) to write.
+    <flags>: Perform the replacement only if bio->bi_rw has all the
+	     selected flags set.
+
+Examples:
+  corrupt_bio_byte 32 r 1 0
+	- replaces the 32nd byte of READ bios with the value 1
+
+  corrupt_bio_byte 224 w 0 32
+	- replaces the 224th byte of REQ_META (=32) bios with the value 0

+ 88 - 50
Documentation/device-mapper/dm-raid.txt

@@ -1,70 +1,108 @@
-Device-mapper RAID (dm-raid) is a bridge from DM to MD.  It
-provides a way to use device-mapper interfaces to access the MD RAID
-drivers.
+dm-raid
+-------

-As with all device-mapper targets, the nominal public interfaces are the
-constructor (CTR) tables and the status outputs (both STATUSTYPE_INFO
-and STATUSTYPE_TABLE).  The CTR table looks like the following:
+The device-mapper RAID (dm-raid) target provides a bridge from DM to MD.
+It allows the MD RAID drivers to be accessed using a device-mapper
+interface.

-1: <s> <l> raid \
-2:      <raid_type> <#raid_params> <raid_params> \
-3:      <#raid_devs> <meta_dev1> <dev1> .. <meta_devN> <devN>
-
-Line 1 contains the standard first three arguments to any device-mapper
-target - the start, length, and target type fields.  The target type in
-this case is "raid".
-
-Line 2 contains the arguments that define the particular raid
-type/personality/level, the required arguments for that raid type, and
-any optional arguments.  Possible raid types include: raid4, raid5_la,
-raid5_ls, raid5_rs, raid6_zr, raid6_nr, and raid6_nc.  (raid1 is
-planned for the future.)  The list of required and optional parameters
-is the same for all the current raid types.  The required parameters are
-positional, while the optional parameters are given as key/value pairs.
-The possible parameters are as follows:
- <chunk_size>           Chunk size in sectors.
- [[no]sync]             Force/Prevent RAID initialization
- [rebuild <idx>]        Rebuild the drive indicated by the index
- [daemon_sleep <ms>]    Time between bitmap daemon work to clear bits
- [min_recovery_rate <kB/sec/disk>]      Throttle RAID initialization
- [max_recovery_rate <kB/sec/disk>]      Throttle RAID initialization
- [max_write_behind <sectors>]           See '-write-behind=' (man mdadm)
- [stripe_cache <sectors>]               Stripe cache size for higher RAIDs
-
-Line 3 contains the list of devices that compose the array in
-metadata/data device pairs.  If the metadata is stored separately, a '-'
-is given for the metadata device position.  If a drive has failed or is
-missing at creation time, a '-' can be given for both the metadata and
-data drives for a given position.
-
-NB. Currently all metadata devices must be specified as '-'.
-
-Examples:
-# RAID4 - 4 data drives, 1 parity
+The target is named "raid" and it accepts the following parameters:
+
+  <raid_type> <#raid_params> <raid_params> \
+    <#raid_devs> <metadata_dev0> <dev0> [.. <metadata_devN> <devN>]
+
+<raid_type>:
+  raid1		RAID1 mirroring
+  raid4		RAID4 dedicated parity disk
+  raid5_la	RAID5 left asymmetric
+		- rotating parity 0 with data continuation
+  raid5_ra	RAID5 right asymmetric
+		- rotating parity N with data continuation
+  raid5_ls	RAID5 left symmetric
+		- rotating parity 0 with data restart
+  raid5_rs 	RAID5 right symmetric
+		- rotating parity N with data restart
+  raid6_zr	RAID6 zero restart
+		- rotating parity zero (left-to-right) with data restart
+  raid6_nr	RAID6 N restart
+		- rotating parity N (right-to-left) with data restart
+  raid6_nc	RAID6 N continue
+		- rotating parity N (right-to-left) with data continuation
+
+  Refererence: Chapter 4 of
+  http://www.snia.org/sites/default/files/SNIA_DDF_Technical_Position_v2.0.pdf
+
+<#raid_params>: The number of parameters that follow.
+
+<raid_params> consists of
+    Mandatory parameters:
+        <chunk_size>: Chunk size in sectors.  This parameter is often known as
+		      "stripe size".  It is the only mandatory parameter and
+		      is placed first.
+
+    followed by optional parameters (in any order):
+	[sync|nosync]   Force or prevent RAID initialization.
+
+	[rebuild <idx>]	Rebuild drive number idx (first drive is 0).
+
+	[daemon_sleep <ms>]
+		Interval between runs of the bitmap daemon that
+		clear bits.  A longer interval means less bitmap I/O but
+		resyncing after a failure is likely to take longer.
+
+	[min_recovery_rate <kB/sec/disk>]  Throttle RAID initialization
+	[max_recovery_rate <kB/sec/disk>]  Throttle RAID initialization
+	[write_mostly <idx>]		   Drive index is write-mostly
+	[max_write_behind <sectors>]       See '-write-behind=' (man mdadm)
+	[stripe_cache <sectors>]           Stripe cache size (higher RAIDs only)
+	[region_size <sectors>]
+		The region_size multiplied by the number of regions is the
+		logical size of the array.  The bitmap records the device
+		synchronisation state for each region.
+
+<#raid_devs>: The number of devices composing the array.
+	Each device consists of two entries.  The first is the device
+	containing the metadata (if any); the second is the one containing the
+	data.
+
+	If a drive has failed or is missing at creation time, a '-' can be
+	given for both the metadata and data drives for a given position.
+
+
+Example tables
+--------------
+# RAID4 - 4 data drives, 1 parity (no metadata devices)
 # No metadata devices specified to hold superblock/bitmap info
 # Chunk size of 1MiB
 # (Lines separated for easy reading)
+
 0 1960893648 raid \
         raid4 1 2048 \
         5 - 8:17 - 8:33 - 8:49 - 8:65 - 8:81

-# RAID4 - 4 data drives, 1 parity (no metadata devices)
+# RAID4 - 4 data drives, 1 parity (with metadata devices)
 # Chunk size of 1MiB, force RAID initialization,
 #       min recovery rate at 20 kiB/sec/disk
+
 0 1960893648 raid \
-        raid4 4 2048 min_recovery_rate 20 sync\
-        5 - 8:17 - 8:33 - 8:49 - 8:65 - 8:81
+        raid4 4 2048 sync min_recovery_rate 20 \
+        5 8:17 8:18 8:33 8:34 8:49 8:50 8:65 8:66 8:81 8:82

-Performing a 'dmsetup table' should display the CTR table used to
-construct the mapping (with possible reordering of optional
-parameters).
+'dmsetup table' displays the table used to construct the mapping.
+The optional parameters are always printed in the order listed
+above with "sync" or "nosync" always output ahead of the other
+arguments, regardless of the order used when originally loading the table.
+Arguments that can be repeated are ordered by value.

-Performing a 'dmsetup status' will yield information on the state and
-health of the array.  The output is as follows:
+'dmsetup status' yields information on the state and health of the
+array.
+The output is as follows:
 1: <s> <l> raid \
 2:      <raid_type> <#devices> <1 health char for each dev> <resync_ratio>

-Line 1 is standard DM output.  Line 2 is best shown by example:
+Line 1 is the standard output produced by device-mapper.
+Line 2 is produced by the raid target, and best explained by example:
         0 1960893648 raid raid4 5 AAAAA 2/490221568
 Here we can see the RAID type is raid4, there are 5 devices - all of
 which are 'A'live, and the array is 2/490221568 complete with recovery.
+Faulty or missing devices are marked 'D'.  Devices that are out-of-sync
+are marked 'a'.

+ 1 - 1
Documentation/devicetree/bindings/gpio/gpio_keys.txt

@@ -10,7 +10,7 @@ Optional properties:
 Each button (key) is represented as a sub-node of "gpio-keys":
 Subnode properties:

-	- gpios: OF devcie-tree gpio specificatin.
+	- gpios: OF device-tree gpio specification.
 	- label: Descriptive name of the key.
 	- linux,code: Keycode to emit.


+ 11 - 0
Documentation/devicetree/bindings/input/fsl-mma8450.txt

@@ -0,0 +1,11 @@
+* Freescale MMA8450 3-Axis Accelerometer
+
+Required properties:
+- compatible : "fsl,mma8450".
+
+Example:
+
+accelerometer: mma8450@1c {
+	compatible = "fsl,mma8450";
+	reg = <0x1c>;
+};

+ 164 - 70
Documentation/dmaengine.txt

@@ -10,87 +10,181 @@ NOTE: For DMA Engine usage in async_tx please see:
 Below is a guide to device driver writers on how to use the Slave-DMA API of the
 DMA Engine. This is applicable only for slave DMA usage only.

-The slave DMA usage consists of following steps
+The slave DMA usage consists of following steps:
 1. Allocate a DMA slave channel
 2. Set slave and controller specific parameters
 3. Get a descriptor for transaction
-4. Submit the transaction and wait for callback notification
+4. Submit the transaction
+5. Issue pending requests and wait for callback notification

 1. Allocate a DMA slave channel
-Channel allocation is slightly different in the slave DMA context, client
-drivers typically need a channel from a particular DMA controller only and even
-in some cases a specific channel is desired. To request a channel
-dma_request_channel() API is used.
-
-Interface:
-struct dma_chan *dma_request_channel(dma_cap_mask_t mask,
-		dma_filter_fn filter_fn,
-		void *filter_param);
-where dma_filter_fn is defined as:
-typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
-
-When the optional 'filter_fn' parameter is set to NULL dma_request_channel
-simply returns the first channel that satisfies the capability mask.  Otherwise,
-when the mask parameter is insufficient for specifying the necessary channel,
-the filter_fn routine can be used to disposition the available channels in the
-system. The filter_fn routine is called once for each free channel in the
-system.  Upon seeing a suitable channel filter_fn returns DMA_ACK which flags
-that channel to be the return value from dma_request_channel.  A channel
-allocated via this interface is exclusive to the caller, until
-dma_release_channel() is called.
+
+   Channel allocation is slightly different in the slave DMA context,
+   client drivers typically need a channel from a particular DMA
+   controller only and even in some cases a specific channel is desired.
+   To request a channel dma_request_channel() API is used.
+
+   Interface:
+	struct dma_chan *dma_request_channel(dma_cap_mask_t mask,
+			dma_filter_fn filter_fn,
+			void *filter_param);
+   where dma_filter_fn is defined as:
+	typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
+
+   The 'filter_fn' parameter is optional, but highly recommended for
+   slave and cyclic channels as they typically need to obtain a specific
+   DMA channel.
+
+   When the optional 'filter_fn' parameter is NULL, dma_request_channel()
+   simply returns the first channel that satisfies the capability mask.
+
+   Otherwise, the 'filter_fn' routine will be called once for each free
+   channel which has a capability in 'mask'.  'filter_fn' is expected to
+   return 'true' when the desired DMA channel is found.
+
+   A channel allocated via this interface is exclusive to the caller,
+   until dma_release_channel() is called.

 2. Set slave and controller specific parameters
-Next step is always to pass some specific information to the DMA driver. Most of
-the generic information which a slave DMA can use is in struct dma_slave_config.
-It allows the clients to specify DMA direction, DMA addresses, bus widths, DMA
-burst lengths etc. If some DMA controllers have more parameters to be sent then
-they should try to embed struct dma_slave_config in their controller specific
-structure. That gives flexibility to client to pass more parameters, if
-required.
-
-Interface:
-int dmaengine_slave_config(struct dma_chan *chan,
-					  struct dma_slave_config *config)
+
+   Next step is always to pass some specific information to the DMA
+   driver.  Most of the generic information which a slave DMA can use
+   is in struct dma_slave_config.  This allows the clients to specify
+   DMA direction, DMA addresses, bus widths, DMA burst lengths etc
+   for the peripheral.
+
+   If some DMA controllers have more parameters to be sent then they
+   should try to embed struct dma_slave_config in their controller
+   specific structure. That gives flexibility to client to pass more
+   parameters, if required.
+
+   Interface:
+	int dmaengine_slave_config(struct dma_chan *chan,
+				  struct dma_slave_config *config)
+
+   Please see the dma_slave_config structure definition in dmaengine.h
+   for a detailed explaination of the struct members.  Please note
+   that the 'direction' member will be going away as it duplicates the
+   direction given in the prepare call.
 
 
 3. Get a descriptor for transaction
 3. Get a descriptor for transaction
-For slave usage the various modes of slave transfers supported by the
-DMA-engine are:
-slave_sg	- DMA a list of scatter gather buffers from/to a peripheral
-dma_cyclic	- Perform a cyclic DMA operation from/to a peripheral till the
+
+   For slave usage the various modes of slave transfers supported by the
+   DMA-engine are:
+
+   slave_sg	- DMA a list of scatter gather buffers from/to a peripheral
+   dma_cyclic	- Perform a cyclic DMA operation from/to a peripheral till the
 		  operation is explicitly stopped.
-The non NULL return of this transfer API represents a "descriptor" for the given
-transaction.
-
-Interface:
-struct dma_async_tx_descriptor *(*chan->device->device_prep_dma_sg)(
-		struct dma_chan *chan,
-		struct scatterlist *dst_sg, unsigned int dst_nents,
-		struct scatterlist *src_sg, unsigned int src_nents,
+
+   A non-NULL return of this transfer API represents a "descriptor" for
+   the given transaction.
+
+   Interface:
+	struct dma_async_tx_descriptor *(*chan->device->device_prep_slave_sg)(
+		struct dma_chan *chan, struct scatterlist *sgl,
+		unsigned int sg_len, enum dma_data_direction direction,
 		unsigned long flags);
 		unsigned long flags);
+
+	struct dma_async_tx_descriptor *(*chan->device->device_prep_dma_cyclic)(
 		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 		size_t period_len, enum dma_data_direction direction);

-4. Submit the transaction and wait for callback notification
-To schedule the transaction to be scheduled by dma device, the "descriptor"
-returned in above (3) needs to be submitted.
-To tell the dma driver that a transaction is ready to be serviced, the
-descriptor->submit() callback needs to be invoked. This chains the descriptor to
-the pending queue.
-The transactions in the pending queue can be activated by calling the
-issue_pending API. If channel is idle then the first transaction in queue is
-started and subsequent ones queued up.
-On completion of the DMA operation the next in queue is submitted and a tasklet
-triggered. The tasklet would then call the client driver completion callback
-routine for notification, if set.
-Interface:
-void dma_async_issue_pending(struct dma_chan *chan);
-
-==============================================================================
-
-Additional usage notes for dma driver writers
-1/ Although DMA engine specifies that completion callback routines cannot submit
-any new operations, but typically for slave DMA subsequent transaction may not
-be available for submit prior to callback routine being called. This requirement
-is not a requirement for DMA-slave devices. But they should take care to drop
-the spin-lock they might be holding before calling the callback routine
+   The peripheral driver is expected to have mapped the scatterlist for
+   the DMA operation prior to calling device_prep_slave_sg, and must
+   keep the scatterlist mapped until the DMA operation has completed.
+   The scatterlist must be mapped using the DMA struct device.  So,
+   normal setup should look like this:
+
+	nr_sg = dma_map_sg(chan->device->dev, sgl, sg_len);
+	if (nr_sg == 0)
+		/* error */
+
+	desc = chan->device->device_prep_slave_sg(chan, sgl, nr_sg,
+			direction, flags);
+
+   Once a descriptor has been obtained, the callback information can be
+   added and the descriptor must then be submitted.  Some DMA engine
+   drivers may hold a spinlock between a successful preparation and
+   submission so it is important that these two operations are closely
+   paired.
+
+   Note:
+	Although the async_tx API specifies that completion callback
+	routines cannot submit any new operations, this is not the
+	case for slave/cyclic DMA.
+
+	For slave DMA, the subsequent transaction may not be available
+	for submission prior to callback function being invoked, so
+	slave DMA callbacks are permitted to prepare and submit a new
+	transaction.
+
+	For cyclic DMA, a callback function may wish to terminate the
+	DMA via dmaengine_terminate_all().
+
+	Therefore, it is important that DMA engine drivers drop any
+	locks before calling the callback function which may cause a
+	deadlock.
+
+	Note that callbacks will always be invoked from the DMA
+	engines tasklet, never from interrupt context.
+
+4. Submit the transaction
+
+   Once the descriptor has been prepared and the callback information
+   added, it must be placed on the DMA engine drivers pending queue.
+
+   Interface:
+	dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
+
+   This returns a cookie can be used to check the progress of DMA engine
+   activity via other DMA engine calls not covered in this document.
+
+   dmaengine_submit() will not start the DMA operation, it merely adds
+   it to the pending queue.  For this, see step 5, dma_async_issue_pending.
+
+5. Issue pending DMA requests and wait for callback notification
+
+   The transactions in the pending queue can be activated by calling the
+   issue_pending API. If channel is idle then the first transaction in
+   queue is started and subsequent ones queued up.
+
+   On completion of each DMA operation, the next in queue is started and
+   a tasklet triggered. The tasklet will then call the client driver
+   completion callback routine for notification, if set.
+
+   Interface:
+	void dma_async_issue_pending(struct dma_chan *chan);
+
+Further APIs:
+
+1. int dmaengine_terminate_all(struct dma_chan *chan)
+
+   This causes all activity for the DMA channel to be stopped, and may
+   discard data in the DMA FIFO which hasn't been fully transferred.
+   No callback functions will be called for any incomplete transfers.
+
+2. int dmaengine_pause(struct dma_chan *chan)
+
+   This pauses activity on the DMA channel without data loss.
+
+3. int dmaengine_resume(struct dma_chan *chan)
+
+   Resume a previously paused DMA channel.  It is invalid to resume a
+   channel which is not currently paused.
+
+4. enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
+        dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
+
+   This can be used to check the status of the channel.  Please see
+   the documentation in include/linux/dmaengine.h for a more complete
+   description of this API.
+
+   This can be used in conjunction with dma_async_is_complete() and
+   the cookie returned from 'descriptor->submit()' to check for
+   completion of a specific DMA transaction.
+
+   Note:
+	Not all DMA engine drivers can return reliable information for
+	a running DMA channel.  It is recommended that DMA engine users
+	pause or stop (via dmaengine_terminate_all) the channel before
+	using this API.
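(Editorial sketch only, not part of this commit: the five steps described above,
condensed into one function. The my_* names, the DMA_TO_DEVICE direction and the
slave-config values are invented placeholders, and error unwinding is abbreviated.)

	#include <linux/dmaengine.h>
	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>
	#include <linux/errno.h>

	static int my_start_slave_dma(struct scatterlist *sgl, unsigned int sg_len,
				      dma_addr_t my_fifo_addr)
	{
		dma_cap_mask_t mask;
		struct dma_chan *chan;
		struct dma_slave_config cfg = {
			.direction	= DMA_TO_DEVICE,	/* memory -> peripheral */
			.dst_addr	= my_fifo_addr,
			.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
			.dst_maxburst	= 16,
		};
		struct dma_async_tx_descriptor *desc;
		int nr_sg;

		/* 1. Allocate a DMA slave channel (a filter_fn would normally be given). */
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		chan = dma_request_channel(mask, NULL, NULL);
		if (!chan)
			return -ENODEV;

		/* 2. Set slave and controller specific parameters. */
		dmaengine_slave_config(chan, &cfg);

		/* Map the scatterlist with the DMA struct device, as required above. */
		nr_sg = dma_map_sg(chan->device->dev, sgl, sg_len, DMA_TO_DEVICE);
		if (!nr_sg)
			goto err_release;

		/* 3. Get a descriptor for the transaction. */
		desc = chan->device->device_prep_slave_sg(chan, sgl, nr_sg,
							  DMA_TO_DEVICE,
							  DMA_PREP_INTERRUPT);
		if (!desc)
			goto err_release;

		/* Callback information (desc->callback, desc->callback_param)
		 * would be filled in here, before submission. */

		/* 4. Submit; the returned cookie could later be checked with
		 * dma_async_is_tx_complete(). */
		dmaengine_submit(desc);

		/* 5. Issue pending requests; completion arrives via the callback. */
		dma_async_issue_pending(chan);
		return 0;

	err_release:
		dma_release_channel(chan);
		return -EIO;
	}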

+ 1 - 2
Documentation/fault-injection/fault-injection.txt

@@ -143,8 +143,7 @@ o provide a way to configure fault attributes
   failslab, fail_page_alloc, and fail_make_request use this way.
   Helper functions:

-	init_fault_attr_dentries(entries, attr, name);
-	void cleanup_fault_attr_dentries(entries);
+	fault_create_debugfs_attr(name, parent, attr);

 - module parameters

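(Editorial sketch of how the new helper above might be wired up in a module; the
"my_fault" names are invented and CONFIG_FAULT_INJECTION_DEBUG_FS is assumed.)

	#include <linux/fault-inject.h>
	#include <linux/debugfs.h>
	#include <linux/err.h>
	#include <linux/module.h>

	static DECLARE_FAULT_ATTR(my_fault);	/* would be tested with should_fail() */
	static struct dentry *my_fault_dir;

	static int __init my_fault_init(void)
	{
		/* One call replaces the old init_fault_attr_dentries() /
		 * cleanup_fault_attr_dentries() pair. */
		my_fault_dir = fault_create_debugfs_attr("my_fault", NULL, &my_fault);
		return IS_ERR(my_fault_dir) ? PTR_ERR(my_fault_dir) : 0;
	}

	static void __exit my_fault_exit(void)
	{
		debugfs_remove_recursive(my_fault_dir);
	}

	module_init(my_fault_init);
	module_exit(my_fault_exit);
	MODULE_LICENSE("GPL");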

+ 11 - 9
Documentation/feature-removal-schedule.txt

@@ -296,15 +296,6 @@ Who:	Ravikiran Thirumalai <kiran@scalex86.org>

 ---------------------------

-What:	CONFIG_THERMAL_HWMON
-When:	January 2009
-Why:	This option was introduced just to allow older lm-sensors userspace
-	to keep working over the upgrade to 2.6.26. At the scheduled time of
-	removal fixed lm-sensors (2.x or 3.x) should be readily available.
-Who:	Rene Herman <rene.herman@gmail.com>
-
----------------------------
-
 What:	Code that is now under CONFIG_WIRELESS_EXT_SYSFS
 	(in net/core/net-sysfs.c)
 When:	After the only user (hal) has seen a release with the patches
@@ -590,3 +581,14 @@ Why:	This driver has been superseded by g_mass_storage.
 Who:	Alan Stern <stern@rowland.harvard.edu>

 ----------------------------
+
+What:   threeg and interface sysfs files in /sys/devices/platform/acer-wmi
+When:   2012
+Why:    In 3.0, we can now autodetect internal 3G device and already have
+	the threeg rfkill device. So, we plan to remove threeg sysfs support
+	for it's no longer necessary.
+
+	We also plan to remove interface sysfs file that exposed which ACPI-WMI
+	interface that was used by acer-wmi driver. It will replaced by
+	information log when acer-wmi initial.
+Who:    Lee, Chun-Yi <jlee@novell.com>

+ 10 - 3
Documentation/frv/booting.txt

@@ -106,13 +106,20 @@ separated by spaces:
       To use the first on-chip serial port at baud rate 115200, no parity, 8
       bits, and no flow control.

-  (*) root=/dev/<xxxx>
+  (*) root=<xxxx>
 
 
-      This specifies the device upon which the root filesystem resides. For
-      example:
+      This specifies the device upon which the root filesystem resides. It
+      may be specified by major and minor number, device path, or even
+      partition uuid, if supported.  For example:

 	/dev/nfs	NFS root filesystem
 	/dev/mtdblock3	Fourth RedBoot partition on the System Flash
+	PARTUUID=00112233-4455-6677-8899-AABBCCDDEEFF/PARTNROFF=1
+		first partition after the partition with the given UUID
+	253:0		Device with major 253 and minor 0
+
+      Authoritative information can be found in
+      "Documentation/kernel-parameters.txt".

   (*) rw


+ 1 - 0
Documentation/ioctl/ioctl-number.txt

@@ -292,6 +292,7 @@ Code  Seq#(hex)	Include File		Comments
 					<mailto:buk@buks.ipn.de>
 0xA0	all	linux/sdp/sdp.h		Industrial Device Project
 					<mailto:kenji@bitgate.com>
+0xA2	00-0F	arch/tile/include/asm/hardwall.h
 0xA3	80-8F	Port ACL		in development:
 					<mailto:tlewis@mindspring.com>
 0xA3	90-9F	linux/dtlk.h

+ 11 - 0
Documentation/kernel-parameters.txt

@@ -163,6 +163,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.

 			See also Documentation/power/pm.txt, pci=noacpi

+	acpi_rsdp=	[ACPI,EFI,KEXEC]
+			Pass the RSDP address to the kernel, mostly used
+			on machines running EFI runtime service to boot the
+			second kernel for kdump.
+
 	acpi_apic_instance=	[ACPI, IOAPIC]
 			Format: <int>
 			2: use 2nd APIC table, if available
@@ -546,6 +551,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			/proc/<pid>/coredump_filter.
 			See also Documentation/filesystems/proc.txt.

+	cpuidle.off=1	[CPU_IDLE]
+			disable the cpuidle sub-system
+
 	cpcihp_generic=	[HW,PCI] Generic port I/O CompactPCI driver
 			Format:
 			<first_slot>,<last_slot>,<port>,<enum_bit>[,<debug>]
@@ -2153,6 +2161,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			[HW,MOUSE] Controls Logitech smartscroll autorepeat.
 			0 = disabled, 1 = enabled (default).

+	pstore.backend=	Specify the name of the pstore backend to use
+
 	pt.		[PARIDE]
 			See Documentation/blockdev/paride.txt.

@@ -2238,6 +2248,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	ro		[KNL] Mount root device read-only on boot

 	root=		[KNL] Root filesystem
+			See name_to_dev_t comment in init/do_mounts.c.

 	rootdelay=	[KNL] Delay (in seconds) to pause before attempting to
 			mount the root filesystem

+ 14 - 0
Documentation/m68k/kernel-options.txt

@@ -129,6 +129,20 @@ decimal 11 is the major of SCSI CD-ROMs, and the minor 0 stands for
 the first of these. You can find out all valid major numbers by
 looking into include/linux/major.h.

+In addition to major and minor numbers, if the device containing your
+root partition uses a partition table format with unique partition
+identifiers, then you may use them.  For instance,
+"root=PARTUUID=00112233-4455-6677-8899-AABBCCDDEEFF".  It is also
+possible to reference another partition on the same device using a
+known partition UUID as the starting point.  For example,
+if partition 5 of the device has the UUID of
+00112233-4455-6677-8899-AABBCCDDEEFF then partition 3 may be found as
+follows:
+  PARTUUID=00112233-4455-6677-8899-AABBCCDDEEFF/PARTNROFF=-2
+
+Authoritative information can be found in
+"Documentation/kernel-parameters.txt".
+

 2.2) ro, rw
 -----------

+ 1 - 1
Documentation/networking/bonding.txt

@@ -599,7 +599,7 @@ num_unsol_na
 	affect only the active-backup mode.  These options were added for
 	bonding versions 3.3.0 and 3.4.0 respectively.

-	From Linux 2.6.40 and bonding version 3.7.1, these notifications
+	From Linux 3.0 and bonding version 3.7.1, these notifications
 	are generated by the ipv4 and ipv6 code and the numbers of
 	repetitions cannot be set independently.


+ 5 - 5
Documentation/power/runtime_pm.txt

@@ -54,11 +54,10 @@ referred to as subsystem-level callbacks in what follows.
 By default, the callbacks are always invoked in process context with interrupts
 enabled.  However, subsystems can use the pm_runtime_irq_safe() helper function
 to tell the PM core that a device's ->runtime_suspend() and ->runtime_resume()
-callbacks should be invoked in atomic context with interrupts disabled
-(->runtime_idle() is still invoked the default way).  This implies that these
-callback routines must not block or sleep, but it also means that the
-synchronous helper functions listed at the end of Section 4 can be used within
-an interrupt handler or in an atomic context.
+callbacks should be invoked in atomic context with interrupts disabled.
+This implies that these callback routines must not block or sleep, but it also
+means that the synchronous helper functions listed at the end of Section 4 can
+be used within an interrupt handler or in an atomic context.

 The subsystem-level suspend callback is _entirely_ _responsible_ for handling
 the suspend of the device as appropriate, which may, but need not include
@@ -483,6 +482,7 @@ pm_runtime_suspend()
 pm_runtime_autosuspend()
 pm_runtime_resume()
 pm_runtime_get_sync()
+pm_runtime_put_sync()
 pm_runtime_put_sync_suspend()

 5. Runtime PM Initialization, Device Probing and Removal
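(Editorial sketch, not from this patch, of the pm_runtime_irq_safe() arrangement
described above; my_probe()/my_irq_handler() and the platform device are placeholders.)

	#include <linux/pm_runtime.h>
	#include <linux/interrupt.h>
	#include <linux/platform_device.h>

	static int my_probe(struct platform_device *pdev)
	{
		/* ->runtime_suspend()/->runtime_resume() may now be invoked in
		 * atomic context with interrupts disabled. */
		pm_runtime_irq_safe(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
		return 0;
	}

	static irqreturn_t my_irq_handler(int irq, void *dev_id)
	{
		struct device *dev = dev_id;

		/* The synchronous helpers from Section 4 are usable here because
		 * the device was marked IRQ-safe above. */
		pm_runtime_get_sync(dev);
		/* ... access the hardware ... */
		pm_runtime_put_sync(dev);
		return IRQ_HANDLED;
	}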

+ 12 - 6
MAINTAINERS

@@ -2643,9 +2643,8 @@ S:	Maintained
 F:	arch/x86/math-emu/

 FRAME RELAY DLCI/FRAD (Sangoma drivers too)
-M:	Mike McLagan <mike.mclagan@linux.org>
 L:	netdev@vger.kernel.org
-S:	Maintained
+S:	Orphan
 F:	drivers/net/wan/dlci.c
 F:	drivers/net/wan/sdla.c

@@ -3367,6 +3366,12 @@ F:	drivers/net/ixgb/
 F:	drivers/net/ixgbe/
 F:	drivers/net/ixgbevf/

+INTEL MRST PMU DRIVER
+M:	Len Brown <len.brown@intel.com>
+L:	linux-pm@lists.linux-foundation.org
+S:	Supported
+F:	arch/x86/platform/mrst/pmu.*
+
 INTEL PRO/WIRELESS 2100 NETWORK CONNECTION SUPPORT
 L:	linux-wireless@vger.kernel.org
 S:	Orphan
@@ -4409,10 +4414,10 @@ F:	net/*/netfilter/
 F:	net/netfilter/

 NETLABEL
-M:	Paul Moore <paul.moore@hp.com>
+M:	Paul Moore <paul@paul-moore.com>
 W:	http://netlabel.sf.net
 L:	netdev@vger.kernel.org
-S:	Supported
+S:	Maintained
 F:	Documentation/netlabel/
 F:	include/net/netlabel.h
 F:	net/netlabel/
@@ -4457,7 +4462,6 @@ F:	include/linux/netdevice.h
 NETWORKING [IPv4/IPv6]
 M:	"David S. Miller" <davem@davemloft.net>
 M:	Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
-M:	"Pekka Savola (ipv6)" <pekkas@netcore.fi>
 M:	James Morris <jmorris@namei.org>
 M:	Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
 M:	Patrick McHardy <kaber@trash.net>
@@ -4470,7 +4474,7 @@ F:	include/net/ip*
 F:	arch/x86/net/*

 NETWORKING [LABELED] (NetLabel, CIPSO, Labeled IPsec, SECMARK)
-M:	Paul Moore <paul.moore@hp.com>
+M:	Paul Moore <paul@paul-moore.com>
 L:	netdev@vger.kernel.org
 S:	Maintained

@@ -4722,6 +4726,7 @@ S:	Maintained
 F:	drivers/of
 F:	include/linux/of*.h
 K:	of_get_property
+K:	of_match_table

 OPENRISC ARCHITECTURE
 M:	Jonas Bonn <jonas@southpole.se>
@@ -6318,6 +6323,7 @@ F:	include/linux/sysv_fs.h
 TARGET SUBSYSTEM
 M:	Nicholas A. Bellinger <nab@linux-iscsi.org>
 L:	linux-scsi@vger.kernel.org
+L:	target-devel@vger.kernel.org
 L:	http://groups.google.com/group/linux-iscsi-target-dev
 W:	http://www.linux-iscsi.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/nab/lio-core-2.6.git master

+ 2 - 2
Makefile

@@ -1,7 +1,7 @@
 VERSION = 3
-PATCHLEVEL = 0
+PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
 NAME = Sneaky Weasel

 # *DOCUMENTATION*

+ 3 - 0
arch/Kconfig

@@ -178,4 +178,7 @@ config HAVE_ARCH_MUTEX_CPU_RELAX
 config HAVE_RCU_TABLE_FREE
 	bool

+config ARCH_HAVE_NMI_SAFE_CMPXCHG
+	bool
+
 source "kernel/gcov/Kconfig"

+ 1 - 0
arch/alpha/Kconfig

@@ -14,6 +14,7 @@ config ALPHA
 	select AUTO_IRQ_AFFINITY if SMP
 	select GENERIC_IRQ_SHOW
 	select ARCH_WANT_OPTIONAL_GPIOLIB
+	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	help
 	  The Alpha is a 64-bit general-purpose processor designed and
 	  marketed by the Digital Equipment Corporation of blessed memory,

+ 0 - 3
arch/arm/kernel/armksyms.c

@@ -112,9 +112,6 @@ EXPORT_SYMBOL(__put_user_4);
 EXPORT_SYMBOL(__put_user_8);
 #endif

-	/* crypto hash */
-EXPORT_SYMBOL(sha_transform);
-
 	/* gcc lib functions */
 EXPORT_SYMBOL(__ashldi3);
 EXPORT_SYMBOL(__ashrdi3);

+ 3 - 1
arch/arm/kernel/process.c

@@ -30,6 +30,7 @@
 #include <linux/uaccess.h>
 #include <linux/random.h>
 #include <linux/hw_breakpoint.h>
+#include <linux/cpuidle.h>

 #include <asm/cacheflush.h>
 #include <asm/leds.h>
@@ -196,7 +197,8 @@ void cpu_idle(void)
 				cpu_relax();
 			} else {
 				stop_critical_timings();
-				pm_idle();
+				if (cpuidle_idle_call())
+					pm_idle();
 				start_critical_timings();
 				/*
 				 * This will eventually be removed - pm_idle

+ 1 - 1
arch/arm/lib/Makefile

@@ -12,7 +12,7 @@ lib-y		:= backtrace.o changebit.o csumipv6.o csumpartial.o   \
 		   strchr.o strrchr.o                                 \
 		   testchangebit.o testclearbit.o testsetbit.o        \
 		   ashldi3.o ashrdi3.o lshrdi3.o muldi3.o             \
-		   ucmpdi2.o lib1funcs.o div64.o sha1.o               \
+		   ucmpdi2.o lib1funcs.o div64.o                      \
 		   io-readsb.o io-writesb.o io-readsl.o io-writesl.o

 mmu-y	:= clear_user.o copy_page.o getuser.o putuser.o

+ 0 - 211
arch/arm/lib/sha1.S

@@ -1,211 +0,0 @@
-/*
- *  linux/arch/arm/lib/sha1.S
- *
- *  SHA transform optimized for ARM
- *
- *  Copyright:	(C) 2005 by Nicolas Pitre <nico@fluxnic.net>
- *  Created:	September 17, 2005
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License version 2 as
- *  published by the Free Software Foundation.
- *
- *  The reference implementation for this code is linux/lib/sha1.c
- */
-
-#include <linux/linkage.h>
-
-	.text
-
-
-/*
- * void sha_transform(__u32 *digest, const char *in, __u32 *W)
- *
- * Note: the "in" ptr may be unaligned.
- */
-
-ENTRY(sha_transform)
-
-	stmfd	sp!, {r4 - r8, lr}
-
-	@ for (i = 0; i < 16; i++)
-	@         W[i] = be32_to_cpu(in[i]);
-
-#ifdef __ARMEB__
-	mov	r4, r0
-	mov	r0, r2
-	mov	r2, #64
-	bl	memcpy
-	mov	r2, r0
-	mov	r0, r4
-#else
-	mov	r3, r2
-	mov	lr, #16
-1:	ldrb	r4, [r1], #1
-	ldrb	r5, [r1], #1
-	ldrb	r6, [r1], #1
-	ldrb	r7, [r1], #1
-	subs	lr, lr, #1
-	orr	r5, r5, r4, lsl #8
-	orr	r6, r6, r5, lsl #8
-	orr	r7, r7, r6, lsl #8
-	str	r7, [r3], #4
-	bne	1b
-#endif
-
-	@ for (i = 0; i < 64; i++)
-	@         W[i+16] = ror(W[i+13] ^ W[i+8] ^ W[i+2] ^ W[i], 31);
-
-	sub	r3, r2, #4
-	mov	lr, #64
-2:	ldr	r4, [r3, #4]!
-	subs	lr, lr, #1
-	ldr	r5, [r3, #8]
-	ldr	r6, [r3, #32]
-	ldr	r7, [r3, #52]
-	eor	r4, r4, r5
-	eor	r4, r4, r6
-	eor	r4, r4, r7
-	mov	r4, r4, ror #31
-	str	r4, [r3, #64]
-	bne	2b
-
-	/*
-	 * The SHA functions are:
-	 *
-	 * f1(B,C,D) = (D ^ (B & (C ^ D)))
-	 * f2(B,C,D) = (B ^ C ^ D)
-	 * f3(B,C,D) = ((B & C) | (D & (B | C)))
-	 *
-	 * Then the sub-blocks are processed as follows:
-	 *
-	 * A' = ror(A, 27) + f(B,C,D) + E + K + *W++
-	 * B' = A
-	 * C' = ror(B, 2)
-	 * D' = C
-	 * E' = D
-	 *
-	 * We therefore unroll each loop 5 times to avoid register shuffling.
-	 * Also the ror for C (and also D and E which are successivelyderived
-	 * from it) is applied in place to cut on an additional mov insn for
-	 * each round.
-	 */
-
-	.macro	sha_f1, A, B, C, D, E
-	ldr	r3, [r2], #4
-	eor	ip, \C, \D
-	add	\E, r1, \E, ror #2
-	and	ip, \B, ip, ror #2
-	add	\E, \E, \A, ror #27
-	eor	ip, ip, \D, ror #2
-	add	\E, \E, r3
-	add	\E, \E, ip
-	.endm
-
-	.macro	sha_f2, A, B, C, D, E
-	ldr	r3, [r2], #4
-	add	\E, r1, \E, ror #2
-	eor	ip, \B, \C, ror #2
-	add	\E, \E, \A, ror #27
-	eor	ip, ip, \D, ror #2
-	add	\E, \E, r3
-	add	\E, \E, ip
-	.endm
-
-	.macro	sha_f3, A, B, C, D, E
-	ldr	r3, [r2], #4
-	add	\E, r1, \E, ror #2
-	orr	ip, \B, \C, ror #2
-	add	\E, \E, \A, ror #27
-	and	ip, ip, \D, ror #2
-	add	\E, \E, r3
-	and	r3, \B, \C, ror #2
-	orr	ip, ip, r3
-	add	\E, \E, ip
-	.endm
-
-	ldmia	r0, {r4 - r8}
-
-	mov	lr, #4
-	ldr	r1, .L_sha_K + 0
-
-	/* adjust initial values */
-	mov	r6, r6, ror #30
-	mov	r7, r7, ror #30
-	mov	r8, r8, ror #30
-
-3:	subs	lr, lr, #1
-	sha_f1	r4, r5, r6, r7, r8
-	sha_f1	r8, r4, r5, r6, r7
-	sha_f1	r7, r8, r4, r5, r6
-	sha_f1	r6, r7, r8, r4, r5
-	sha_f1	r5, r6, r7, r8, r4
-	bne	3b
-
-	ldr	r1, .L_sha_K + 4
-	mov	lr, #4
-
-4:	subs	lr, lr, #1
-	sha_f2	r4, r5, r6, r7, r8
-	sha_f2	r8, r4, r5, r6, r7
-	sha_f2	r7, r8, r4, r5, r6
-	sha_f2	r6, r7, r8, r4, r5
-	sha_f2	r5, r6, r7, r8, r4
-	bne	4b
-
-	ldr	r1, .L_sha_K + 8
-	mov	lr, #4
-
-5:	subs	lr, lr, #1
-	sha_f3	r4, r5, r6, r7, r8
-	sha_f3	r8, r4, r5, r6, r7
-	sha_f3	r7, r8, r4, r5, r6
-	sha_f3	r6, r7, r8, r4, r5
-	sha_f3	r5, r6, r7, r8, r4
-	bne	5b
-
-	ldr	r1, .L_sha_K + 12
-	mov	lr, #4
-
-6:	subs	lr, lr, #1
-	sha_f2	r4, r5, r6, r7, r8
-	sha_f2	r8, r4, r5, r6, r7
-	sha_f2	r7, r8, r4, r5, r6
-	sha_f2	r6, r7, r8, r4, r5
-	sha_f2	r5, r6, r7, r8, r4
-	bne	6b
-
-	ldmia	r0, {r1, r2, r3, ip, lr}
-	add	r4, r1, r4
-	add	r5, r2, r5
-	add	r6, r3, r6, ror #2
-	add	r7, ip, r7, ror #2
-	add	r8, lr, r8, ror #2
-	stmia	r0, {r4 - r8}
-
-	ldmfd	sp!, {r4 - r8, pc}
-
-ENDPROC(sha_transform)
-
-	.align	2
-.L_sha_K:
-	.word	0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6
-
-
-/*
- * void sha_init(__u32 *buf)
- */
-
-	.align	2
-.L_sha_initial_digest:
-	.word	0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0
-
-ENTRY(sha_init)
-
-	str	lr, [sp, #-4]!
-	adr	r1, .L_sha_initial_digest
-	ldmia	r1, {r1, r2, r3, ip, lr}
-	stmia	r0, {r1, r2, r3, ip, lr}
-	ldr	pc, [sp], #4
-
-ENDPROC(sha_init)

+ 1 - 0
arch/avr32/Kconfig

@@ -10,6 +10,7 @@ config AVR32
 	select GENERIC_IRQ_PROBE
 	select HARDIRQS_SW_RESEND
 	select GENERIC_IRQ_SHOW
+	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	help
 	  AVR32 is a high-performance 32-bit RISC microprocessor core,
 	  designed for cost-sensitive embedded applications, with particular

+ 3 - 3
arch/cris/arch-v10/drivers/sync_serial.c

@@ -158,7 +158,7 @@ static int sync_serial_open(struct inode *inode, struct file *file);
 static int sync_serial_release(struct inode *inode, struct file *file);
 static unsigned int sync_serial_poll(struct file *filp, poll_table *wait);

-static int sync_serial_ioctl(struct file *file,
+static long sync_serial_ioctl(struct file *file,
 	unsigned int cmd, unsigned long arg);
 static ssize_t sync_serial_write(struct file *file, const char *buf,
 	size_t count, loff_t *ppos);
@@ -625,11 +625,11 @@ static int sync_serial_open(struct inode *inode, struct file *file)
 			*R_IRQ_MASK1_SET = 1 << port->data_avail_bit;
 		DEBUG(printk(KERN_DEBUG "sser%d rec started\n", dev));
 	}
-	ret = 0;
+	err = 0;
 	
 out:
 	mutex_unlock(&sync_serial_mutex);
-	return ret;
+	return err;
 }

 static int sync_serial_release(struct inode *inode, struct file *file)

+ 3 - 0
arch/cris/arch-v10/kernel/irq.c

@@ -20,6 +20,9 @@
 #define crisv10_mask_irq(irq_nr) (*R_VECT_MASK_CLR = 1 << (irq_nr));
 #define crisv10_unmask_irq(irq_nr) (*R_VECT_MASK_SET = 1 << (irq_nr));

+extern void kgdb_init(void);
+extern void breakpoint(void);
+
 /* don't use set_int_vector, it bypasses the linux interrupt handlers. it is
  * global just so that the kernel gdb can use it.
  */

+ 3 - 3
arch/cris/include/asm/thread_info.h

@@ -11,8 +11,6 @@

 #ifdef __KERNEL__

-#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
-
 #ifndef __ASSEMBLY__
 #include <asm/types.h>
 #include <asm/processor.h>
@@ -67,8 +65,10 @@ struct thread_info {
 
 #define init_thread_info	(init_thread_union.thread_info)
 
+#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
 /* thread information allocation */
-#define alloc_thread_info(tsk, node) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
+#define alloc_thread_info_node(tsk, node)	\
+	((struct thread_info *) __get_free_pages(GFP_KERNEL, 1))
 #define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
 
 #endif /* !__ASSEMBLY__ */

+ 1 - 0
arch/frv/Kconfig

@@ -7,6 +7,7 @@ config FRV
 	select HAVE_PERF_EVENTS
 	select HAVE_GENERIC_HARDIRQS
 	select GENERIC_IRQ_SHOW
+	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 
 config ZONE_DMA
 	bool

+ 5 - 0
arch/ia64/Kconfig

@@ -27,6 +27,8 @@ config IA64
 	select GENERIC_PENDING_IRQ if SMP
 	select IRQ_PER_CPU
 	select GENERIC_IRQ_SHOW
+	select ARCH_WANT_OPTIONAL_GPIOLIB
+	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	default y
 	help
 	  The Itanium Processor Family is Intel's 64-bit successor to
@@ -89,6 +91,9 @@ config GENERIC_TIME_VSYSCALL
 config HAVE_SETUP_PER_CPU_AREA
 	def_bool y
 
+config GENERIC_GPIO
+	def_bool y
+
 config DMI
 	bool
 	default y

+ 55 - 0
arch/ia64/include/asm/gpio.h

@@ -0,0 +1,55 @@
+/*
+ * Generic GPIO API implementation for IA-64.
+ *
+ * A straight copy of that for PowerPC which was:
+ *
+ * Copyright (c) 2007-2008  MontaVista Software, Inc.
+ *
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _ASM_IA64_GPIO_H
+#define _ASM_IA64_GPIO_H
+
+#include <linux/errno.h>
+#include <asm-generic/gpio.h>
+
+#ifdef CONFIG_GPIOLIB
+
+/*
+ * We don't (yet) implement inlined/rapid versions for on-chip gpios.
+ * Just call gpiolib.
+ */
+static inline int gpio_get_value(unsigned int gpio)
+{
+	return __gpio_get_value(gpio);
+}
+
+static inline void gpio_set_value(unsigned int gpio, int value)
+{
+	__gpio_set_value(gpio, value);
+}
+
+static inline int gpio_cansleep(unsigned int gpio)
+{
+	return __gpio_cansleep(gpio);
+}
+
+static inline int gpio_to_irq(unsigned int gpio)
+{
+	return __gpio_to_irq(gpio);
+}
+
+static inline int irq_to_gpio(unsigned int irq)
+{
+	return -EINVAL;
+}
+
+#endif /* CONFIG_GPIOLIB */
+
+#endif /* _ASM_IA64_GPIO_H */

+ 1 - 1
arch/ia64/kernel/efi.c

@@ -156,7 +156,7 @@ prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name,      \
 #define STUB_SET_VARIABLE(prefix, adjust_arg)				       \
 static efi_status_t							       \
 prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor,		       \
-		       unsigned long attr, unsigned long data_size,	       \
+		       u32 attr, unsigned long data_size,		       \
 		       void *data)					       \
 {									       \
 	struct ia64_fpreg fr[6];					       \

+ 1 - 0
arch/m68k/Kconfig

@@ -6,6 +6,7 @@ config M68K
 	select GENERIC_ATOMIC64 if MMU
 	select HAVE_GENERIC_HARDIRQS if !MMU
 	select GENERIC_IRQ_SHOW if !MMU
+	select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
 
 config RWSEM_GENERIC_SPINLOCK
 	bool

+ 1 - 0
arch/parisc/Kconfig

@@ -15,6 +15,7 @@ config PARISC
 	select HAVE_GENERIC_HARDIRQS
 	select GENERIC_IRQ_PROBE
 	select IRQ_PER_CPU
+	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 
 	help
 	  The PA-RISC microprocessor is designed by Hewlett-Packard and used

+ 2 - 2
arch/parisc/include/asm/atomic.h

@@ -258,10 +258,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
 
-static __inline__ int
+static __inline__ s64
 __atomic64_add_return(s64 i, atomic64_t *v)
 {
-	int ret;
+	s64 ret;
 	unsigned long flags;
 	_atomic_spin_lock_irqsave(v, flags);
 

+ 60 - 6
arch/parisc/include/asm/futex.h

@@ -5,11 +5,14 @@
 
 #include <linux/futex.h>
 #include <linux/uaccess.h>
+#include <asm/atomic.h>
 #include <asm/errno.h>
 
 static inline int
 futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
+	unsigned long int flags;
+	u32 val;
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
 	int oparg = (encoded_op << 8) >> 20;
@@ -18,21 +21,58 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
-	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)))
 		return -EFAULT;
 
 	pagefault_disable();
 
+	_atomic_spin_lock_irqsave(uaddr, flags);
+
 	switch (op) {
 	case FUTEX_OP_SET:
+		/* *(int *)UADDR2 = OPARG; */
+		ret = get_user(oldval, uaddr);
+		if (!ret)
+			ret = put_user(oparg, uaddr);
+		break;
 	case FUTEX_OP_ADD:
+		/* *(int *)UADDR2 += OPARG; */
+		ret = get_user(oldval, uaddr);
+		if (!ret) {
+			val = oldval + oparg;
+			ret = put_user(val, uaddr);
+		}
+		break;
 	case FUTEX_OP_OR:
+		/* *(int *)UADDR2 |= OPARG; */
+		ret = get_user(oldval, uaddr);
+		if (!ret) {
+			val = oldval | oparg;
+			ret = put_user(val, uaddr);
+		}
+		break;
 	case FUTEX_OP_ANDN:
+		/* *(int *)UADDR2 &= ~OPARG; */
+		ret = get_user(oldval, uaddr);
+		if (!ret) {
+			val = oldval & ~oparg;
+			ret = put_user(val, uaddr);
+		}
+		break;
 	case FUTEX_OP_XOR:
+		/* *(int *)UADDR2 ^= OPARG; */
+		ret = get_user(oldval, uaddr);
+		if (!ret) {
+			val = oldval ^ oparg;
+			ret = put_user(val, uaddr);
+		}
+		break;
 	default:
 		ret = -ENOSYS;
 	}
 
+	_atomic_spin_unlock_irqrestore(uaddr, flags);
+
 	pagefault_enable();
 
 	if (!ret) {
@@ -54,7 +94,9 @@ static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 			      u32 oldval, u32 newval)
 {
+	int ret;
 	u32 val;
+	unsigned long flags;
 
 	/* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
 	 * our gateway page, and causes no end of trouble...
@@ -65,12 +107,24 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
-	if (get_user(val, uaddr))
-		return -EFAULT;
-	if (val == oldval && put_user(newval, uaddr))
-		return -EFAULT;
+	/* HPPA has no cmpxchg in hardware and therefore the
+	 * best we can do here is use an array of locks. The
+	 * lock selected is based on a hash of the userspace
+	 * address. This should scale to a couple of CPUs.
+	 */
+
+	_atomic_spin_lock_irqsave(uaddr, flags);
+
+	ret = get_user(val, uaddr);
+
+	if (!ret && val == oldval)
+		ret = put_user(newval, uaddr);
+
 	*uval = val;
-	return 0;
+
+	_atomic_spin_unlock_irqrestore(uaddr, flags);
+
+	return ret;
 }
 
 #endif /*__KERNEL__*/

+ 2 - 1
arch/parisc/include/asm/unistd.h

@@ -821,8 +821,9 @@
 #define __NR_open_by_handle_at	(__NR_Linux + 326)
 #define __NR_syncfs		(__NR_Linux + 327)
 #define __NR_setns		(__NR_Linux + 328)
+#define __NR_sendmmsg		(__NR_Linux + 329)
 
-#define __NR_Linux_syscalls	(__NR_setns + 1)
+#define __NR_Linux_syscalls	(__NR_sendmmsg + 1)
 
 
 #define __IGNORE_select		/* newselect */

+ 1 - 0
arch/parisc/kernel/syscall_table.S

@@ -427,6 +427,7 @@
 	ENTRY_COMP(open_by_handle_at)
 	ENTRY_SAME(syncfs)
 	ENTRY_SAME(setns)
+	ENTRY_COMP(sendmmsg)
 
 	/* Nothing yet */
 

+ 1 - 0
arch/powerpc/Kconfig

@@ -136,6 +136,7 @@ config PPC
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_BPF_JIT if (PPC64 && NET)
 	select HAVE_ARCH_JUMP_LABEL
+	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 
 config EARLY_PRINTK
 	bool

+ 5 - 4
arch/s390/Kconfig

@@ -81,6 +81,7 @@ config S390
 	select INIT_ALL_POSSIBLE
 	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
+	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_LZMA
@@ -273,11 +274,11 @@ config MARCH_Z10
 	  on older machines.
 
 config MARCH_Z196
-	bool "IBM zEnterprise 196"
+	bool "IBM zEnterprise 114 and 196"
 	help
-	  Select this to enable optimizations for IBM zEnterprise 196
-	  (2817 series). The kernel will be slightly faster but will not work
-	  on older machines.
+	  Select this to enable optimizations for IBM zEnterprise 114 and 196
+	  (2818 and 2817 series). The kernel will be slightly faster but will
+	  not work on older machines.
 
 endchoice
 

+ 1 - 0
arch/s390/include/asm/ipl.h

@@ -167,5 +167,6 @@ enum diag308_rc {
 };
 
 extern int diag308(unsigned long subcode, void *addr);
+extern void diag308_reset(void);
 
 #endif /* _ASM_S390_IPL_H */

+ 9 - 2
arch/s390/include/asm/lowcore.h

@@ -18,6 +18,7 @@ void system_call(void);
 void pgm_check_handler(void);
 void mcck_int_handler(void);
 void io_int_handler(void);
+void psw_restart_int_handler(void);
 
 #ifdef CONFIG_32BIT
 
@@ -150,7 +151,10 @@ struct _lowcore {
 	 */
 	__u32	ipib;				/* 0x0e00 */
 	__u32	ipib_checksum;			/* 0x0e04 */
-	__u8	pad_0x0e08[0x0f00-0x0e08];	/* 0x0e08 */
+
+	/* 64 bit save area */
+	__u64	save_area_64;			/* 0x0e08 */
+	__u8	pad_0x0e10[0x0f00-0x0e10];	/* 0x0e10 */
 
 	/* Extended facility list */
 	__u64	stfle_fac_list[32];		/* 0x0f00 */
@@ -286,7 +290,10 @@ struct _lowcore {
 	 */
 	__u64	ipib;				/* 0x0e00 */
 	__u32	ipib_checksum;			/* 0x0e08 */
-	__u8	pad_0x0e0c[0x0f00-0x0e0c];	/* 0x0e0c */
+
+	/* 64 bit save area */
+	__u64	save_area_64;			/* 0x0e0c */
+	__u8	pad_0x0e14[0x0f00-0x0e14];	/* 0x0e14 */
 
 	/* Extended facility list */
 	__u64	stfle_fac_list[32];		/* 0x0f00 */

+ 0 - 2
arch/s390/include/asm/processor.h

@@ -119,14 +119,12 @@ struct stack_frame {
  * Do necessary setup to start up a new thread.
  */
 #define start_thread(regs, new_psw, new_stackp) do {		\
-	set_fs(USER_DS);					\
 	regs->psw.mask	= psw_user_bits;			\
 	regs->psw.addr	= new_psw | PSW_ADDR_AMODE;		\
 	regs->gprs[15]	= new_stackp;				\
 } while (0)
 
 #define start_thread31(regs, new_psw, new_stackp) do {		\
-	set_fs(USER_DS);					\
 	regs->psw.mask	= psw_user32_bits;			\
 	regs->psw.addr	= new_psw | PSW_ADDR_AMODE;		\
 	regs->gprs[15]	= new_stackp;				\

+ 1 - 0
arch/s390/include/asm/system.h

@@ -113,6 +113,7 @@ extern void pfault_fini(void);
 
 extern void cmma_init(void);
 extern int memcpy_real(void *, void *, size_t);
+extern void copy_to_absolute_zero(void *dest, void *src, size_t count);
 
 #define finish_arch_switch(prev) do {					     \
 	set_fs(current->thread.mm_segment);				     \

+ 4 - 6
arch/s390/kernel/asm-offsets.c

@@ -27,12 +27,9 @@ int main(void)
 	BLANK();
 	DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
 	BLANK();
-	DEFINE(__THREAD_per_cause,
-	       offsetof(struct task_struct, thread.per_event.cause));
-	DEFINE(__THREAD_per_address,
-	       offsetof(struct task_struct, thread.per_event.address));
-	DEFINE(__THREAD_per_paid,
-	       offsetof(struct task_struct, thread.per_event.paid));
+	DEFINE(__THREAD_per_cause, offsetof(struct task_struct, thread.per_event.cause));
+	DEFINE(__THREAD_per_address, offsetof(struct task_struct, thread.per_event.address));
+	DEFINE(__THREAD_per_paid, offsetof(struct task_struct, thread.per_event.paid));
 	BLANK();
 	DEFINE(__TI_task, offsetof(struct thread_info, task));
 	DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain));
@@ -142,6 +139,7 @@ int main(void)
 	DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area));
 	DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area));
 	DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area));
+	DEFINE(__LC_SAVE_AREA_64, offsetof(struct _lowcore, save_area_64));
 #ifdef CONFIG_32BIT
 	DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr));
 #else /* CONFIG_32BIT */

+ 36 - 0
arch/s390/kernel/base.S

@@ -76,6 +76,42 @@ s390_base_pgm_handler_fn:
 	.quad	0
 	.previous
 
+#
+# Calls diag 308 subcode 1 and continues execution
+#
+# The following conditions must be ensured before calling this function:
+# * Prefix register = 0
+# * Lowcore protection is disabled
+#
+ENTRY(diag308_reset)
+	larl	%r4,.Lctlregs		# Save control registers
+	stctg	%c0,%c15,0(%r4)
+	larl	%r4,.Lrestart_psw	# Setup restart PSW at absolute 0
+	lghi	%r3,0
+	lg	%r4,0(%r4)		# Save PSW
+	sturg	%r4,%r3			# Use sturg, because of large pages
+	lghi	%r1,1
+	diag	%r1,%r1,0x308
+.Lrestart_part2:
+	lhi	%r0,0			# Load r0 with zero
+	lhi	%r1,2			# Use mode 2 = ESAME (dump)
+	sigp	%r1,%r0,0x12		# Switch to ESAME mode
+	sam64				# Switch to 64 bit addressing mode
+	larl	%r4,.Lctlregs		# Restore control registers
+	lctlg	%c0,%c15,0(%r4)
+	br	%r14
+.align 16
+.Lrestart_psw:
+	.long	0x00080000,0x80000000 + .Lrestart_part2
+
+	.section .bss
+.align 8
+.Lctlregs:
+	.rept	16
+	.quad	0
+	.endr
+	.previous
+
 #else /* CONFIG_64BIT */
 
 ENTRY(s390_base_mcck_handler)

+ 12 - 31
arch/s390/kernel/compat_signal.c

@@ -380,20 +380,13 @@ asmlinkage long sys32_sigreturn(void)
 		goto badframe;
 	if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
 		goto badframe;
-
 	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-
+	set_current_blocked(&set);
 	if (restore_sigregs32(regs, &frame->sregs))
 		goto badframe;
 	if (restore_sigregs_gprs_high(regs, frame->gprs_high))
 		goto badframe;
-
 	return regs->gprs[2];
-
 badframe:
 	force_sig(SIGSEGV, current);
 	return 0;
@@ -413,31 +406,22 @@ asmlinkage long sys32_rt_sigreturn(void)
 		goto badframe;
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
-
 	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-
+	set_current_blocked(&set);
 	if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
 		goto badframe;
 	if (restore_sigregs_gprs_high(regs, frame->gprs_high))
 		goto badframe;
-
 	err = __get_user(ss_sp, &frame->uc.uc_stack.ss_sp);
 	st.ss_sp = compat_ptr(ss_sp);
 	err |= __get_user(st.ss_size, &frame->uc.uc_stack.ss_size);
 	err |= __get_user(st.ss_flags, &frame->uc.uc_stack.ss_flags);
 	if (err)
 		goto badframe; 
-
 	set_fs (KERNEL_DS);
 	do_sigaltstack((stack_t __force __user *)&st, NULL, regs->gprs[15]);
 	set_fs (old_fs);
-
 	return regs->gprs[2];
-
 badframe:
 	force_sig(SIGSEGV, current);
 	return 0;
@@ -605,10 +589,10 @@ give_sigsegv:
  * OK, we're invoking a handler
  */	
 
-int
-handle_signal32(unsigned long sig, struct k_sigaction *ka,
-		siginfo_t *info, sigset_t *oldset, struct pt_regs * regs)
+int handle_signal32(unsigned long sig, struct k_sigaction *ka,
+		    siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
 {
+	sigset_t blocked;
 	int ret;
 
 	/* Set up the stack frame */
@@ -616,15 +600,12 @@ handle_signal32(unsigned long sig, struct k_sigaction *ka,
 		ret = setup_rt_frame32(sig, ka, info, oldset, regs);
 	else
 		ret = setup_frame32(sig, ka, oldset, regs);
-
-	if (ret == 0) {
-		spin_lock_irq(&current->sighand->siglock);
-		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
-		if (!(ka->sa.sa_flags & SA_NODEFER))
-			sigaddset(&current->blocked,sig);
-		recalc_sigpending();
-		spin_unlock_irq(&current->sighand->siglock);
-	}
-	return ret;
+	if (ret)
+		return ret;
+	sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
+	if (!(ka->sa.sa_flags & SA_NODEFER))
+		sigaddset(&blocked, sig);
+	set_current_blocked(&blocked);
+	return 0;
 }
 

+ 28 - 0
arch/s390/kernel/entry.S

@@ -849,6 +849,34 @@ restart_crash:
 restart_go:
 #endif
 
+#
+# PSW restart interrupt handler
+#
+ENTRY(psw_restart_int_handler)
+	st	%r15,__LC_SAVE_AREA_64(%r0)	# save r15
+	basr	%r15,0
+0:	l	%r15,.Lrestart_stack-0b(%r15)	# load restart stack
+	l	%r15,0(%r15)
+	ahi	%r15,-SP_SIZE			# make room for pt_regs
+	stm	%r0,%r14,SP_R0(%r15)		# store gprs %r0-%r14 to stack
+	mvc	SP_R15(4,%r15),__LC_SAVE_AREA_64(%r0)# store saved %r15 to stack
+	mvc	SP_PSW(8,%r15),__LC_RST_OLD_PSW(%r0) # store restart old psw
+	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0
+	basr	%r14,0
+1:	l	%r14,.Ldo_restart-1b(%r14)
+	basr	%r14,%r14
+
+	basr	%r14,0				# load disabled wait PSW if
+2:	lpsw	restart_psw_crash-2b(%r14)	# do_restart returns
+	.align 4
+.Ldo_restart:
+	.long	do_restart
+.Lrestart_stack:
+	.long	restart_stack
+	.align 8
+restart_psw_crash:
+	.long	0x000a0000,0x00000000 + restart_psw_crash
+
 	.section .kprobes.text, "ax"
 
 #ifdef CONFIG_CHECK_STACK

+ 20 - 0
arch/s390/kernel/entry64.S

@@ -865,6 +865,26 @@ restart_crash:
 restart_go:
 #endif
 
+#
+# PSW restart interrupt handler
+#
+ENTRY(psw_restart_int_handler)
+	stg	%r15,__LC_SAVE_AREA_64(%r0)	# save r15
+	larl	%r15,restart_stack		# load restart stack
+	lg	%r15,0(%r15)
+	aghi	%r15,-SP_SIZE			# make room for pt_regs
+	stmg	%r0,%r14,SP_R0(%r15)		# store gprs %r0-%r14 to stack
+	mvc	SP_R15(8,%r15),__LC_SAVE_AREA_64(%r0)# store saved %r15 to stack
+	mvc	SP_PSW(16,%r15),__LC_RST_OLD_PSW(%r0)# store restart old psw
+	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0
+	brasl	%r14,do_restart
+
+	larl	%r14,restart_psw_crash		# load disabled wait PSW if
+	lpswe	0(%r14)				# do_restart returns
+	.align 8
+restart_psw_crash:
+	.quad	0x0002000080000000,0x0000000000000000 + restart_psw_crash
+
 	.section .kprobes.text, "ax"
 
 #ifdef CONFIG_CHECK_STACK

+ 44 - 1
arch/s390/kernel/ipl.c

@@ -45,11 +45,13 @@
  * - halt
  * - power off
  * - reipl
+ * - restart
  */
 #define ON_PANIC_STR		"on_panic"
 #define ON_HALT_STR		"on_halt"
 #define ON_POFF_STR		"on_poff"
 #define ON_REIPL_STR		"on_reboot"
+#define ON_RESTART_STR		"on_restart"
 
 struct shutdown_action;
 struct shutdown_trigger {
@@ -1544,17 +1546,20 @@ static char vmcmd_on_reboot[128];
 static char vmcmd_on_panic[128];
 static char vmcmd_on_halt[128];
 static char vmcmd_on_poff[128];
+static char vmcmd_on_restart[128];
 
 DEFINE_IPL_ATTR_STR_RW(vmcmd, on_reboot, "%s\n", "%s\n", vmcmd_on_reboot);
 DEFINE_IPL_ATTR_STR_RW(vmcmd, on_panic, "%s\n", "%s\n", vmcmd_on_panic);
 DEFINE_IPL_ATTR_STR_RW(vmcmd, on_halt, "%s\n", "%s\n", vmcmd_on_halt);
 DEFINE_IPL_ATTR_STR_RW(vmcmd, on_poff, "%s\n", "%s\n", vmcmd_on_poff);
+DEFINE_IPL_ATTR_STR_RW(vmcmd, on_restart, "%s\n", "%s\n", vmcmd_on_restart);
 
 static struct attribute *vmcmd_attrs[] = {
 	&sys_vmcmd_on_reboot_attr.attr,
 	&sys_vmcmd_on_panic_attr.attr,
 	&sys_vmcmd_on_halt_attr.attr,
 	&sys_vmcmd_on_poff_attr.attr,
+	&sys_vmcmd_on_restart_attr.attr,
 	NULL,
 };
 
@@ -1576,6 +1581,8 @@ static void vmcmd_run(struct shutdown_trigger *trigger)
 		cmd = vmcmd_on_halt;
 	else if (strcmp(trigger->name, ON_POFF_STR) == 0)
 		cmd = vmcmd_on_poff;
+	else if (strcmp(trigger->name, ON_RESTART_STR) == 0)
+		cmd = vmcmd_on_restart;
 	else
 		return;
 
@@ -1707,6 +1714,34 @@ static void do_panic(void)
 	stop_run(&on_panic_trigger);
 }
 
+/* on restart */
+
+static struct shutdown_trigger on_restart_trigger = {ON_RESTART_STR,
+	&reipl_action};
+
+static ssize_t on_restart_show(struct kobject *kobj,
+			       struct kobj_attribute *attr, char *page)
+{
+	return sprintf(page, "%s\n", on_restart_trigger.action->name);
+}
+
+static ssize_t on_restart_store(struct kobject *kobj,
+				struct kobj_attribute *attr,
+				const char *buf, size_t len)
+{
+	return set_trigger(buf, &on_restart_trigger, len);
+}
+
+static struct kobj_attribute on_restart_attr =
+	__ATTR(on_restart, 0644, on_restart_show, on_restart_store);
+
+void do_restart(void)
+{
+	smp_send_stop();
+	on_restart_trigger.action->fn(&on_restart_trigger);
+	stop_run(&on_restart_trigger);
+}
+
 /* on halt */
 
 static struct shutdown_trigger on_halt_trigger = {ON_HALT_STR, &stop_action};
@@ -1783,7 +1818,9 @@ static void __init shutdown_triggers_init(void)
 	if (sysfs_create_file(&shutdown_actions_kset->kobj,
 			      &on_poff_attr.attr))
 		goto fail;
-
+	if (sysfs_create_file(&shutdown_actions_kset->kobj,
+			      &on_restart_attr.attr))
+		goto fail;
 	return;
 fail:
 	panic("shutdown_triggers_init failed\n");
@@ -1959,6 +1996,12 @@ static void do_reset_calls(void)
 {
 	struct reset_call *reset;
 
+#ifdef CONFIG_64BIT
+	if (diag308_set_works) {
+		diag308_reset();
+		return;
+	}
+#endif
 	list_for_each_entry(reset, &rcall, list)
 		reset->fn();
 }

+ 60 - 20
arch/s390/kernel/reipl64.S

@@ -1,5 +1,5 @@
 /*
- *    Copyright IBM Corp 2000,2009
+ *    Copyright IBM Corp 2000,2011
  *    Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
  *		 Denis Joseph Barrow,
  */
@@ -7,6 +7,64 @@
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 
+#
+# store_status
+#
+# Prerequisites to run this function:
+# - Prefix register is set to zero
+# - Original prefix register is stored in "dump_prefix_page"
+# - Lowcore protection is off
+#
+ENTRY(store_status)
+	/* Save register one and load save area base */
+	stg	%r1,__LC_SAVE_AREA_64(%r0)
+	lghi	%r1,SAVE_AREA_BASE
+	/* General purpose registers */
+	stmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	lg	%r2,__LC_SAVE_AREA_64(%r0)
+	stg	%r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1)
+	/* Control registers */
+	stctg	%c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	/* Access registers */
+	stam	%a0,%a15,__LC_AREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	/* Floating point registers */
+	std	%f0, 0x00 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	std	%f1, 0x08 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	std	%f2, 0x10 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	std	%f3, 0x18 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	std	%f4, 0x20 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	std	%f5, 0x28 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	std	%f6, 0x30 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	std	%f7, 0x38 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	std	%f8, 0x40 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	std	%f9, 0x48 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	std	%f10,0x50 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	std	%f11,0x58 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	std	%f12,0x60 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	std	%f13,0x68 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	std	%f14,0x70 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	std	%f15,0x78 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	/* Floating point control register */
+	stfpc	__LC_FP_CREG_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	/* CPU timer */
+	stpt	__LC_CPU_TIMER_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	/* Saved prefix register */
+	larl	%r2,dump_prefix_page
+	mvc	__LC_PREFIX_SAVE_AREA-SAVE_AREA_BASE(4,%r1),0(%r2)
+	/* Clock comparator - seven bytes */
+	larl	%r2,.Lclkcmp
+	stckc	0(%r2)
+	mvc	__LC_CLOCK_COMP_SAVE_AREA-SAVE_AREA_BASE + 1(7,%r1),1(%r2)
+	/* Program status word */
+	epsw	%r2,%r3
+	st	%r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 0(%r1)
+	st	%r3,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 4(%r1)
+	larl	%r2,store_status
+	stg	%r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1)
+	br	%r14
+.align	8
+.Lclkcmp:	.quad	0x0000000000000000
+
 #
 # do_reipl_asm
 # Parameter: r2 = schid of reipl device
@@ -15,22 +73,7 @@
 ENTRY(do_reipl_asm)
 		basr	%r13,0
 .Lpg0:		lpswe	.Lnewpsw-.Lpg0(%r13)
-.Lpg1:		# do store status of all registers
-
-		stg	%r1,.Lregsave-.Lpg0(%r13)
-		lghi	%r1,0x1000
-		stmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-0x1000(%r1)
-		lg	%r0,.Lregsave-.Lpg0(%r13)
-		stg	%r0,__LC_GPREGS_SAVE_AREA-0x1000+8(%r1)
-		stctg	%c0,%c15,__LC_CREGS_SAVE_AREA-0x1000(%r1)
-		stam	%a0,%a15,__LC_AREGS_SAVE_AREA-0x1000(%r1)
-		lg	%r10,.Ldump_pfx-.Lpg0(%r13)
-		mvc	__LC_PREFIX_SAVE_AREA-0x1000(4,%r1),0(%r10)
-		stfpc	__LC_FP_CREG_SAVE_AREA-0x1000(%r1)
-		stckc	.Lclkcmp-.Lpg0(%r13)
-		mvc	__LC_CLOCK_COMP_SAVE_AREA-0x1000(7,%r1),.Lclkcmp-.Lpg0(%r13)
-		stpt	__LC_CPU_TIMER_SAVE_AREA-0x1000(%r1)
-		stg	%r13, __LC_PSW_SAVE_AREA-0x1000+8(%r1)
+.Lpg1:		brasl	%r14,store_status
 
 		lctlg	%c6,%c6,.Lall-.Lpg0(%r13)
 		lgr	%r1,%r2
@@ -67,10 +110,7 @@ ENTRY(do_reipl_asm)
 		st	%r14,.Ldispsw+12-.Lpg0(%r13)
 		lpswe	.Ldispsw-.Lpg0(%r13)
 		.align	8
-.Lclkcmp:	.quad	0x0000000000000000
 .Lall:		.quad	0x00000000ff000000
-.Ldump_pfx:	.quad	dump_prefix_page
-.Lregsave:	.quad	0x0000000000000000
 		.align	16
 /*
  * These addresses have to be 31 bit otherwise

+ 24 - 1
arch/s390/kernel/setup.c

@@ -346,7 +346,7 @@ setup_lowcore(void)
 	lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
 	lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
 	lc->restart_psw.addr =
-		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
+		PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
 	if (user_mode != HOME_SPACE_MODE)
 		lc->restart_psw.mask |= PSW_ASC_HOME;
 	lc->external_new_psw.mask = psw_kernel_bits;
@@ -529,6 +529,27 @@ static void __init setup_memory_end(void)
 		memory_end = memory_size;
 }
 
+void *restart_stack __attribute__((__section__(".data")));
+
+/*
+ * Setup new PSW and allocate stack for PSW restart interrupt
+ */
+static void __init setup_restart_psw(void)
+{
+	psw_t psw;
+
+	restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
+	restart_stack += ASYNC_SIZE;
+
+	/*
+	 * Setup restart PSW for absolute zero lowcore. This is necessary
+	 * if PSW restart is done on an offline CPU that has lowcore zero
+	 */
+	psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
+	psw.addr = PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
+	copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw));
+}
+
 static void __init
 setup_memory(void)
 {
@@ -731,6 +752,7 @@ static void __init setup_hwcaps(void)
 		strcpy(elf_platform, "z10");
 		break;
 	case 0x2817:
+	case 0x2818:
 		strcpy(elf_platform, "z196");
 		break;
 	}
@@ -792,6 +814,7 @@ setup_arch(char **cmdline_p)
 	setup_addressing_mode();
 	setup_memory();
 	setup_resources();
+	setup_restart_psw();
 	setup_lowcore();
 
         cpu_init();

+ 19 - 42
arch/s390/kernel/signal.c

@@ -57,17 +57,15 @@ typedef struct
  */
 SYSCALL_DEFINE3(sigsuspend, int, history0, int, history1, old_sigset_t, mask)
 {
-	mask &= _BLOCKABLE;
-	spin_lock_irq(&current->sighand->siglock);
-	current->saved_sigmask = current->blocked;
-	siginitset(&current->blocked, mask);
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	sigset_t blocked;
 
+	current->saved_sigmask = current->blocked;
+	mask &= _BLOCKABLE;
+	siginitset(&blocked, mask);
+	set_current_blocked(&blocked);
 	set_current_state(TASK_INTERRUPTIBLE);
 	schedule();
-	set_thread_flag(TIF_RESTORE_SIGMASK);
-
+	set_restore_sigmask();
 	return -ERESTARTNOHAND;
 }
 
@@ -172,18 +170,11 @@ SYSCALL_DEFINE0(sigreturn)
 		goto badframe;
 	if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE))
 		goto badframe;
-
 	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-
+	set_current_blocked(&set);
 	if (restore_sigregs(regs, &frame->sregs))
 		goto badframe;
-
 	return regs->gprs[2];
-
 badframe:
 	force_sig(SIGSEGV, current);
 	return 0;
@@ -199,21 +190,14 @@ SYSCALL_DEFINE0(rt_sigreturn)
 		goto badframe;
 	if (__copy_from_user(&set.sig, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
-
 	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-
+	set_current_blocked(&set);
 	if (restore_sigregs(regs, &frame->uc.uc_mcontext))
 		goto badframe;
-
 	if (do_sigaltstack(&frame->uc.uc_stack, NULL,
 			   regs->gprs[15]) == -EFAULT)
 		goto badframe;
 	return regs->gprs[2];
-
 badframe:
 	force_sig(SIGSEGV, current);
 	return 0;
@@ -385,14 +369,11 @@ give_sigsegv:
 	return -EFAULT;
 }
 
-/*
- * OK, we're invoking a handler
- */	
-
-static int
-handle_signal(unsigned long sig, struct k_sigaction *ka,
-	      siginfo_t *info, sigset_t *oldset, struct pt_regs * regs)
+static int handle_signal(unsigned long sig, struct k_sigaction *ka,
+			 siginfo_t *info, sigset_t *oldset,
+			 struct pt_regs *regs)
 {
+	sigset_t blocked;
 	int ret;
 
 	/* Set up the stack frame */
@@ -400,17 +381,13 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
 		ret = setup_rt_frame(sig, ka, info, oldset, regs);
 	else
 		ret = setup_frame(sig, ka, oldset, regs);
-
-	if (ret == 0) {
-		spin_lock_irq(&current->sighand->siglock);
-		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
-		if (!(ka->sa.sa_flags & SA_NODEFER))
-			sigaddset(&current->blocked,sig);
-		recalc_sigpending();
-		spin_unlock_irq(&current->sighand->siglock);
-	}
-
-	return ret;
+	if (ret)
+		return ret;
+	sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
+	if (!(ka->sa.sa_flags & SA_NODEFER))
+		sigaddset(&blocked, sig);
+	set_current_blocked(&blocked);
+	return 0;
 }
 
 /*

+ 16 - 8
arch/s390/kernel/smp.c

@@ -452,23 +452,27 @@ out:
  */
 int __cpuinit start_secondary(void *cpuvoid)
 {
-	/* Setup the cpu */
 	cpu_init();
 	preempt_disable();
-	/* Enable TOD clock interrupts on the secondary cpu. */
 	init_cpu_timer();
-	/* Enable cpu timer interrupts on the secondary cpu. */
 	init_cpu_vtimer();
-	/* Enable pfault pseudo page faults on this cpu. */
 	pfault_init();
 
-	/* call cpu notifiers */
 	notify_cpu_starting(smp_processor_id());
-	/* Mark this cpu as online */
 	ipi_call_lock();
 	set_cpu_online(smp_processor_id(), true);
 	ipi_call_unlock();
-	/* Switch on interrupts */
+	__ctl_clear_bit(0, 28); /* Disable lowcore protection */
+	S390_lowcore.restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
+	S390_lowcore.restart_psw.addr =
+		PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
+	__ctl_set_bit(0, 28); /* Enable lowcore protection */
+	/*
+	 * Wait until the cpu which brought this one up marked it
+	 * active before enabling interrupts.
+	 */
+	while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
+		cpu_relax();
 	local_irq_enable();
 	/* cpu_idle will call schedule for us */
 	cpu_idle();
@@ -507,7 +511,11 @@ static int __cpuinit smp_alloc_lowcore(int cpu)
 	memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512);
 	lowcore->async_stack = async_stack + ASYNC_SIZE;
 	lowcore->panic_stack = panic_stack + PAGE_SIZE;
-
+	lowcore->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
+	lowcore->restart_psw.addr =
+		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
+	if (user_mode != HOME_SPACE_MODE)
+		lowcore->restart_psw.mask |= PSW_ASC_HOME;
 #ifndef CONFIG_64BIT
 	if (MACHINE_HAS_IEEE) {
 		unsigned long save_area;

+ 16 - 0
arch/s390/mm/maccess.c

@@ -85,3 +85,19 @@ int memcpy_real(void *dest, void *src, size_t count)
 	arch_local_irq_restore(flags);
 	return rc;
 }
+
+/*
+ * Copy memory to absolute zero
+ */
+void copy_to_absolute_zero(void *dest, void *src, size_t count)
+{
+	unsigned long cr0;
+
+	BUG_ON((unsigned long) dest + count >= sizeof(struct _lowcore));
+	preempt_disable();
+	__ctl_store(cr0, 0, 0);
+	__ctl_clear_bit(0, 28); /* disable lowcore protection */
+	memcpy_real(dest + store_prefix(), src, count);
+	__ctl_load(cr0, 0, 0);
+	preempt_enable();
+}

+ 1 - 0
arch/s390/mm/pgtable.c

@@ -528,6 +528,7 @@ static inline void page_table_free_pgste(unsigned long *table)
 static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
 						    unsigned long vmaddr)
 {
+	return NULL;
 }
 
 static inline void page_table_free_pgste(unsigned long *table)

+ 1 - 0
arch/sh/Kconfig

@@ -11,6 +11,7 @@ config SUPERH
 	select HAVE_DMA_ATTRS
 	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
+	select ARCH_HAVE_NMI_SAFE_CMPXCHG if (GUSA_RB || CPU_SH4A)
 	select PERF_USE_VMALLOC
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_BZIP2

+ 4 - 2
arch/sh/kernel/idle.c

@@ -16,12 +16,13 @@
 #include <linux/thread_info.h>
 #include <linux/irqflags.h>
 #include <linux/smp.h>
+#include <linux/cpuidle.h>
 #include <asm/pgalloc.h>
 #include <asm/system.h>
 #include <linux/atomic.h>
 #include <asm/smp.h>
 
-void (*pm_idle)(void) = NULL;
+static void (*pm_idle)(void);
 
 static int hlt_counter;
 
@@ -100,7 +101,8 @@ void cpu_idle(void)
 			local_irq_disable();
 			/* Don't trace irqs off for idle */
 			stop_critical_timings();
-			pm_idle();
+			if (cpuidle_idle_call())
+				pm_idle();
 			/*
 			 * Sanity check to ensure that pm_idle() returns
 			 * with IRQs enabled

+ 1 - 0
arch/sparc/Kconfig

@@ -54,6 +54,7 @@ config SPARC64
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
 	select IRQ_PREFLOW_FASTEOI
+	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 
 config ARCH_DEFCONFIG
 	string

+ 5 - 0
arch/sparc/include/asm/Kbuild

@@ -16,3 +16,8 @@ header-y += traps.h
 header-y += uctx.h
 header-y += utrap.h
 header-y += watchdog.h
+
+generic-y += div64.h
+generic-y += local64.h
+generic-y += irq_regs.h
+generic-y += local.h

+ 8 - 41
arch/sparc/include/asm/bitops_64.h

@@ -26,61 +26,28 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr);
 #define smp_mb__before_clear_bit()	barrier()
 #define smp_mb__after_clear_bit()	barrier()
 
-#include <asm-generic/bitops/ffz.h>
-#include <asm-generic/bitops/__ffs.h>
 #include <asm-generic/bitops/fls.h>
 #include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/fls64.h>
 
 #ifdef __KERNEL__
 
+extern int ffs(int x);
+extern unsigned long __ffs(unsigned long);
+
+#include <asm-generic/bitops/ffz.h>
 #include <asm-generic/bitops/sched.h>
-#include <asm-generic/bitops/ffs.h>
 
 /*
  * hweightN: returns the hamming weight (i.e. the number
  * of bits set) of a N-bit word
  */
 
-#ifdef ULTRA_HAS_POPULATION_COUNT
-
-static inline unsigned int __arch_hweight64(unsigned long w)
-{
-	unsigned int res;
-
-	__asm__ ("popc %1,%0" : "=r" (res) : "r" (w));
-	return res;
-}
-
-static inline unsigned int __arch_hweight32(unsigned int w)
-{
-	unsigned int res;
-
-	__asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xffffffff));
-	return res;
-}
+extern unsigned long __arch_hweight64(__u64 w);
+extern unsigned int __arch_hweight32(unsigned int w);
+extern unsigned int __arch_hweight16(unsigned int w);
+extern unsigned int __arch_hweight8(unsigned int w);
 
-static inline unsigned int __arch_hweight16(unsigned int w)
-{
-	unsigned int res;
-
-	__asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xffff));
-	return res;
-}
-
-static inline unsigned int __arch_hweight8(unsigned int w)
-{
-	unsigned int res;
-
-	__asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xff));
-	return res;
-}
-
-#else
-
-#include <asm-generic/bitops/arch_hweight.h>
-
-#endif
 #include <asm-generic/bitops/const_hweight.h>
 #include <asm-generic/bitops/lock.h>
 #endif /* __KERNEL__ */

+ 0 - 1
arch/sparc/include/asm/div64.h

@@ -1 +0,0 @@
-#include <asm-generic/div64.h>

+ 29 - 36
arch/sparc/include/asm/elf_64.h

@@ -59,15 +59,33 @@
 #define R_SPARC_6		45
 
 /* Bits present in AT_HWCAP, primarily for Sparc32.  */
-
-#define HWCAP_SPARC_FLUSH       1    /* CPU supports flush instruction. */
-#define HWCAP_SPARC_STBAR       2
-#define HWCAP_SPARC_SWAP        4
-#define HWCAP_SPARC_MULDIV      8
-#define HWCAP_SPARC_V9		16
-#define HWCAP_SPARC_ULTRA3	32
-#define HWCAP_SPARC_BLKINIT	64
-#define HWCAP_SPARC_N2		128
+#define HWCAP_SPARC_FLUSH       0x00000001
+#define HWCAP_SPARC_STBAR       0x00000002
+#define HWCAP_SPARC_SWAP        0x00000004
+#define HWCAP_SPARC_MULDIV      0x00000008
+#define HWCAP_SPARC_V9		0x00000010
+#define HWCAP_SPARC_ULTRA3	0x00000020
+#define HWCAP_SPARC_BLKINIT	0x00000040
+#define HWCAP_SPARC_N2		0x00000080
+
+/* Solaris compatible AT_HWCAP bits. */
+#define AV_SPARC_MUL32		0x00000100 /* 32x32 multiply is efficient */
+#define AV_SPARC_DIV32		0x00000200 /* 32x32 divide is efficient */
+#define AV_SPARC_FSMULD		0x00000400 /* 'fsmuld' is efficient */
+#define AV_SPARC_V8PLUS		0x00000800 /* v9 insn available to 32bit */
+#define AV_SPARC_POPC		0x00001000 /* 'popc' is efficient */
+#define AV_SPARC_VIS		0x00002000 /* VIS insns available */
+#define AV_SPARC_VIS2		0x00004000 /* VIS2 insns available */
+#define AV_SPARC_ASI_BLK_INIT	0x00008000 /* block init ASIs available */
+#define AV_SPARC_FMAF		0x00010000 /* fused multiply-add */
+#define AV_SPARC_VIS3		0x00020000 /* VIS3 insns available */
+#define AV_SPARC_HPC		0x00040000 /* HPC insns available */
+#define AV_SPARC_RANDOM		0x00080000 /* 'random' insn available */
+#define AV_SPARC_TRANS		0x00100000 /* transaction insns available */
+#define AV_SPARC_FJFMAU		0x00200000 /* unfused multiply-add */
+#define AV_SPARC_IMA		0x00400000 /* integer multiply-add */
+#define AV_SPARC_ASI_CACHE_SPARING \
+				0x00800000 /* cache sparing ASIs available */
 
 #define CORE_DUMP_USE_REGSET
 
@@ -162,33 +180,8 @@ typedef struct {
 #define ELF_ET_DYN_BASE		0x0000010000000000UL
 #define COMPAT_ELF_ET_DYN_BASE	0x0000000070000000UL
 
-
-/* This yields a mask that user programs can use to figure out what
-   instruction set this cpu supports.  */
-
-/* On Ultra, we support all of the v8 capabilities. */
-static inline unsigned int sparc64_elf_hwcap(void)
-{
-	unsigned int cap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
-			    HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV |
-			    HWCAP_SPARC_V9);
-
-	if (tlb_type == cheetah || tlb_type == cheetah_plus)
-		cap |= HWCAP_SPARC_ULTRA3;
-	else if (tlb_type == hypervisor) {
-		if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 ||
-		    sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
-		    sun4v_chip_type == SUN4V_CHIP_NIAGARA3)
-			cap |= HWCAP_SPARC_BLKINIT;
-		if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
-		    sun4v_chip_type == SUN4V_CHIP_NIAGARA3)
-			cap |= HWCAP_SPARC_N2;
-	}
-
-	return cap;
-}
-
-#define ELF_HWCAP	sparc64_elf_hwcap()
+extern unsigned long sparc64_elf_hwcap;
+#define ELF_HWCAP	sparc64_elf_hwcap
 
 /* This yields a string that ld.so will use to load implementation
    specific libraries for optimization.  This is more specific in

+ 13 - 0
arch/sparc/include/asm/hypervisor.h

@@ -2927,6 +2927,13 @@ extern unsigned long sun4v_ncs_request(unsigned long request,
 #define HV_FAST_FIRE_GET_PERFREG	0x120
 #define HV_FAST_FIRE_SET_PERFREG	0x121
 
+#define HV_FAST_REBOOT_DATA_SET		0x172
+
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_reboot_data_set(unsigned long ra,
+					   unsigned long len);
+#endif
+
 /* Function numbers for HV_CORE_TRAP.  */
 #define HV_CORE_SET_VER			0x00
 #define HV_CORE_PUTCHAR			0x01
@@ -2940,11 +2947,17 @@ extern unsigned long sun4v_ncs_request(unsigned long request,
 #define HV_GRP_CORE			0x0001
 #define HV_GRP_INTR			0x0002
 #define HV_GRP_SOFT_STATE		0x0003
+#define HV_GRP_TM			0x0080
 #define HV_GRP_PCI			0x0100
 #define HV_GRP_LDOM			0x0101
 #define HV_GRP_SVC_CHAN			0x0102
 #define HV_GRP_NCS			0x0103
 #define HV_GRP_RNG			0x0104
+#define HV_GRP_PBOOT			0x0105
+#define HV_GRP_TPM			0x0107
+#define HV_GRP_SDIO			0x0108
+#define HV_GRP_SDIO_ERR			0x0109
+#define HV_GRP_REBOOT_DATA		0x0110
 #define HV_GRP_NIAG_PERF		0x0200
 #define HV_GRP_FIRE_PERF		0x0201
 #define HV_GRP_N2_CPU			0x0202

+ 0 - 1
arch/sparc/include/asm/irq_regs.h

@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>

+ 0 - 6
arch/sparc/include/asm/local.h

@@ -1,6 +0,0 @@
-#ifndef _SPARC_LOCAL_H
-#define _SPARC_LOCAL_H
-
-#include <asm-generic/local.h>
-
-#endif

+ 0 - 1
arch/sparc/include/asm/local64.h

@@ -1 +0,0 @@
-#include <asm-generic/local64.h>

+ 24 - 27
arch/sparc/include/asm/tsb.h

@@ -133,29 +133,6 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 	sub	TSB, 0x8, TSB;   \
 	TSB_STORE(TSB, TAG);
 
-#define KTSB_LOAD_QUAD(TSB, REG) \
-	ldda		[TSB] ASI_NUCLEUS_QUAD_LDD, REG;
-
-#define KTSB_STORE(ADDR, VAL) \
-	stxa		VAL, [ADDR] ASI_N;
-
-#define KTSB_LOCK_TAG(TSB, REG1, REG2)	\
-99:	lduwa	[TSB] ASI_N, REG1;	\
-	sethi	%hi(TSB_TAG_LOCK_HIGH), REG2;\
-	andcc	REG1, REG2, %g0;	\
-	bne,pn	%icc, 99b;		\
-	 nop;				\
-	casa	[TSB] ASI_N, REG1, REG2;\
-	cmp	REG1, REG2;		\
-	bne,pn	%icc, 99b;		\
-	 nop;				\
-
-#define KTSB_WRITE(TSB, TTE, TAG) \
-	add	TSB, 0x8, TSB;   \
-	stxa	TTE, [TSB] ASI_N;     \
-	sub	TSB, 0x8, TSB;   \
-	stxa	TAG, [TSB] ASI_N;
-
 	/* Do a kernel page table walk.  Leaves physical PTE pointer in
 	 * REG1.  Jumps to FAIL_LABEL on early page table walk termination.
 	 * VADDR will not be clobbered, but REG2 will.
@@ -239,6 +216,8 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 	(KERNEL_TSB_SIZE_BYTES / 16)
 #define KERNEL_TSB4M_NENTRIES	4096
 
+#define KTSB_PHYS_SHIFT		15
+
 	/* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL
 	 * on TSB hit.  REG1, REG2, REG3, and REG4 are used as temporaries
 	 * and the found TTE will be left in REG1.  REG3 and REG4 must
@@ -247,13 +226,22 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 	 * VADDR and TAG will be preserved and not clobbered by this macro.
 	 */
 #define KERN_TSB_LOOKUP_TL1(VADDR, TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
-	sethi		%hi(swapper_tsb), REG1; \
+661:	sethi		%hi(swapper_tsb), REG1;			\
 	or		REG1, %lo(swapper_tsb), REG1; \
+	.section	.swapper_tsb_phys_patch, "ax"; \
+	.word		661b; \
+	.previous; \
+661:	nop; \
+	.section	.tsb_ldquad_phys_patch, "ax"; \
+	.word		661b; \
+	sllx		REG1, KTSB_PHYS_SHIFT, REG1; \
+	sllx		REG1, KTSB_PHYS_SHIFT, REG1; \
+	.previous; \
 	srlx		VADDR, PAGE_SHIFT, REG2; \
 	and		REG2, (KERNEL_TSB_NENTRIES - 1), REG2; \
 	sllx		REG2, 4, REG2; \
 	add		REG1, REG2, REG2; \
-	KTSB_LOAD_QUAD(REG2, REG3); \
+	TSB_LOAD_QUAD(REG2, REG3); \
 	cmp		REG3, TAG; \
 	be,a,pt		%xcc, OK_LABEL; \
 	 mov		REG4, REG1;
@@ -263,12 +251,21 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 	 * we can make use of that for the index computation.
 	 */
 #define KERN_TSB4M_LOOKUP_TL1(TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
-	sethi		%hi(swapper_4m_tsb), REG1; \
+661:	sethi		%hi(swapper_4m_tsb), REG1;	     \
 	or		REG1, %lo(swapper_4m_tsb), REG1; \
+	.section	.swapper_4m_tsb_phys_patch, "ax"; \
+	.word		661b; \
+	.previous; \
+661:	nop; \
+	.section	.tsb_ldquad_phys_patch, "ax"; \
+	.word		661b; \
+	sllx		REG1, KTSB_PHYS_SHIFT, REG1; \
+	sllx		REG1, KTSB_PHYS_SHIFT, REG1; \
+	.previous; \
 	and		TAG, (KERNEL_TSB4M_NENTRIES - 1), REG2; \
 	sllx		REG2, 4, REG2; \
 	add		REG1, REG2, REG2; \
-	KTSB_LOAD_QUAD(REG2, REG3); \
+	TSB_LOAD_QUAD(REG2, REG3); \
 	cmp		REG3, TAG; \
 	be,a,pt		%xcc, OK_LABEL; \
 	 mov		REG4, REG1;

+ 1 - 0
arch/sparc/kernel/cpu.c

@@ -396,6 +396,7 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
 		   , cpu_data(0).clock_tick
 #endif
 		);
+	cpucap_info(m);
 #ifdef CONFIG_SMP
 	smp_bogo(m);
 #endif

+ 28 - 2
arch/sparc/kernel/ds.c

@@ -15,12 +15,15 @@
 #include <linux/reboot.h>
 #include <linux/cpu.h>
 
+#include <asm/hypervisor.h>
 #include <asm/ldc.h>
 #include <asm/vio.h>
 #include <asm/mdesc.h>
 #include <asm/head.h>
 #include <asm/irq.h>
 
+#include "kernel.h"
+
 #define DRV_MODULE_NAME		"ds"
 #define PFX DRV_MODULE_NAME	": "
 #define DRV_MODULE_VERSION	"1.0"
@@ -828,18 +831,32 @@ void ldom_set_var(const char *var, const char *value)
 	}
 }
 
+static char full_boot_str[256] __attribute__((aligned(32)));
+static int reboot_data_supported;
+
 void ldom_reboot(const char *boot_command)
 {
 	/* Don't bother with any of this if the boot_command
 	 * is empty.
 	 */
 	if (boot_command && strlen(boot_command)) {
-		char full_boot_str[256];
+		unsigned long len;
 
 		strcpy(full_boot_str, "boot ");
 		strcpy(full_boot_str + strlen("boot "), boot_command);
+		len = strlen(full_boot_str);
 
-		ldom_set_var("reboot-command", full_boot_str);
+		if (reboot_data_supported) {
+			unsigned long ra = kimage_addr_to_ra(full_boot_str);
+			unsigned long hv_ret;
+
+			hv_ret = sun4v_reboot_data_set(ra, len);
+			if (hv_ret != HV_EOK)
+				pr_err("SUN4V: Unable to set reboot data "
+				       "hv_ret=%lu\n", hv_ret);
+		} else {
+			ldom_set_var("reboot-command", full_boot_str);
+		}
 	}
 	sun4v_mach_sir();
 }
@@ -1237,6 +1254,15 @@ static struct vio_driver ds_driver = {
 
 static int __init ds_init(void)
 {
+	unsigned long hv_ret, major, minor;
+
+	hv_ret = sun4v_get_version(HV_GRP_REBOOT_DATA, &major, &minor);
+	if (hv_ret == HV_EOK) {
+		pr_info("SUN4V: Reboot data supported (maj=%lu,min=%lu).\n",
+			major, minor);
+		reboot_data_supported = 1;
+	}
+
 	kthread_run(ds_thread, NULL, "kldomd");
 
 	return vio_register_driver(&ds_driver);

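Taken together, the ds.c hunks above mean the reboot command is now handed to the hypervisor whenever the REBOOT_DATA API group is present, and only falls back to the LDOM variable store otherwise. A compressed C sketch of that flow, using only names that appear in the diff (treat it as an illustration of the kernel code, not a drop-in replacement):

	/* Sketch of the probe-then-use pattern added to ds.c. */
	static int reboot_data_supported;

	static void probe_reboot_data(void)
	{
		unsigned long hv_ret, major, minor;

		hv_ret = sun4v_get_version(HV_GRP_REBOOT_DATA, &major, &minor);
		if (hv_ret == HV_EOK)
			reboot_data_supported = 1;	/* negotiated once, at ds_init() time */
	}

	static void stash_boot_command(const char *full_boot_str)
	{
		unsigned long len = strlen(full_boot_str);

		if (reboot_data_supported)
			sun4v_reboot_data_set(kimage_addr_to_ra(full_boot_str), len);
		else
			ldom_set_var("reboot-command", full_boot_str);
	}
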
+ 14 - 0
arch/sparc/kernel/entry.h

@@ -42,6 +42,20 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
 extern void fpload(unsigned long *fpregs, unsigned long *fsr);
 
 #else /* CONFIG_SPARC32 */
+struct popc_3insn_patch_entry {
+	unsigned int	addr;
+	unsigned int	insns[3];
+};
+extern struct popc_3insn_patch_entry __popc_3insn_patch,
+	__popc_3insn_patch_end;
+
+struct popc_6insn_patch_entry {
+	unsigned int	addr;
+	unsigned int	insns[6];
+};
+extern struct popc_6insn_patch_entry __popc_6insn_patch,
+	__popc_6insn_patch_end;
+
 extern void __init per_cpu_patch(void);
 extern void __init sun4v_patch(void);
 extern void __init boot_cpu_id_too_large(int cpu);

+ 1 - 1
arch/sparc/kernel/head_64.S

@@ -559,7 +559,7 @@ niagara2_patch:
 	 nop
 	call	niagara_patch_bzero
 	 nop
-	call	niagara2_patch_pageops
+	call	niagara_patch_pageops
 	 nop
 
 	ba,a,pt	%xcc, 80f

+ 6 - 0
arch/sparc/kernel/hvapi.c

@@ -28,11 +28,17 @@ static struct api_info api_table[] = {
 	{ .group = HV_GRP_CORE,		.flags = FLAG_PRE_API	},
 	{ .group = HV_GRP_INTR,					},
 	{ .group = HV_GRP_SOFT_STATE,				},
+	{ .group = HV_GRP_TM,					},
 	{ .group = HV_GRP_PCI,		.flags = FLAG_PRE_API	},
 	{ .group = HV_GRP_LDOM,					},
 	{ .group = HV_GRP_SVC_CHAN,	.flags = FLAG_PRE_API	},
 	{ .group = HV_GRP_NCS,		.flags = FLAG_PRE_API	},
 	{ .group = HV_GRP_RNG,					},
+	{ .group = HV_GRP_PBOOT,				},
+	{ .group = HV_GRP_TPM,					},
+	{ .group = HV_GRP_SDIO,					},
+	{ .group = HV_GRP_SDIO_ERR,				},
+	{ .group = HV_GRP_REBOOT_DATA,				},
 	{ .group = HV_GRP_NIAG_PERF,	.flags = FLAG_PRE_API	},
 	{ .group = HV_GRP_FIRE_PERF,				},
 	{ .group = HV_GRP_N2_CPU,				},

+ 7 - 0
arch/sparc/kernel/hvcalls.S

@@ -798,3 +798,10 @@ ENTRY(sun4v_niagara2_setperf)
 	retl
 	 nop
 ENDPROC(sun4v_niagara2_setperf)
+
+ENTRY(sun4v_reboot_data_set)
+	mov	HV_FAST_REBOOT_DATA_SET, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 nop
+ENDPROC(sun4v_reboot_data_set)

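The sun4v_reboot_data_set stub above follows the usual sun4v fast-trap pattern: the function number goes in %o5, the arguments stay in %o0/%o1, "ta HV_FAST_TRAP" enters the hypervisor, and the status lands in %o0. On the C side this corresponds to the call made from ds.c; the prototype below is inferred from that caller, so treat it as an assumption rather than the header's exact declaration:

	/* Assumed prototype, matching the ds.c call site: returns HV_EOK on success. */
	extern unsigned long sun4v_reboot_data_set(unsigned long ra, unsigned long len);

	static int set_reboot_data_or_fail(unsigned long ra, unsigned long len)
	{
		unsigned long hv_ret = sun4v_reboot_data_set(ra, len);

		return (hv_ret == HV_EOK) ? 0 : -EIO;
	}
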
+ 15 - 0
arch/sparc/kernel/kernel.h

@@ -4,12 +4,27 @@
 #include <linux/interrupt.h>
 
 #include <asm/traps.h>
+#include <asm/head.h>
+#include <asm/io.h>
 
 /* cpu.c */
 extern const char *sparc_pmu_type;
 extern unsigned int fsr_storage;
 extern int ncpus_probed;
 
+#ifdef CONFIG_SPARC64
+/* setup_64.c */
+struct seq_file;
+extern void cpucap_info(struct seq_file *);
+
+static inline unsigned long kimage_addr_to_ra(const char *p)
+{
+	unsigned long val = (unsigned long) p;
+
+	return kern_base + (val - KERNBASE);
+}
+#endif
+
 #ifdef CONFIG_SPARC32
 /* cpu.c */
 extern void cpu_probe(void);

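The kimage_addr_to_ra() helper moved into kernel.h converts a pointer inside the kernel image into the real address the hypervisor expects: the image is linked at KERNBASE but loaded at the physical base kern_base, so the offset within the image is simply rebased. A small usage sketch (the buffer name here is illustrative, not from the tree):

	/* Illustrative only: translate a kernel-image buffer for a hypervisor call. */
	static char hv_buf[256] __attribute__((aligned(32)));

	static unsigned long hv_buf_real_address(void)
	{
		/* ra = kern_base + (va - KERNBASE), exactly what kimage_addr_to_ra() computes */
		return kimage_addr_to_ra(hv_buf);
	}
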
+ 12 - 12
arch/sparc/kernel/ktlb.S

@@ -47,16 +47,16 @@ kvmap_itlb_tsb_miss:
 kvmap_itlb_vmalloc_addr:
 	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)
 
-	KTSB_LOCK_TAG(%g1, %g2, %g7)
+	TSB_LOCK_TAG(%g1, %g2, %g7)
 
 	/* Load and check PTE.  */
 	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
 	mov		1, %g7
 	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
 	brgez,a,pn	%g5, kvmap_itlb_longpath
-	 KTSB_STORE(%g1, %g7)
+	 TSB_STORE(%g1, %g7)
 
-	KTSB_WRITE(%g1, %g5, %g6)
+	TSB_WRITE(%g1, %g5, %g6)
 
 	/* fallthrough to TLB load */
 
@@ -102,9 +102,9 @@ kvmap_itlb_longpath:
 kvmap_itlb_obp:
 	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)
 
-	KTSB_LOCK_TAG(%g1, %g2, %g7)
+	TSB_LOCK_TAG(%g1, %g2, %g7)
 
-	KTSB_WRITE(%g1, %g5, %g6)
+	TSB_WRITE(%g1, %g5, %g6)
 
 	ba,pt		%xcc, kvmap_itlb_load
 	 nop
@@ -112,17 +112,17 @@ kvmap_itlb_obp:
 kvmap_dtlb_obp:
 	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)
 
-	KTSB_LOCK_TAG(%g1, %g2, %g7)
+	TSB_LOCK_TAG(%g1, %g2, %g7)
 
-	KTSB_WRITE(%g1, %g5, %g6)
+	TSB_WRITE(%g1, %g5, %g6)
 
 	ba,pt		%xcc, kvmap_dtlb_load
 	 nop
 
 	.align		32
 kvmap_dtlb_tsb4m_load:
-	KTSB_LOCK_TAG(%g1, %g2, %g7)
-	KTSB_WRITE(%g1, %g5, %g6)
+	TSB_LOCK_TAG(%g1, %g2, %g7)
+	TSB_WRITE(%g1, %g5, %g6)
 	ba,pt		%xcc, kvmap_dtlb_load
 	 nop
 
@@ -222,16 +222,16 @@ kvmap_linear_patch:
 kvmap_dtlb_vmalloc_addr:
 	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
 
-	KTSB_LOCK_TAG(%g1, %g2, %g7)
+	TSB_LOCK_TAG(%g1, %g2, %g7)
 
 	/* Load and check PTE.  */
 	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
 	mov		1, %g7
 	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
 	brgez,a,pn	%g5, kvmap_dtlb_longpath
-	 KTSB_STORE(%g1, %g7)
+	 TSB_STORE(%g1, %g7)
 
-	KTSB_WRITE(%g1, %g5, %g6)
+	TSB_WRITE(%g1, %g5, %g6)
 
 	/* fallthrough to TLB load */
 

+ 23 - 7
arch/sparc/kernel/mdesc.c

@@ -508,6 +508,8 @@ const char *mdesc_node_name(struct mdesc_handle *hp, u64 node)
 }
 EXPORT_SYMBOL(mdesc_node_name);
 
+static u64 max_cpus = 64;
+
 static void __init report_platform_properties(void)
 {
 	struct mdesc_handle *hp = mdesc_grab();
@@ -543,8 +545,10 @@ static void __init report_platform_properties(void)
 	if (v)
 		printk("PLATFORM: watchdog-max-timeout [%llu ms]\n", *v);
 	v = mdesc_get_property(hp, pn, "max-cpus", NULL);
-	if (v)
-		printk("PLATFORM: max-cpus [%llu]\n", *v);
+	if (v) {
+		max_cpus = *v;
+		printk("PLATFORM: max-cpus [%llu]\n", max_cpus);
+	}
 
 #ifdef CONFIG_SMP
 	{
@@ -715,7 +719,7 @@ static void __cpuinit set_proc_ids(struct mdesc_handle *hp)
 }
 
 static void __cpuinit get_one_mondo_bits(const u64 *p, unsigned int *mask,
-					 unsigned char def)
+					 unsigned long def, unsigned long max)
 {
 	u64 val;
 
@@ -726,6 +730,9 @@ static void __cpuinit get_one_mondo_bits(const u64 *p, unsigned int *mask,
 	if (!val || val >= 64)
 		goto use_default;
 
+	if (val > max)
+		val = max;
+
 	*mask = ((1U << val) * 64U) - 1U;
 	return;
 
@@ -736,19 +743,28 @@ use_default:
 static void __cpuinit get_mondo_data(struct mdesc_handle *hp, u64 mp,
 				     struct trap_per_cpu *tb)
 {
+	static int printed;
 	const u64 *val;
 
 	val = mdesc_get_property(hp, mp, "q-cpu-mondo-#bits", NULL);
-	get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7);
+	get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7, ilog2(max_cpus * 2));
 
 	val = mdesc_get_property(hp, mp, "q-dev-mondo-#bits", NULL);
-	get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7);
+	get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7, 8);
 
 	val = mdesc_get_property(hp, mp, "q-resumable-#bits", NULL);
-	get_one_mondo_bits(val, &tb->resum_qmask, 6);
+	get_one_mondo_bits(val, &tb->resum_qmask, 6, 7);
 
 	val = mdesc_get_property(hp, mp, "q-nonresumable-#bits", NULL);
-	get_one_mondo_bits(val, &tb->nonresum_qmask, 2);
+	get_one_mondo_bits(val, &tb->nonresum_qmask, 2, 2);
+	if (!printed++) {
+		pr_info("SUN4V: Mondo queue sizes "
+			"[cpu(%u) dev(%u) r(%u) nr(%u)]\n",
+			tb->cpu_mondo_qmask + 1,
+			tb->dev_mondo_qmask + 1,
+			tb->resum_qmask + 1,
+			tb->nonresum_qmask + 1);
+	}
 }
 
 static void * __cpuinit mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask)

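The mondo-queue change above clamps the "#bits" value read from the machine description before turning it into a mask; the mask is the queue size in bytes minus one, with 64-byte entries, which is why the new pr_info prints "qmask + 1". A standalone sketch of the arithmetic (not kernel code, names are illustrative):

	#include <stdio.h>

	/* Standalone sketch: how a "#bits" value becomes a mondo queue mask. */
	static unsigned int mondo_qmask(unsigned long bits, unsigned long max)
	{
		if (bits > max)
			bits = max;                 /* the clamp added in get_one_mondo_bits() */
		return ((1U << bits) * 64U) - 1U;   /* (1 << bits) entries of 64 bytes, minus one */
	}

	int main(void)
	{
		/* e.g. 7 bits -> 128 entries -> 8192-byte queue (qmask + 1) */
		printf("%u\n", mondo_qmask(7, 8) + 1);
		return 0;
	}
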
+ 186 - 0
arch/sparc/kernel/setup_64.c

@@ -29,6 +29,7 @@
 #include <linux/interrupt.h>
 #include <linux/cpu.h>
 #include <linux/initrd.h>
+#include <linux/module.h>
 
 #include <asm/system.h>
 #include <asm/io.h>
@@ -46,6 +47,8 @@
 #include <asm/mmu.h>
 #include <asm/ns87303.h>
 #include <asm/btext.h>
+#include <asm/elf.h>
+#include <asm/mdesc.h>
 
 #ifdef CONFIG_IP_PNP
 #include <net/ipconfig.h>
@@ -269,6 +272,40 @@ void __init sun4v_patch(void)
 	sun4v_hvapi_init();
 }
 
+static void __init popc_patch(void)
+{
+	struct popc_3insn_patch_entry *p3;
+	struct popc_6insn_patch_entry *p6;
+
+	p3 = &__popc_3insn_patch;
+	while (p3 < &__popc_3insn_patch_end) {
+		unsigned long i, addr = p3->addr;
+
+		for (i = 0; i < 3; i++) {
+			*(unsigned int *) (addr +  (i * 4)) = p3->insns[i];
+			wmb();
+			__asm__ __volatile__("flush	%0"
+					     : : "r" (addr +  (i * 4)));
+		}
+
+		p3++;
+	}
+
+	p6 = &__popc_6insn_patch;
+	while (p6 < &__popc_6insn_patch_end) {
+		unsigned long i, addr = p6->addr;
+
+		for (i = 0; i < 6; i++) {
+			*(unsigned int *) (addr +  (i * 4)) = p6->insns[i];
+			wmb();
+			__asm__ __volatile__("flush	%0"
+					     : : "r" (addr +  (i * 4)));
+		}
+
+		p6++;
+	}
+}
+
 #ifdef CONFIG_SMP
 void __init boot_cpu_id_too_large(int cpu)
 {
@@ -278,6 +315,154 @@ void __init boot_cpu_id_too_large(int cpu)
 }
 #endif
 
+/* On Ultra, we support all of the v8 capabilities. */
+unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
+				   HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV |
+				   HWCAP_SPARC_V9);
+EXPORT_SYMBOL(sparc64_elf_hwcap);
+
+static const char *hwcaps[] = {
+	"flush", "stbar", "swap", "muldiv", "v9",
+	"ultra3", "blkinit", "n2",
+
+	/* These strings are as they appear in the machine description
+	 * 'hwcap-list' property for cpu nodes.
+	 */
+	"mul32", "div32", "fsmuld", "v8plus", "popc", "vis", "vis2",
+	"ASIBlkInit", "fmaf", "vis3", "hpc", "random", "trans", "fjfmau",
+	"ima", "cspare",
+};
+
+void cpucap_info(struct seq_file *m)
+{
+	unsigned long caps = sparc64_elf_hwcap;
+	int i, printed = 0;
+
+	seq_puts(m, "cpucaps\t\t: ");
+	for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
+		unsigned long bit = 1UL << i;
+		if (caps & bit) {
+			seq_printf(m, "%s%s",
+				   printed ? "," : "", hwcaps[i]);
+			printed++;
+		}
+	}
+	seq_putc(m, '\n');
+}
+
+static void __init report_hwcaps(unsigned long caps)
+{
+	int i, printed = 0;
+
+	printk(KERN_INFO "CPU CAPS: [");
+	for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
+		unsigned long bit = 1UL << i;
+		if (caps & bit) {
+			printk(KERN_CONT "%s%s",
+			       printed ? "," : "", hwcaps[i]);
+			if (++printed == 8) {
+				printk(KERN_CONT "]\n");
+				printk(KERN_INFO "CPU CAPS: [");
+				printed = 0;
+			}
+		}
+	}
+	printk(KERN_CONT "]\n");
+}
+
+static unsigned long __init mdesc_cpu_hwcap_list(void)
+{
+	struct mdesc_handle *hp;
+	unsigned long caps = 0;
+	const char *prop;
+	int len;
+	u64 pn;
+
+	hp = mdesc_grab();
+	if (!hp)
+		return 0;
+
+	pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "cpu");
+	if (pn == MDESC_NODE_NULL)
+		goto out;
+
+	prop = mdesc_get_property(hp, pn, "hwcap-list", &len);
+	if (!prop)
+		goto out;
+
+	while (len) {
+		int i, plen;
+
+		for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
+			unsigned long bit = 1UL << i;
+
+			if (!strcmp(prop, hwcaps[i])) {
+				caps |= bit;
+				break;
+			}
+		}
+
+		plen = strlen(prop) + 1;
+		prop += plen;
+		len -= plen;
+	}
+
+out:
+	mdesc_release(hp);
+	return caps;
+}
+
+/* This yields a mask that user programs can use to figure out what
+ * instruction set this cpu supports.
+ */
+static void __init init_sparc64_elf_hwcap(void)
+{
+	unsigned long cap = sparc64_elf_hwcap;
+	unsigned long mdesc_caps;
+
+	if (tlb_type == cheetah || tlb_type == cheetah_plus)
+		cap |= HWCAP_SPARC_ULTRA3;
+	else if (tlb_type == hypervisor) {
+		if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 ||
+		    sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
+		    sun4v_chip_type == SUN4V_CHIP_NIAGARA3)
+			cap |= HWCAP_SPARC_BLKINIT;
+		if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
+		    sun4v_chip_type == SUN4V_CHIP_NIAGARA3)
+			cap |= HWCAP_SPARC_N2;
+	}
+
+	cap |= (AV_SPARC_MUL32 | AV_SPARC_DIV32 | AV_SPARC_V8PLUS);
+
+	mdesc_caps = mdesc_cpu_hwcap_list();
+	if (!mdesc_caps) {
+		if (tlb_type == spitfire)
+			cap |= AV_SPARC_VIS;
+		if (tlb_type == cheetah || tlb_type == cheetah_plus)
+			cap |= AV_SPARC_VIS | AV_SPARC_VIS2;
+		if (tlb_type == cheetah_plus)
+			cap |= AV_SPARC_POPC;
+		if (tlb_type == hypervisor) {
+			if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1)
+				cap |= AV_SPARC_ASI_BLK_INIT;
+			if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
+			    sun4v_chip_type == SUN4V_CHIP_NIAGARA3)
+				cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
+					AV_SPARC_ASI_BLK_INIT |
+					AV_SPARC_POPC);
+			if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3)
+				cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
+					AV_SPARC_FMAF);
+		}
+	}
+	sparc64_elf_hwcap = cap | mdesc_caps;
+
+	report_hwcaps(sparc64_elf_hwcap);
+
+	if (sparc64_elf_hwcap & AV_SPARC_POPC)
+		popc_patch();
+}
+
 void __init setup_arch(char **cmdline_p)
 {
 	/* Initialize PROM console and command line. */
@@ -337,6 +522,7 @@ void __init setup_arch(char **cmdline_p)
 	init_cur_cpu_trap(current_thread_info());
 
 	paging_init();
+	init_sparc64_elf_hwcap();
 }
 
 extern int stop_a_enabled;

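The hwcap word built by init_sparc64_elf_hwcap() uses the same bit ordering as the hwcaps[] string table, which is also what cpucap_info() prints into /proc/cpuinfo. A standalone sketch that decodes a capability word with that ordering (the table is copied from the diff; the decoder itself is illustrative):

	#include <stdio.h>

	/* Bit i of the capability word corresponds to hwcaps[i], as in setup_64.c above. */
	static const char *hwcaps[] = {
		"flush", "stbar", "swap", "muldiv", "v9",
		"ultra3", "blkinit", "n2",
		"mul32", "div32", "fsmuld", "v8plus", "popc", "vis", "vis2",
		"ASIBlkInit", "fmaf", "vis3", "hpc", "random", "trans", "fjfmau",
		"ima", "cspare",
	};

	static void print_caps(unsigned long caps)
	{
		int i, printed = 0;

		for (i = 0; i < (int)(sizeof(hwcaps) / sizeof(hwcaps[0])); i++)
			if (caps & (1UL << i))
				printf("%s%s", printed++ ? "," : "", hwcaps[i]);
		putchar('\n');
	}

	int main(void)
	{
		print_caps(0x1fUL);	/* the base flush/stbar/swap/muldiv/v9 set */
		return 0;
	}
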
+ 11 - 0
arch/sparc/kernel/sparc_ksyms_64.c

@@ -8,6 +8,7 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/init.h>
+#include <linux/bitops.h>
 
 #include <asm/system.h>
 #include <asm/cpudata.h>
@@ -38,5 +39,15 @@ EXPORT_SYMBOL(sun4v_niagara_setperf);
 EXPORT_SYMBOL(sun4v_niagara2_getperf);
 EXPORT_SYMBOL(sun4v_niagara2_setperf);
 
+/* from hweight.S */
+EXPORT_SYMBOL(__arch_hweight8);
+EXPORT_SYMBOL(__arch_hweight16);
+EXPORT_SYMBOL(__arch_hweight32);
+EXPORT_SYMBOL(__arch_hweight64);
+
+/* from ffs_ffz.S */
+EXPORT_SYMBOL(ffs);
+EXPORT_SYMBOL(__ffs);
+
 /* Exporting a symbol from /init/main.c */
 EXPORT_SYMBOL(saved_command_line);

+ 2 - 7
arch/sparc/kernel/sstate.c

@@ -14,14 +14,9 @@
 #include <asm/head.h>
 #include <asm/io.h>
 
-static int hv_supports_soft_state;
-
-static unsigned long kimage_addr_to_ra(const char *p)
-{
-	unsigned long val = (unsigned long) p;
+#include "kernel.h"
 
-	return kern_base + (val - KERNBASE);
-}
+static int hv_supports_soft_state;
 
 static void do_set_sstate(unsigned long state, const char *msg)
 {

+ 4 - 11
arch/sparc/kernel/unaligned_64.c

@@ -22,6 +22,7 @@
 #include <linux/bitops.h>
 #include <linux/perf_event.h>
 #include <linux/ratelimit.h>
+#include <linux/bitops.h>
 #include <asm/fpumacro.h>
 
 enum direction {
@@ -373,16 +374,11 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 	}
 }
 
-static char popc_helper[] = {
-0, 1, 1, 2, 1, 2, 2, 3,
-1, 2, 2, 3, 2, 3, 3, 4, 
-};
-
 int handle_popc(u32 insn, struct pt_regs *regs)
 {
-	u64 value;
-	int ret, i, rd = ((insn >> 25) & 0x1f);
 	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
+	int ret, rd = ((insn >> 25) & 0x1f);
+	u64 value;
 
 	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 	if (insn & 0x2000) {
@@ -392,10 +388,7 @@ int handle_popc(u32 insn, struct pt_regs *regs)
 		maybe_flush_windows(0, insn & 0x1f, rd, from_kernel);
 		value = fetch_reg(insn & 0x1f, regs);
 	}
-	for (ret = 0, i = 0; i < 16; i++) {
-		ret += popc_helper[value & 0xf];
-		value >>= 4;
-	}
+	ret = hweight64(value);
 	if (rd < 16) {
 		if (rd)
 			regs->u_regs[rd] = ret;

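The handle_popc() rewrite drops the nibble lookup table in favour of hweight64(); both count the set bits of the 64-bit operand. A quick standalone check that the removed table-based loop and a population count agree (the builtin stands in for the kernel's hweight64()):

	#include <assert.h>

	/* The removed lookup table: set-bit counts for each 4-bit value. */
	static const char popc_helper[16] = {
		0, 1, 1, 2, 1, 2, 2, 3,
		1, 2, 2, 3, 2, 3, 3, 4,
	};

	static int popc_by_table(unsigned long long value)
	{
		int i, ret = 0;

		for (i = 0; i < 16; i++) {	/* 16 nibbles in a 64-bit word */
			ret += popc_helper[value & 0xf];
			value >>= 4;
		}
		return ret;
	}

	int main(void)
	{
		unsigned long long v = 0xdeadbeefcafef00dULL;

		assert(popc_by_table(v) == __builtin_popcountll(v));
		return 0;
	}
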
+ 20 - 1
arch/sparc/kernel/vmlinux.lds.S

@@ -107,7 +107,26 @@ SECTIONS
 		*(.sun4v_2insn_patch)
 		__sun4v_2insn_patch_end = .;
 	}
-
+	.swapper_tsb_phys_patch : {
+		__swapper_tsb_phys_patch = .;
+		*(.swapper_tsb_phys_patch)
+		__swapper_tsb_phys_patch_end = .;
+	}
+	.swapper_4m_tsb_phys_patch : {
+		__swapper_4m_tsb_phys_patch = .;
+		*(.swapper_4m_tsb_phys_patch)
+		__swapper_4m_tsb_phys_patch_end = .;
+	}
+	.popc_3insn_patch : {
+		__popc_3insn_patch = .;
+		*(.popc_3insn_patch)
+		__popc_3insn_patch_end = .;
+	}
+	.popc_6insn_patch : {
+		__popc_6insn_patch = .;
+		*(.popc_6insn_patch)
+		__popc_6insn_patch_end = .;
+	}
 	PERCPU_SECTION(SMP_CACHE_BYTES)
 
 	. = ALIGN(PAGE_SIZE);

+ 2 - 2
arch/sparc/lib/Makefile

@@ -31,13 +31,13 @@ lib-$(CONFIG_SPARC64) += NGmemcpy.o NGcopy_from_user.o NGcopy_to_user.o
 lib-$(CONFIG_SPARC64) += NGpatch.o NGpage.o NGbzero.o
 
 lib-$(CONFIG_SPARC64) += NG2memcpy.o NG2copy_from_user.o NG2copy_to_user.o
-lib-$(CONFIG_SPARC64) +=  NG2patch.o NG2page.o
+lib-$(CONFIG_SPARC64) +=  NG2patch.o
 
 lib-$(CONFIG_SPARC64) += GENmemcpy.o GENcopy_from_user.o GENcopy_to_user.o
 lib-$(CONFIG_SPARC64) += GENpatch.o GENpage.o GENbzero.o
 
 lib-$(CONFIG_SPARC64) += copy_in_user.o user_fixup.o memmove.o
-lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o
+lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o
 
 obj-y                 += iomap.o
 obj-$(CONFIG_SPARC32) += atomic32.o

+ 0 - 61
arch/sparc/lib/NG2page.S

@@ -1,61 +0,0 @@
-/* NG2page.S: Niagara-2 optimized clear and copy page.
- *
- * Copyright (C) 2007 (davem@davemloft.net)
- */
-
-#include <asm/asi.h>
-#include <asm/page.h>
-#include <asm/visasm.h>
-
-	.text
-	.align	32
-
-	/* This is heavily simplified from the sun4u variants
-	 * because Niagara-2 does not have any D-cache aliasing issues.
-	 */
-NG2copy_user_page:	/* %o0=dest, %o1=src, %o2=vaddr */
-	prefetch	[%o1 + 0x00], #one_read
-	prefetch	[%o1 + 0x40], #one_read
-	VISEntryHalf
-	set		PAGE_SIZE, %g7
-	sub		%o0, %o1, %g3
-1:	stxa		%g0, [%o1 + %g3] ASI_BLK_INIT_QUAD_LDD_P
-	subcc		%g7, 64, %g7
-	ldda		[%o1] ASI_BLK_P, %f0
-	stda		%f0, [%o1 + %g3] ASI_BLK_P
-	add		%o1, 64, %o1
-	bne,pt		%xcc, 1b
-	 prefetch	[%o1 + 0x40], #one_read
-	membar		#Sync
-	VISExitHalf
-	retl
-	 nop
-
-#define BRANCH_ALWAYS	0x10680000
-#define NOP		0x01000000
-#define NG_DO_PATCH(OLD, NEW)	\
-	sethi	%hi(NEW), %g1; \
-	or	%g1, %lo(NEW), %g1; \
-	sethi	%hi(OLD), %g2; \
-	or	%g2, %lo(OLD), %g2; \
-	sub	%g1, %g2, %g1; \
-	sethi	%hi(BRANCH_ALWAYS), %g3; \
-	sll	%g1, 11, %g1; \
-	srl	%g1, 11 + 2, %g1; \
-	or	%g3, %lo(BRANCH_ALWAYS), %g3; \
-	or	%g3, %g1, %g3; \
-	stw	%g3, [%g2]; \
-	sethi	%hi(NOP), %g3; \
-	or	%g3, %lo(NOP), %g3; \
-	stw	%g3, [%g2 + 0x4]; \
-	flush	%g2;
-
-	.globl	niagara2_patch_pageops
-	.type	niagara2_patch_pageops,#function
-niagara2_patch_pageops:
-	NG_DO_PATCH(copy_user_page, NG2copy_user_page)
-	NG_DO_PATCH(_clear_page, NGclear_page)
-	NG_DO_PATCH(clear_user_page, NGclear_user_page)
-	retl
-	 nop
-	.size	niagara2_patch_pageops,.-niagara2_patch_pageops

+ 75 - 39
arch/sparc/lib/NGpage.S

@@ -16,55 +16,91 @@
 	 */
 
 NGcopy_user_page:	/* %o0=dest, %o1=src, %o2=vaddr */
-	prefetch	[%o1 + 0x00], #one_read
-	mov		8, %g1
-	mov		16, %g2
-	mov		24, %g3
+	save		%sp, -192, %sp
+	rd		%asi, %g3
+	wr		%g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
 	set		PAGE_SIZE, %g7
+	prefetch	[%i1 + 0x00], #one_read
+	prefetch	[%i1 + 0x40], #one_read
 
-1:	ldda		[%o1 + %g0] ASI_BLK_INIT_QUAD_LDD_P, %o2
-	ldda		[%o1 + %g2] ASI_BLK_INIT_QUAD_LDD_P, %o4
-	prefetch	[%o1 + 0x40], #one_read
-	add		%o1, 32, %o1
-	stxa		%o2, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P
-	stxa		%o3, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P
-	ldda		[%o1 + %g0] ASI_BLK_INIT_QUAD_LDD_P, %o2
-	stxa		%o4, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P
-	stxa		%o5, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P
-	ldda		[%o1 + %g2] ASI_BLK_INIT_QUAD_LDD_P, %o4
-	add		%o1, 32, %o1
-	add		%o0, 32, %o0
-	stxa		%o2, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P
-	stxa		%o3, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P
-	stxa		%o4, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P
-	stxa		%o5, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P
-	subcc		%g7, 64, %g7
+1:	prefetch	[%i1 + 0x80], #one_read
+	prefetch	[%i1 + 0xc0], #one_read
+	ldda		[%i1 + 0x00] %asi, %o2
+	ldda		[%i1 + 0x10] %asi, %o4
+	ldda		[%i1 + 0x20] %asi, %l2
+	ldda		[%i1 + 0x30] %asi, %l4
+	stxa		%o2, [%i0 + 0x00] %asi
+	stxa		%o3, [%i0 + 0x08] %asi
+	stxa		%o4, [%i0 + 0x10] %asi
+	stxa		%o5, [%i0 + 0x18] %asi
+	stxa		%l2, [%i0 + 0x20] %asi
+	stxa		%l3, [%i0 + 0x28] %asi
+	stxa		%l4, [%i0 + 0x30] %asi
+	stxa		%l5, [%i0 + 0x38] %asi
+	ldda		[%i1 + 0x40] %asi, %o2
+	ldda		[%i1 + 0x50] %asi, %o4
+	ldda		[%i1 + 0x60] %asi, %l2
+	ldda		[%i1 + 0x70] %asi, %l4
+	stxa		%o2, [%i0 + 0x40] %asi
+	stxa		%o3, [%i0 + 0x48] %asi
+	stxa		%o4, [%i0 + 0x50] %asi
+	stxa		%o5, [%i0 + 0x58] %asi
+	stxa		%l2, [%i0 + 0x60] %asi
+	stxa		%l3, [%i0 + 0x68] %asi
+	stxa		%l4, [%i0 + 0x70] %asi
+	stxa		%l5, [%i0 + 0x78] %asi
+	add		%i1, 128, %i1
+	subcc		%g7, 128, %g7
 	bne,pt		%xcc, 1b
-	 add		%o0, 32, %o0
+	 add		%i0, 128, %i0
+	wr		%g3, 0x0, %asi
 	membar		#Sync
-	retl
-	 nop
+	ret
+	 restore
 
-	.globl		NGclear_page, NGclear_user_page
+	.align		32
 NGclear_page:		/* %o0=dest */
 NGclear_user_page:	/* %o0=dest, %o1=vaddr */
-	mov		8, %g1
-	mov		16, %g2
-	mov		24, %g3
+	rd		%asi, %g3
+	wr		%g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
 	set		PAGE_SIZE, %g7
 
-1:	stxa		%g0, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P
-	stxa		%g0, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P
-	stxa		%g0, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P
-	stxa		%g0, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P
-	add		%o0, 32, %o0
-	stxa		%g0, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P
-	stxa		%g0, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P
-	stxa		%g0, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P
-	stxa		%g0, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P
-	subcc		%g7, 64, %g7
+1:	stxa		%g0, [%o0 + 0x00] %asi
+	stxa		%g0, [%o0 + 0x08] %asi
+	stxa		%g0, [%o0 + 0x10] %asi
+	stxa		%g0, [%o0 + 0x18] %asi
+	stxa		%g0, [%o0 + 0x20] %asi
+	stxa		%g0, [%o0 + 0x28] %asi
+	stxa		%g0, [%o0 + 0x30] %asi
+	stxa		%g0, [%o0 + 0x38] %asi
+	stxa		%g0, [%o0 + 0x40] %asi
+	stxa		%g0, [%o0 + 0x48] %asi
+	stxa		%g0, [%o0 + 0x50] %asi
+	stxa		%g0, [%o0 + 0x58] %asi
+	stxa		%g0, [%o0 + 0x60] %asi
+	stxa		%g0, [%o0 + 0x68] %asi
+	stxa		%g0, [%o0 + 0x70] %asi
+	stxa		%g0, [%o0 + 0x78] %asi
+	stxa		%g0, [%o0 + 0x80] %asi
+	stxa		%g0, [%o0 + 0x88] %asi
+	stxa		%g0, [%o0 + 0x90] %asi
+	stxa		%g0, [%o0 + 0x98] %asi
+	stxa		%g0, [%o0 + 0xa0] %asi
+	stxa		%g0, [%o0 + 0xa8] %asi
+	stxa		%g0, [%o0 + 0xb0] %asi
+	stxa		%g0, [%o0 + 0xb8] %asi
+	stxa		%g0, [%o0 + 0xc0] %asi
+	stxa		%g0, [%o0 + 0xc8] %asi
+	stxa		%g0, [%o0 + 0xd0] %asi
+	stxa		%g0, [%o0 + 0xd8] %asi
+	stxa		%g0, [%o0 + 0xe0] %asi
+	stxa		%g0, [%o0 + 0xe8] %asi
+	stxa		%g0, [%o0 + 0xf0] %asi
+	stxa		%g0, [%o0 + 0xf8] %asi
+	subcc		%g7, 256, %g7
 	bne,pt		%xcc, 1b
-	 add		%o0, 32, %o0
+	 add		%o0, 256, %o0
+	wr		%g3, 0x0, %asi
 	membar		#Sync
 	retl
 	 nop

+ 1 - 1
arch/sparc/lib/atomic32.c

@@ -65,7 +65,7 @@ int __atomic_add_unless(atomic_t *v, int a, int u)
 	if (ret != u)
 		v->counter += a;
 	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
-	return ret != u;
+	return ret;
 }
 EXPORT_SYMBOL(__atomic_add_unless);
 

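The __atomic_add_unless() fix above matters because callers expect the old counter value back, not a boolean; the atomic_add_unless() wrapper then compares that value against u itself. A standalone model of the corrected semantics (plain ints, no atomicity, purely illustrative):

	#include <assert.h>

	/* Add a to *v unless *v == u, returning the value seen beforehand,
	 * which is what __atomic_add_unless() now returns.
	 */
	static int add_unless_old(int *v, int a, int u)
	{
		int old = *v;

		if (old != u)
			*v += a;
		return old;
	}

	int main(void)
	{
		int v = 3;

		/* atomic_add_unless()-style caller asks: "did the add happen?" */
		assert((add_unless_old(&v, 1, 3) != 3) == 0 && v == 3);	/* blocked */
		assert((add_unless_old(&v, 1, 5) != 5) == 1 && v == 4);	/* added   */
		return 0;
	}
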
+ 84 - 0
arch/sparc/lib/ffs.S

@@ -0,0 +1,84 @@
+#include <linux/linkage.h>
+
+	.register	%g2,#scratch
+
+	.text
+	.align	32
+
+ENTRY(ffs)
+	brnz,pt	%o0, 1f
+	 mov	1, %o1
+	retl
+	 clr	%o0
+	nop
+	nop
+ENTRY(__ffs)
+	sllx	%o0, 32, %g1		/* 1  */
+	srlx	%o0, 32, %g2
+
+	clr	%o1			/* 2  */
+	movrz	%g1, %g2, %o0
+
+	movrz	%g1, 32, %o1		/* 3  */
+1:	clr	%o2
+
+	sllx	%o0, (64 - 16), %g1	/* 4  */
+	srlx	%o0, 16, %g2
+
+	movrz	%g1, %g2, %o0		/* 5  */
+	clr	%o3
+
+	movrz	%g1, 16, %o2		/* 6  */
+	clr	%o4
+
+	and	%o0, 0xff, %g1		/* 7  */
+	srlx	%o0, 8, %g2
+
+	movrz	%g1, %g2, %o0		/* 8  */
+	clr	%o5
+
+	movrz	%g1, 8, %o3		/* 9  */
+	add	%o2, %o1, %o2
+
+	and	%o0, 0xf, %g1		/* 10 */
+	srlx	%o0, 4, %g2
+
+	movrz	%g1, %g2, %o0		/* 11 */
+	add	%o2, %o3, %o2
+
+	movrz	%g1, 4, %o4		/* 12 */
+
+	and	%o0, 0x3, %g1		/* 13 */
+	srlx	%o0, 2, %g2
+
+	movrz	%g1, %g2, %o0		/* 14 */
+	add	%o2, %o4, %o2
+
+	movrz	%g1, 2, %o5		/* 15 */
+
+	and	%o0, 0x1, %g1		/* 16 */
+
+	add	%o2, %o5, %o2		/* 17 */
+	xor	%g1, 0x1, %g1
+
+	retl				/* 18 */
+	 add	%o2, %g1, %o0
+ENDPROC(ffs)
+ENDPROC(__ffs)
+
+	.section	.popc_6insn_patch, "ax"
+	.word		ffs
+	brz,pn	%o0, 98f
+	 neg	%o0, %g1
+	xnor	%o0, %g1, %o1
+	popc	%o1, %o0
+98:	retl
+	 nop
+	.word		__ffs
+	neg	%o0, %g1
+	xnor	%o0, %g1, %o1
+	popc	%o1, %o0
+	retl
+	 sub	%o0, 1, %o0
+	nop
+	.previous

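The .popc_6insn_patch replacement for __ffs above relies on the identity that, for non-zero x, ~(x ^ -x) has ones exactly in bit positions 0..k, where k is the index of the lowest set bit, so its population count is k + 1. A standalone C check of that identity (the builtin popcount stands in for the popc instruction):

	#include <assert.h>

	/* What the patched __ffs computes: popc(xnor(x, -x)) - 1. */
	static unsigned int ffs_via_popc(unsigned long x)
	{
		return __builtin_popcountl(~(x ^ -x)) - 1;
	}

	int main(void)
	{
		unsigned long x;

		for (x = 1; x < 100000; x++)
			assert(ffs_via_popc(x) == (unsigned int)__builtin_ctzl(x));
		return 0;
	}
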
+ 51 - 0
arch/sparc/lib/hweight.S

@@ -0,0 +1,51 @@
+#include <linux/linkage.h>
+
+	.text
+	.align	32
+ENTRY(__arch_hweight8)
+	ba,pt	%xcc, __sw_hweight8
+	 nop
+	nop
+ENDPROC(__arch_hweight8)
+	.section	.popc_3insn_patch, "ax"
+	.word		__arch_hweight8
+	sllx		%o0, 64-8, %g1
+	retl
+	 popc		%g1, %o0
+	.previous
+
+ENTRY(__arch_hweight16)
+	ba,pt	%xcc, __sw_hweight16
+	 nop
+	nop
+ENDPROC(__arch_hweight16)
+	.section	.popc_3insn_patch, "ax"
+	.word		__arch_hweight16
+	sllx		%o0, 64-16, %g1
+	retl
+	 popc		%g1, %o0
+	.previous
+
+ENTRY(__arch_hweight32)
+	ba,pt	%xcc, __sw_hweight32
+	 nop
+	nop
+ENDPROC(__arch_hweight32)
+	.section	.popc_3insn_patch, "ax"
+	.word		__arch_hweight32
+	sllx		%o0, 64-32, %g1
+	retl
+	 popc		%g1, %o0
+	.previous
+
+ENTRY(__arch_hweight64)
+	ba,pt	%xcc, __sw_hweight64
+	 nop
+	nop
+ENDPROC(__arch_hweight64)
+	.section	.popc_3insn_patch, "ax"
+	.word		__arch_hweight64
+	retl
+	 popc		%o0, %o0
+	nop
+	.previous

+ 41 - 1
arch/sparc/mm/init_64.c

@@ -1597,6 +1597,44 @@ static void __init tsb_phys_patch(void)
 static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
 
+static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
+{
+	pa >>= KTSB_PHYS_SHIFT;
+
+	while (start < end) {
+		unsigned int *ia = (unsigned int *)(unsigned long)*start;
+
+		ia[0] = (ia[0] & ~0x3fffff) | (pa >> 10);
+		__asm__ __volatile__("flush	%0" : : "r" (ia));
+
+		ia[1] = (ia[1] & ~0x3ff) | (pa & 0x3ff);
+		__asm__ __volatile__("flush	%0" : : "r" (ia + 1));
+
+		start++;
+	}
+}
+
+static void ktsb_phys_patch(void)
+{
+	extern unsigned int __swapper_tsb_phys_patch;
+	extern unsigned int __swapper_tsb_phys_patch_end;
+	unsigned long ktsb_pa;
+
+	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
+	patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
+			    &__swapper_tsb_phys_patch_end, ktsb_pa);
+#ifndef CONFIG_DEBUG_PAGEALLOC
+	{
+	extern unsigned int __swapper_4m_tsb_phys_patch;
+	extern unsigned int __swapper_4m_tsb_phys_patch_end;
+	ktsb_pa = (kern_base +
+		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
+	patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
+			    &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
+	}
+#endif
+}
+
 static void __init sun4v_ktsb_init(void)
 {
 	unsigned long ktsb_pa;
@@ -1716,8 +1754,10 @@ void __init paging_init(void)
 		sun4u_pgprot_init();
 
 	if (tlb_type == cheetah_plus ||
-	    tlb_type == hypervisor)
+	    tlb_type == hypervisor) {
 		tsb_phys_patch();
+		ktsb_phys_patch();
+	}
 
 	if (tlb_type == hypervisor) {
 		sun4v_patch_tlb_handlers();

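patch_one_ktsb_phys() rewrites a sethi/or pair in place so the kernel TSB is addressed physically: after shifting the physical address right by KTSB_PHYS_SHIFT, the upper bits go into the 22-bit sethi immediate and the low 10 bits into the or immediate, the usual %hi/%lo split. A standalone sketch of that field packing (assuming, as the kernel code does, that the shifted address fits the 22-bit field; only the immediate masks from the diff are modelled):

	#include <assert.h>

	/* Pack a pre-shifted value the way patch_one_ktsb_phys() patches the
	 * sethi immediate (low 22 bits of the insn) and or immediate (low 10 bits).
	 */
	static unsigned int patch_sethi(unsigned int insn, unsigned long pa)
	{
		return (insn & ~0x3fffff) | (unsigned int)(pa >> 10);
	}

	static unsigned int patch_or(unsigned int insn, unsigned long pa)
	{
		return (insn & ~0x3ff) | (unsigned int)(pa & 0x3ff);
	}

	int main(void)
	{
		unsigned long pa = 0x12345678UL;	/* already >> KTSB_PHYS_SHIFT */
		unsigned int hi = patch_sethi(0, pa) & 0x3fffff;
		unsigned int lo = patch_or(0, pa) & 0x3ff;

		/* sethi places its immediate in bits 31..10 of the destination register */
		assert((((unsigned long)hi << 10) | lo) == pa);
		return 0;
	}
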
+ 1 - 0
arch/tile/Kconfig

@@ -12,6 +12,7 @@ config TILE
 	select GENERIC_PENDING_IRQ if SMP
 	select GENERIC_IRQ_SHOW
 	select SYS_HYPERVISOR
+	select ARCH_HAVE_NMI_SAFE_CMPXCHG if !M386
 
 # FIXME: investigate whether we need/want these options.
 #	select HAVE_IOREMAP_PROT

+ 38 - 0
arch/tile/include/asm/Kbuild

@@ -2,3 +2,41 @@ include include/asm-generic/Kbuild.asm
 
 header-y += ucontext.h
 header-y += hardwall.h
+
+generic-y += bug.h
+generic-y += bugs.h
+generic-y += cputime.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += fb.h
+generic-y += fcntl.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipc.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += kdebug.h
+generic-y += local.h
+generic-y += module.h
+generic-y += msgbuf.h
+generic-y += mutex.h
+generic-y += param.h
+generic-y += parport.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += scatterlist.h
+generic-y += sembuf.h
+generic-y += serial.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += types.h
+generic-y += ucontext.h
+generic-y += xor.h

+ 0 - 1
arch/tile/include/asm/bug.h

@@ -1 +0,0 @@
-#include <asm-generic/bug.h>

+ 0 - 1
arch/tile/include/asm/bugs.h

@@ -1 +0,0 @@
-#include <asm-generic/bugs.h>

+ 0 - 1
arch/tile/include/asm/cputime.h

@@ -1 +0,0 @@
-#include <asm-generic/cputime.h>

+ 0 - 1
arch/tile/include/asm/device.h

@@ -1 +0,0 @@
-#include <asm-generic/device.h>

+ 0 - 1
arch/tile/include/asm/div64.h

@@ -1 +0,0 @@
-#include <asm-generic/div64.h>

+ 0 - 1
arch/tile/include/asm/emergency-restart.h

@@ -1 +0,0 @@
-#include <asm-generic/emergency-restart.h>

+ 0 - 1
arch/tile/include/asm/errno.h

@@ -1 +0,0 @@
-#include <asm-generic/errno.h>

部分文件因为文件数量过多而无法显示