
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/ethernet/intel/e1000e/netdev.c

Minor conflict in e1000e, a line that got fixed in 'net'
has been removed in 'net-next'.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller authored 12 years ago
commit e5f2ef7ab4
100 changed files with 869 additions and 469 deletions
  1. CREDITS (+3/-3)
  2. Documentation/SubmittingPatches (+1/-2)
  3. Documentation/hwmon/adm1275 (+1/-1)
  4. Documentation/hwmon/adt7410 (+10/-1)
  5. Documentation/hwmon/jc42 (+1/-1)
  6. Documentation/hwmon/lineage-pem (+1/-1)
  7. Documentation/hwmon/lm25066 (+1/-1)
  8. Documentation/hwmon/ltc2978 (+3/-3)
  9. Documentation/hwmon/ltc4261 (+1/-1)
  10. Documentation/hwmon/max16064 (+1/-1)
  11. Documentation/hwmon/max16065 (+1/-1)
  12. Documentation/hwmon/max34440 (+1/-1)
  13. Documentation/hwmon/max8688 (+1/-1)
  14. Documentation/hwmon/pmbus (+1/-1)
  15. Documentation/hwmon/smm665 (+1/-1)
  16. Documentation/hwmon/ucd9000 (+1/-1)
  17. Documentation/hwmon/ucd9200 (+1/-1)
  18. Documentation/hwmon/zl6100 (+1/-1)
  19. Documentation/input/alps.txt (+60/-7)
  20. Documentation/networking/tuntap.txt (+77/-0)
  21. Documentation/power/opp.txt (+20/-5)
  22. Documentation/printk-formats.txt (+1/-1)
  23. Documentation/trace/ftrace.txt (+1/-1)
  24. MAINTAINERS (+2/-0)
  25. Makefile (+1/-1)
  26. arch/alpha/boot/head.S (+1/-0)
  27. arch/arm/boot/compressed/Makefile (+1/-1)
  28. arch/arm/include/asm/mmu.h (+4/-4)
  29. arch/arm/include/asm/mmu_context.h (+1/-1)
  30. arch/arm/include/asm/tlbflush.h (+28/-6)
  31. arch/arm/include/uapi/asm/unistd.h (+1/-1)
  32. arch/arm/kernel/asm-offsets.c (+1/-1)
  33. arch/arm/kernel/calls.S (+1/-1)
  34. arch/arm/kernel/head.S (+22/-4)
  35. arch/arm/kernel/hw_breakpoint.c (+1/-1)
  36. arch/arm/kernel/perf_event.c (+2/-2)
  37. arch/arm/kernel/perf_event_v7.c (+1/-1)
  38. arch/arm/kernel/smp.c (+1/-0)
  39. arch/arm/kernel/smp_tlb.c (+12/-0)
  40. arch/arm/kernel/smp_twd.c (+4/-0)
  41. arch/arm/kernel/suspend.c (+1/-0)
  42. arch/arm/lib/memset.S (+44/-41)
  43. arch/arm/mach-netx/generic.c (+1/-1)
  44. arch/arm/mach-netx/include/mach/irqs.h (+32/-32)
  45. arch/arm/mm/context.c (+18/-11)
  46. arch/arm/mm/idmap.c (+1/-0)
  47. arch/arm/mm/proc-v7-3level.S (+1/-1)
  48. arch/ia64/kernel/perfmon.c (+1/-0)
  49. arch/metag/include/asm/elf.h (+0/-3)
  50. arch/metag/mm/Kconfig (+1/-0)
  51. arch/powerpc/platforms/cell/spufs/inode.c (+1/-0)
  52. arch/s390/hypfs/inode.c (+1/-0)
  53. arch/s390/include/asm/cpu_mf.h (+1/-0)
  54. arch/tile/include/asm/compat.h (+3/-0)
  55. arch/tile/kernel/compat.c (+29/-13)
  56. arch/um/drivers/chan.h (+1/-1)
  57. arch/um/drivers/chan_kern.c (+2/-2)
  58. arch/um/drivers/chan_user.c (+6/-6)
  59. arch/um/drivers/chan_user.h (+3/-3)
  60. arch/um/drivers/line.c (+24/-18)
  61. arch/um/drivers/net_kern.c (+2/-0)
  62. arch/um/drivers/ssl.c (+0/-1)
  63. arch/um/drivers/stdio_console.c (+0/-1)
  64. arch/um/os-Linux/signal.c (+1/-1)
  65. arch/um/os-Linux/start_up.c (+2/-0)
  66. arch/x86/include/asm/bootparam_utils.h (+18/-2)
  67. arch/x86/kernel/setup.c (+8/-2)
  68. arch/x86/kernel/smpboot.c (+1/-2)
  69. arch/x86/mm/init.c (+2/-3)
  70. arch/x86/mm/pat.c (+7/-0)
  71. drivers/acpi/glue.c (+9/-46)
  72. drivers/acpi/processor_core.c (+1/-2)
  73. drivers/acpi/processor_driver.c (+1/-1)
  74. drivers/acpi/sleep.c (+11/-5)
  75. drivers/ata/libata-acpi.c (+1/-6)
  76. drivers/base/power/main.c (+0/-2)
  77. drivers/base/power/power.h (+2/-6)
  78. drivers/base/power/qos.c (+123/-94)
  79. drivers/base/power/sysfs.c (+1/-0)
  80. drivers/base/regmap/regmap-irq.c (+1/-0)
  81. drivers/char/random.c (+8/-4)
  82. drivers/cpufreq/cpufreq_governor.h (+1/-1)
  83. drivers/cpufreq/highbank-cpufreq.c (+1/-7)
  84. drivers/cpufreq/intel_pstate.c (+14/-28)
  85. drivers/firmware/dmi_scan.c (+2/-3)
  86. drivers/firmware/efivars.c (+97/-34)
  87. drivers/gpu/drm/i915/i915_drv.c (+18/-7)
  88. drivers/gpu/drm/i915/i915_irq.c (+24/-2)
  89. drivers/gpu/drm/i915/i915_reg.h (+2/-2)
  90. drivers/gpu/drm/i915/intel_crt.c (+1/-1)
  91. drivers/gpu/drm/i915/intel_ddi.c (+1/-1)
  92. drivers/gpu/drm/i915/intel_display.c (+30/-7)
  93. drivers/gpu/drm/i915/intel_dp.c (+2/-1)
  94. drivers/gpu/drm/i915/intel_pm.c (+1/-1)
  95. drivers/gpu/drm/mgag200/mgag200_drv.h (+0/-1)
  96. drivers/gpu/drm/mgag200/mgag200_i2c.c (+1/-0)
  97. drivers/gpu/drm/mgag200/mgag200_mode.c (+27/-0)
  98. drivers/gpu/drm/nouveau/core/engine/graph/nve0.c (+1/-1)
  99. drivers/gpu/drm/nouveau/core/subdev/bios/init.c (+1/-1)
  100. drivers/gpu/drm/nouveau/core/subdev/i2c/base.c (+1/-0)

+ 3 - 3
CREDITS

@@ -953,11 +953,11 @@ S: Blacksburg, Virginia 24061
 S: USA
 
 N: Randy Dunlap
-E: rdunlap@xenotime.net
-W: http://www.xenotime.net/linux/linux.html
-W: http://www.linux-usb.org
+E: rdunlap@infradead.org
+W: http://www.infradead.org/~rdunlap/
 D: Linux-USB subsystem, USB core/UHCI/printer/storage drivers
 D: x86 SMP, ACPI, bootflag hacking
+D: documentation, builds
 S: (ask for current address)
 S: USA
 

+ 1 - 2
Documentation/SubmittingPatches

@@ -60,8 +60,7 @@ own source tree.  For example:
 "dontdiff" is a list of files which are generated by the kernel during
 the build process, and should be ignored in any diff(1)-generated
 patch.  The "dontdiff" file is included in the kernel tree in
-2.6.12 and later.  For earlier kernel versions, you can get it
-from <http://www.xenotime.net/linux/doc/dontdiff>.
+2.6.12 and later.
 
 Make sure your patch does not include any extra files which do not
 belong in a patch submission.  Make sure to review your patch -after-

+ 1 - 1
Documentation/hwmon/adm1275

@@ -15,7 +15,7 @@ Supported chips:
     Addresses scanned: -
     Datasheet: www.analog.com/static/imported-files/data_sheets/ADM1276.pdf
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 
 Description

+ 10 - 1
Documentation/hwmon/adt7410

@@ -4,9 +4,14 @@ Kernel driver adt7410
 Supported chips:
   * Analog Devices ADT7410
     Prefix: 'adt7410'
-    Addresses scanned: I2C 0x48 - 0x4B
+    Addresses scanned: None
     Datasheet: Publicly available at the Analog Devices website
                http://www.analog.com/static/imported-files/data_sheets/ADT7410.pdf
+  * Analog Devices ADT7420
+    Prefix: 'adt7420'
+    Addresses scanned: None
+    Datasheet: Publicly available at the Analog Devices website
+               http://www.analog.com/static/imported-files/data_sheets/ADT7420.pdf
 
 Author: Hartmut Knaack <knaack.h@gmx.de>
 
@@ -27,6 +32,10 @@ value per second or even justget one sample on demand for power saving.
 Besides, it can completely power down its ADC, if power management is
 required.
 
+The ADT7420 is register compatible, the only differences being the package,
+a slightly narrower operating temperature range (-40°C to +150°C), and a
+better accuracy (0.25°C instead of 0.50°C.)
+
 Configuration Notes
 -------------------
 

+ 1 - 1
Documentation/hwmon/jc42

@@ -49,7 +49,7 @@ Supported chips:
     Addresses scanned: I2C 0x18 - 0x1f
 
 Author:
-	Guenter Roeck <guenter.roeck@ericsson.com>
+	Guenter Roeck <linux@roeck-us.net>
 
 
 Description

+ 1 - 1
Documentation/hwmon/lineage-pem

@@ -8,7 +8,7 @@ Supported devices:
     Documentation:
         http://www.lineagepower.com/oem/pdf/CPLI2C.pdf
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 
 Description

+ 1 - 1
Documentation/hwmon/lm25066

@@ -19,7 +19,7 @@ Supported chips:
     Datasheet:
 	http://www.national.com/pf/LM/LM5066.html
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 
 Description

+ 3 - 3
Documentation/hwmon/ltc2978

@@ -5,13 +5,13 @@ Supported chips:
   * Linear Technology LTC2978
     Prefix: 'ltc2978'
     Addresses scanned: -
-    Datasheet: http://cds.linear.com/docs/Datasheet/2978fa.pdf
+    Datasheet: http://www.linear.com/product/ltc2978
   * Linear Technology LTC3880
     Prefix: 'ltc3880'
     Addresses scanned: -
-    Datasheet: http://cds.linear.com/docs/Datasheet/3880f.pdf
+    Datasheet: http://www.linear.com/product/ltc3880
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 
 Description

+ 1 - 1
Documentation/hwmon/ltc4261

@@ -8,7 +8,7 @@ Supported chips:
     Datasheet:
         http://cds.linear.com/docs/Datasheet/42612fb.pdf
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 
 Description

+ 1 - 1
Documentation/hwmon/max16064

@@ -7,7 +7,7 @@ Supported chips:
     Addresses scanned: -
     Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX16064.pdf
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 
 Description

+ 1 - 1
Documentation/hwmon/max16065

@@ -24,7 +24,7 @@ Supported chips:
 	http://datasheets.maxim-ic.com/en/ds/MAX16070-MAX16071.pdf
 
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 
 Description

+ 1 - 1
Documentation/hwmon/max34440

@@ -27,7 +27,7 @@ Supported chips:
     Addresses scanned: -
     Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34461.pdf
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 
 Description

+ 1 - 1
Documentation/hwmon/max8688

@@ -7,7 +7,7 @@ Supported chips:
     Addresses scanned: -
     Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8688.pdf
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 
 Description

+ 1 - 1
Documentation/hwmon/pmbus

@@ -34,7 +34,7 @@ Supported chips:
     Addresses scanned: -
     Datasheet: n.a.
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 
 Description

+ 1 - 1
Documentation/hwmon/smm665

@@ -29,7 +29,7 @@ Supported chips:
       http://www.summitmicro.com/prod_select/summary/SMM766/SMM766_2086.pdf
       http://www.summitmicro.com/prod_select/summary/SMM766B/SMM766B_2122.pdf
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 
 Module Parameters

+ 1 - 1
Documentation/hwmon/ucd9000

@@ -11,7 +11,7 @@ Supported chips:
 	http://focus.ti.com/lit/ds/symlink/ucd9090.pdf
 	http://focus.ti.com/lit/ds/symlink/ucd90910.pdf
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 
 Description

+ 1 - 1
Documentation/hwmon/ucd9200

@@ -15,7 +15,7 @@ Supported chips:
 	http://focus.ti.com/lit/ds/symlink/ucd9246.pdf
 	http://focus.ti.com/lit/ds/symlink/ucd9248.pdf
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 
 Description

+ 1 - 1
Documentation/hwmon/zl6100

@@ -54,7 +54,7 @@ http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146401
 http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146256
 
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 
 Description

+ 60 - 7
Documentation/input/alps.txt

@@ -3,10 +3,26 @@ ALPS Touchpad Protocol
 
 Introduction
 ------------
-
-Currently the ALPS touchpad driver supports four protocol versions in use by
-ALPS touchpads, called versions 1, 2, 3, and 4. Information about the various
-protocol versions is contained in the following sections.
+Currently the ALPS touchpad driver supports five protocol versions in use by
+ALPS touchpads, called versions 1, 2, 3, 4 and 5.
+
+Since roughly mid-2010 several new ALPS touchpads have been released and
+integrated into a variety of laptops and netbooks.  These new touchpads
+have enough behavior differences that the alps_model_data definition
+table, describing the properties of the different versions, is no longer
+adequate.  The design choices were to re-define the alps_model_data
+table, with the risk of regression testing existing devices, or isolate
+the new devices outside of the alps_model_data table.  The latter design
+choice was made.  The new touchpad signatures are named: "Rushmore",
+"Pinnacle", and "Dolphin", which you will see in the alps.c code.
+For the purposes of this document, this group of ALPS touchpads will
+generically be called "new ALPS touchpads".
+
+We experimented with probing the ACPI interface _HID (Hardware ID)/_CID
+(Compatibility ID) definition as a way to uniquely identify the
+different ALPS variants but there did not appear to be a 1:1 mapping.
+In fact, it appeared to be an m:n mapping between the _HID and actual
+hardware type.
 
 Detection
 ---------
@@ -20,9 +36,13 @@ If the E6 report is successful, the touchpad model is identified using the "E7
 report" sequence: E8-E7-E7-E7-E9. The response is the model signature and is
 matched against known models in the alps_model_data_array.
 
-With protocol versions 3 and 4, the E7 report model signature is always
-73-02-64. To differentiate between these versions, the response from the
-"Enter Command Mode" sequence must be inspected as described below.
+For older touchpads supporting protocol versions 3 and 4, the E7 report
+model signature is always 73-02-64. To differentiate between these
+versions, the response from the "Enter Command Mode" sequence must be
+inspected as described below.
+
+The new ALPS touchpads have an E7 signature of 73-03-50 or 73-03-0A but
+seem to be better differentiated by the EC Command Mode response.
 
 Command Mode
 ------------
@@ -47,6 +67,14 @@ address of the register being read, and the third contains the value of the
 register. Registers are written by writing the value one nibble at a time
 using the same encoding used for addresses.
 
+For the new ALPS touchpads, the EC command is used to enter command
+mode. The response in the new ALPS touchpads is significantly different,
+and more important in determining the behavior.  This code has been
+separated from the original alps_model_data table and put in the
+alps_identify function.  For example, there seem to be two hardware init
+sequences for the "Dolphin" touchpads as determined by the second byte
+of the EC response.
+
 Packet Format
 -------------
 
@@ -187,3 +215,28 @@ There are several things worth noting here.
     well.
 
 So far no v4 devices with tracksticks have been encountered.
+
+ALPS Absolute Mode - Protocol Version 5
+---------------------------------------
+This is basically Protocol Version 3 but with different logic for packet
+decode.  It uses the same alps_process_touchpad_packet_v3 call with a
+specialized decode_fields function pointer to correctly interpret the
+packets.  This appears to only be used by the Dolphin devices.
+
+For single-touch, the 6-byte packet format is:
+
+ byte 0:    1    1    0    0    1    0    0    0
+ byte 1:    0   x6   x5   x4   x3   x2   x1   x0
+ byte 2:    0   y6   y5   y4   y3   y2   y1   y0
+ byte 3:    0    M    R    L    1    m    r    l
+ byte 4:   y10  y9   y8   y7  x10   x9   x8   x7
+ byte 5:    0   z6   z5   z4   z3   z2   z1   z0
+
+For mt, the format is:
+
+ byte 0:    1    1    1    n3   1   n2   n1   x24
+ byte 1:    1   y7   y6    y5  y4   y3   y2    y1
+ byte 2:    ?   x2   x1   y12 y11  y10   y9    y8
+ byte 3:    0  x23  x22   x21 x20  x19  x18   x17
+ byte 4:    0   x9   x8    x7  x6   x5   x4    x3
+ byte 5:    0  x16  x15   x14 x13  x12  x11   x10
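
The V5 single-touch layout above maps onto a fairly direct bit-unpacking
routine.  The following C sketch is illustrative only -- the struct and
function names are invented for this example, and the real decode sits in
the specialized decode_fields path of alps.c mentioned above:

    struct alps_v5_st {
        unsigned int x, y, z;              /* absolute position and pressure */
        unsigned int left, right, middle;  /* l/r/m bits of byte 3 */
    };

    /*
     * Unpack a 6-byte V5 single-touch packet: x0-x6 from byte 1,
     * y0-y6 from byte 2, the high bits x7-x10/y7-y10 from byte 4,
     * and z0-z6 from byte 5, exactly as in the table above.
     */
    static void alps_v5_decode_st(const unsigned char p[6],
                                  struct alps_v5_st *st)
    {
        st->x = (p[1] & 0x7f) | ((p[4] & 0x0f) << 7);
        st->y = (p[2] & 0x7f) | (((p[4] >> 4) & 0x0f) << 7);
        st->z = p[5] & 0x7f;
        st->left   = p[3] & 0x01;
        st->right  = (p[3] >> 1) & 0x01;
        st->middle = (p[3] >> 2) & 0x01;
    }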

+ 77 - 0
Documentation/networking/tuntap.txt

@@ -105,6 +105,83 @@ Copyright (C) 1999-2000 Maxim Krasnyansky <max_mk@yahoo.com>
      Proto [2 bytes]
      Raw protocol(IP, IPv6, etc) frame.
 
+  3.3 Multiqueue tuntap interface:
+
+  From version 3.8, Linux supports multiqueue tuntap which can uses multiple
+  file descriptors (queues) to parallelize packets sending or receiving. The
+  device allocation is the same as before, and if user wants to create multiple
+  queues, TUNSETIFF with the same device name must be called many times with
+  IFF_MULTI_QUEUE flag.
+
+  char *dev should be the name of the device, queues is the number of queues to
+  be created, fds is used to store and return the file descriptors (queues)
+  created to the caller. Each file descriptor were served as the interface of a
+  queue which could be accessed by userspace.
+
+  #include <linux/if.h>
+  #include <linux/if_tun.h>
+
+  int tun_alloc_mq(char *dev, int queues, int *fds)
+  {
+      struct ifreq ifr;
+      int fd, err, i;
+
+      if (!dev)
+          return -1;
+
+      memset(&ifr, 0, sizeof(ifr));
+      /* Flags: IFF_TUN   - TUN device (no Ethernet headers)
+       *        IFF_TAP   - TAP device
+       *
+       *        IFF_NO_PI - Do not provide packet information
+       *        IFF_MULTI_QUEUE - Create a queue of multiqueue device
+       */
+      ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
+      strcpy(ifr.ifr_name, dev);
+
+      for (i = 0; i < queues; i++) {
+          if ((fd = open("/dev/net/tun", O_RDWR)) < 0)
+             goto err;
+          err = ioctl(fd, TUNSETIFF, (void *)&ifr);
+          if (err) {
+             close(fd);
+             goto err;
+          }
+          fds[i] = fd;
+      }
+
+      return 0;
+  err:
+      for (--i; i >= 0; i--)
+          close(fds[i]);
+      return err;
+  }
+
+  A new ioctl(TUNSETQUEUE) were introduced to enable or disable a queue. When
+  calling it with IFF_DETACH_QUEUE flag, the queue were disabled. And when
+  calling it with IFF_ATTACH_QUEUE flag, the queue were enabled. The queue were
+  enabled by default after it was created through TUNSETIFF.
+
+  fd is the file descriptor (queue) that we want to enable or disable, when
+  enable is true we enable it, otherwise we disable it
+
+  #include <linux/if.h>
+  #include <linux/if_tun.h>
+
+  int tun_set_queue(int fd, int enable)
+  {
+      struct ifreq ifr;
+
+      memset(&ifr, 0, sizeof(ifr));
+
+      if (enable)
+         ifr.ifr_flags = IFF_ATTACH_QUEUE;
+      else
+         ifr.ifr_flags = IFF_DETACH_QUEUE;
+
+      return ioctl(fd, TUNSETQUEUE, (void *)&ifr);
+  }
+
 Universal TUN/TAP device driver Frequently Asked Question.
    
 1. What platforms are supported by TUN/TAP driver ?
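
Read together, the two helpers documented in the new multiqueue section
cover the whole queue lifecycle: tun_alloc_mq() creates the queues and
tun_set_queue() enables or disables individual ones afterwards.  A
minimal caller could look like the sketch below; the device name
"mqtap0" and the queue count of 4 are arbitrary, and error handling is
kept to a minimum:

    #include <stdio.h>
    #include <unistd.h>

    /* tun_alloc_mq() and tun_set_queue() as defined in the text above */
    int tun_alloc_mq(char *dev, int queues, int *fds);
    int tun_set_queue(int fd, int enable);

    int main(void)
    {
        char dev[] = "mqtap0";      /* example device name */
        int fds[4];
        int i;

        if (tun_alloc_mq(dev, 4, fds) < 0) {
            perror("tun_alloc_mq");
            return 1;
        }

        /* Temporarily take queue 3 out of service, then re-enable it. */
        if (tun_set_queue(fds[3], 0) < 0)
            perror("TUNSETQUEUE detach");
        if (tun_set_queue(fds[3], 1) < 0)
            perror("TUNSETQUEUE attach");

        /* ... hand fds[0..3] to per-queue worker threads here ... */

        for (i = 0; i < 4; i++)
            close(fds[i]);
        return 0;
    }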

+ 20 - 5
Documentation/power/opp.txt

@@ -1,6 +1,5 @@
-*=============*
-* OPP Library *
-*=============*
+Operating Performance Points (OPP) Library
+==========================================
 
 (C) 2009-2010 Nishanth Menon <nm@ti.com>, Texas Instruments Incorporated
 
@@ -16,15 +15,31 @@ Contents
 
 1. Introduction
 ===============
+1.1 What is an Operating Performance Point (OPP)?
+
 Complex SoCs of today consists of a multiple sub-modules working in conjunction.
 In an operational system executing varied use cases, not all modules in the SoC
 need to function at their highest performing frequency all the time. To
 facilitate this, sub-modules in a SoC are grouped into domains, allowing some
-domains to run at lower voltage and frequency while other domains are loaded
-more. The set of discrete tuples consisting of frequency and voltage pairs that
+domains to run at lower voltage and frequency while other domains run at
+voltage/frequency pairs that are higher.
+
+The set of discrete tuples consisting of frequency and voltage pairs that
 the device will support per domain are called Operating Performance Points or
 OPPs.
 
+As an example:
+Let us consider an MPU device which supports the following:
+{300MHz at minimum voltage of 1V}, {800MHz at minimum voltage of 1.2V},
+{1GHz at minimum voltage of 1.3V}
+
+We can represent these as three OPPs as the following {Hz, uV} tuples:
+{300000000, 1000000}
+{800000000, 1200000}
+{1000000000, 1300000}
+
+1.2 Operating Performance Points Library
+
 OPP library provides a set of helper functions to organize and query the OPP
 information. The library is located in drivers/base/power/opp.c and the header
 is located in include/linux/opp.h. OPP library can be enabled by enabling
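
As a rough kernel-side illustration of the example in section 1.1, the
three MPU OPPs could be registered and queried through the helpers
declared in include/linux/opp.h.  This is only a sketch and assumes the
opp_add()/opp_find_freq_ceil()/opp_get_voltage() names of this kernel
era (later renamed to dev_pm_opp_*); mpu_dev and the function name are
placeholders for whatever the SoC framework uses:

    #include <linux/opp.h>
    #include <linux/err.h>
    #include <linux/kernel.h>
    #include <linux/rcupdate.h>
    #include <linux/device.h>

    static int example_register_mpu_opps(struct device *mpu_dev)
    {
        struct opp *opp;
        unsigned long freq = 600000000;   /* target rate in Hz */
        unsigned long u_volt;

        /* The three {Hz, uV} tuples from the example above. */
        opp_add(mpu_dev,  300000000, 1000000);
        opp_add(mpu_dev,  800000000, 1200000);
        opp_add(mpu_dev, 1000000000, 1300000);

        /* Lookups are RCU protected; the request rounds up to 800 MHz. */
        rcu_read_lock();
        opp = opp_find_freq_ceil(mpu_dev, &freq);
        if (IS_ERR(opp)) {
            rcu_read_unlock();
            return PTR_ERR(opp);
        }
        u_volt = opp_get_voltage(opp);    /* 1200000 uV for this OPP */
        rcu_read_unlock();

        pr_info("MPU: %lu Hz needs %lu uV\n", freq, u_volt);
        return 0;
    }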

+ 1 - 1
Documentation/printk-formats.txt

@@ -170,5 +170,5 @@ Reminder: sizeof() result is of type size_t.
 Thank you for your cooperation and attention.
 
 
-By Randy Dunlap <rdunlap@xenotime.net> and
+By Randy Dunlap <rdunlap@infradead.org> and
 Andrew Murray <amurray@mpc-data.co.uk>

+ 1 - 1
Documentation/trace/ftrace.txt

@@ -1873,7 +1873,7 @@ feature:
 
 	status\input  |     0      |     1      |    else    |
 	--------------+------------+------------+------------+
-	not allocated |(do nothing)| alloc+swap |   EINVAL   |
+	not allocated |(do nothing)| alloc+swap |(do nothing)|
 	--------------+------------+------------+------------+
 	allocated     |    free    |    swap    |   clear    |
 	--------------+------------+------------+------------+

+ 2 - 0
MAINTAINERS

@@ -6412,6 +6412,8 @@ F:	Documentation/networking/LICENSE.qla3xxx
 F:	drivers/net/ethernet/qlogic/qla3xxx.*
 
 QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
+M:	Rajesh Borundia <rajesh.borundia@qlogic.com>
+M:	Shahed Shaikh <shahed.shaikh@qlogic.com>
 M:	Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
 M:	Sony Chacko <sony.chacko@qlogic.com>
 M:	linux-driver@qlogic.com

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Unicycling Gorilla
 
 # *DOCUMENTATION*

+ 1 - 0
arch/alpha/boot/head.S

@@ -4,6 +4,7 @@
  * initial bootloader stuff..
  */
 
+#include <asm/pal.h>
 
 	.set noreorder
 	.globl	__start

+ 1 - 1
arch/arm/boot/compressed/Makefile

@@ -120,7 +120,7 @@ ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
 endif
 
-ccflags-y := -fpic -fno-builtin -I$(obj)
+ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
 asflags-y := -Wa,-march=all -DZIMAGE
 
 # Supply kernel BSS size to the decompressor via a linker symbol.

+ 4 - 4
arch/arm/include/asm/mmu.h

@@ -5,15 +5,15 @@
 
 typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
-	u64 id;
+	atomic64_t	id;
 #endif
-	unsigned int vmalloc_seq;
+	unsigned int	vmalloc_seq;
 } mm_context_t;
 
 #ifdef CONFIG_CPU_HAS_ASID
 #define ASID_BITS	8
 #define ASID_MASK	((~0ULL) << ASID_BITS)
-#define ASID(mm)	((mm)->context.id & ~ASID_MASK)
+#define ASID(mm)	((mm)->context.id.counter & ~ASID_MASK)
 #else
 #define ASID(mm)	(0)
 #endif
@@ -26,7 +26,7 @@ typedef struct {
  *  modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com>
  */
 typedef struct {
-	unsigned long		end_brk;
+	unsigned long	end_brk;
 } mm_context_t;
 
 #endif

+ 1 - 1
arch/arm/include/asm/mmu_context.h

@@ -25,7 +25,7 @@ void __check_vmalloc_seq(struct mm_struct *mm);
 #ifdef CONFIG_CPU_HAS_ASID
 
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
-#define init_new_context(tsk,mm)	({ mm->context.id = 0; })
+#define init_new_context(tsk,mm)	({ atomic64_set(&mm->context.id, 0); 0; })
 
 #else	/* !CONFIG_CPU_HAS_ASID */
 

+ 28 - 6
arch/arm/include/asm/tlbflush.h

@@ -34,10 +34,13 @@
 #define TLB_V6_D_ASID	(1 << 17)
 #define TLB_V6_I_ASID	(1 << 18)
 
+#define TLB_V6_BP	(1 << 19)
+
 /* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */
-#define TLB_V7_UIS_PAGE	(1 << 19)
-#define TLB_V7_UIS_FULL (1 << 20)
-#define TLB_V7_UIS_ASID (1 << 21)
+#define TLB_V7_UIS_PAGE	(1 << 20)
+#define TLB_V7_UIS_FULL (1 << 21)
+#define TLB_V7_UIS_ASID (1 << 22)
+#define TLB_V7_UIS_BP	(1 << 23)
 
 #define TLB_BARRIER	(1 << 28)
 #define TLB_L2CLEAN_FR	(1 << 29)		/* Feroceon */
@@ -150,7 +153,8 @@
 #define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
 			 TLB_V6_I_FULL | TLB_V6_D_FULL | \
 			 TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
-			 TLB_V6_I_ASID | TLB_V6_D_ASID)
+			 TLB_V6_I_ASID | TLB_V6_D_ASID | \
+			 TLB_V6_BP)
 
 #ifdef CONFIG_CPU_TLB_V6
 # define v6wbi_possible_flags	v6wbi_tlb_flags
@@ -166,9 +170,11 @@
 #endif
 
 #define v7wbi_tlb_flags_smp	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
-			 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
+				 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \
+				 TLB_V7_UIS_ASID | TLB_V7_UIS_BP)
 #define v7wbi_tlb_flags_up	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
-			 TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)
+				 TLB_V6_U_FULL | TLB_V6_U_PAGE | \
+				 TLB_V6_U_ASID | TLB_V6_BP)
 
 #ifdef CONFIG_CPU_TLB_V7
 
@@ -430,6 +436,20 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
 	}
 }
 
+static inline void local_flush_bp_all(void)
+{
+	const int zero = 0;
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	if (tlb_flag(TLB_V7_UIS_BP))
+		asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
+	else if (tlb_flag(TLB_V6_BP))
+		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
+
+	if (tlb_flag(TLB_BARRIER))
+		isb();
+}
+
 /*
  *	flush_pmd_entry
  *
@@ -480,6 +500,7 @@ static inline void clean_pmd_entry(void *pmd)
 #define flush_tlb_kernel_page	local_flush_tlb_kernel_page
 #define flush_tlb_range		local_flush_tlb_range
 #define flush_tlb_kernel_range	local_flush_tlb_kernel_range
+#define flush_bp_all		local_flush_bp_all
 #else
 extern void flush_tlb_all(void);
 extern void flush_tlb_mm(struct mm_struct *mm);
@@ -487,6 +508,7 @@ extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
 extern void flush_tlb_kernel_page(unsigned long kaddr);
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_bp_all(void);
 #endif
 
 /*

+ 1 - 1
arch/arm/include/uapi/asm/unistd.h

@@ -404,7 +404,7 @@
 #define __NR_setns			(__NR_SYSCALL_BASE+375)
 #define __NR_process_vm_readv		(__NR_SYSCALL_BASE+376)
 #define __NR_process_vm_writev		(__NR_SYSCALL_BASE+377)
-					/* 378 for kcmp */
+#define __NR_kcmp			(__NR_SYSCALL_BASE+378)
 #define __NR_finit_module		(__NR_SYSCALL_BASE+379)
 
 /*

+ 1 - 1
arch/arm/kernel/asm-offsets.c

@@ -110,7 +110,7 @@ int main(void)
   BLANK();
 #endif
 #ifdef CONFIG_CPU_HAS_ASID
-  DEFINE(MM_CONTEXT_ID,		offsetof(struct mm_struct, context.id));
+  DEFINE(MM_CONTEXT_ID,		offsetof(struct mm_struct, context.id.counter));
   BLANK();
 #endif
   DEFINE(VMA_VM_MM,		offsetof(struct vm_area_struct, vm_mm));

+ 1 - 1
arch/arm/kernel/calls.S

@@ -387,7 +387,7 @@
 /* 375 */	CALL(sys_setns)
 		CALL(sys_process_vm_readv)
 		CALL(sys_process_vm_writev)
-		CALL(sys_ni_syscall)	/* reserved for sys_kcmp */
+		CALL(sys_kcmp)
 		CALL(sys_finit_module)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls

+ 22 - 4
arch/arm/kernel/head.S

@@ -184,13 +184,22 @@ __create_page_tables:
 	orr	r3, r3, #3			@ PGD block type
 	mov	r6, #4				@ PTRS_PER_PGD
 	mov	r7, #1 << (55 - 32)		@ L_PGD_SWAPPER
-1:	str	r3, [r0], #4			@ set bottom PGD entry bits
+1:
+#ifdef CONFIG_CPU_ENDIAN_BE8
 	str	r7, [r0], #4			@ set top PGD entry bits
+	str	r3, [r0], #4			@ set bottom PGD entry bits
+#else
+	str	r3, [r0], #4			@ set bottom PGD entry bits
+	str	r7, [r0], #4			@ set top PGD entry bits
+#endif
 	add	r3, r3, #0x1000			@ next PMD table
 	subs	r6, r6, #1
 	bne	1b
 
 	add	r4, r4, #0x1000			@ point to the PMD tables
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	add	r4, r4, #4			@ we only write the bottom word
+#endif
 #endif
 
 	ldr	r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags
@@ -258,6 +267,11 @@ __create_page_tables:
 	addne	r6, r6, #1 << SECTION_SHIFT
 	strne	r6, [r3]
 
+#if defined(CONFIG_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
+	sub	r4, r4, #4			@ Fixup page table pointer
+						@ for 64-bit descriptors
+#endif
+
 #ifdef CONFIG_DEBUG_LL
 #if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
 	/*
@@ -276,13 +290,17 @@ __create_page_tables:
 	orr	r3, r7, r3, lsl #SECTION_SHIFT
 #ifdef CONFIG_ARM_LPAE
 	mov	r7, #1 << (54 - 32)		@ XN
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	str	r7, [r0], #4
+	str	r3, [r0], #4
 #else
-	orr	r3, r3, #PMD_SECT_XN
-#endif
 	str	r3, [r0], #4
-#ifdef CONFIG_ARM_LPAE
 	str	r7, [r0], #4
 #endif
+#else
+	orr	r3, r3, #PMD_SECT_XN
+	str	r3, [r0], #4
+#endif
 
 #else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
 	/* we don't need any serial debugging mappings */

+ 1 - 1
arch/arm/kernel/hw_breakpoint.c

@@ -1023,7 +1023,7 @@ out_mdbgen:
 static int __cpuinit dbg_reset_notify(struct notifier_block *self,
 				      unsigned long action, void *cpu)
 {
-	if (action == CPU_ONLINE)
+	if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
 		smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);
 
 	return NOTIFY_OK;

+ 2 - 2
arch/arm/kernel/perf_event.c

@@ -400,7 +400,7 @@ __hw_perf_event_init(struct perf_event *event)
 	}
 
 	if (event->group_leader != event) {
-		if (validate_group(event) != 0);
+		if (validate_group(event) != 0)
 			return -EINVAL;
 	}
 
@@ -484,7 +484,7 @@ const struct dev_pm_ops armpmu_dev_pm_ops = {
 	SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
 };
 
-static void __init armpmu_init(struct arm_pmu *armpmu)
+static void armpmu_init(struct arm_pmu *armpmu)
 {
 	atomic_set(&armpmu->active_events, 0);
 	mutex_init(&armpmu->reserve_mutex);

+ 1 - 1
arch/arm/kernel/perf_event_v7.c

@@ -774,7 +774,7 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 /*
  * PMXEVTYPER: Event selection reg
  */
-#define	ARMV7_EVTYPE_MASK	0xc00000ff	/* Mask for writable bits */
+#define	ARMV7_EVTYPE_MASK	0xc80000ff	/* Mask for writable bits */
 #define	ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */
 
 /*

+ 1 - 0
arch/arm/kernel/smp.c

@@ -285,6 +285,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	 * switch away from it before attempting any exclusive accesses.
 	 */
 	cpu_switch_mm(mm->pgd, mm);
+	local_flush_bp_all();
 	enter_lazy_tlb(mm, current);
 	local_flush_tlb_all();
 

+ 12 - 0
arch/arm/kernel/smp_tlb.c

@@ -64,6 +64,11 @@ static inline void ipi_flush_tlb_kernel_range(void *arg)
 	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
 }
 
+static inline void ipi_flush_bp_all(void *ignored)
+{
+	local_flush_bp_all();
+}
+
 void flush_tlb_all(void)
 {
 	if (tlb_ops_need_broadcast())
@@ -127,3 +132,10 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 		local_flush_tlb_kernel_range(start, end);
 }
 
+void flush_bp_all(void)
+{
+	if (tlb_ops_need_broadcast())
+		on_each_cpu(ipi_flush_bp_all, NULL, 1);
+	else
+		local_flush_bp_all();
+}

+ 4 - 0
arch/arm/kernel/smp_twd.c

@@ -22,6 +22,7 @@
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
 
+#include <asm/smp_plat.h>
 #include <asm/smp_twd.h>
 #include <asm/localtimer.h>
 
@@ -373,6 +374,9 @@ void __init twd_local_timer_of_register(void)
 	struct device_node *np;
 	int err;
 
+	if (!is_smp() || !setup_max_cpus)
+		return;
+
 	np = of_find_matching_node(NULL, twd_of_match);
 	if (!np)
 		return;

+ 1 - 0
arch/arm/kernel/suspend.c

@@ -68,6 +68,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 	ret = __cpu_suspend(arg, fn);
 	if (ret == 0) {
 		cpu_switch_mm(mm->pgd, mm);
+		local_flush_bp_all();
 		local_flush_tlb_all();
 	}
 

+ 44 - 41
arch/arm/lib/memset.S

@@ -19,9 +19,9 @@
 1:	subs	r2, r2, #4		@ 1 do we have enough
 	blt	5f			@ 1 bytes to align with?
 	cmp	r3, #2			@ 1
-	strltb	r1, [r0], #1		@ 1
-	strleb	r1, [r0], #1		@ 1
-	strb	r1, [r0], #1		@ 1
+	strltb	r1, [ip], #1		@ 1
+	strleb	r1, [ip], #1		@ 1
+	strb	r1, [ip], #1		@ 1
 	add	r2, r2, r3		@ 1 (r2 = r2 - (4 - r3))
 /*
  * The pointer is now aligned and the length is adjusted.  Try doing the
@@ -29,10 +29,14 @@
  */
 
 ENTRY(memset)
-	ands	r3, r0, #3		@ 1 unaligned?
+/*
+ * Preserve the contents of r0 for the return value.
+ */
+	mov	ip, r0
+	ands	r3, ip, #3		@ 1 unaligned?
 	bne	1b			@ 1
 /*
- * we know that the pointer in r0 is aligned to a word boundary.
+ * we know that the pointer in ip is aligned to a word boundary.
  */
 	orr	r1, r1, r1, lsl #8
 	orr	r1, r1, r1, lsl #16
@@ -43,29 +47,28 @@ ENTRY(memset)
 #if ! CALGN(1)+0
 
 /*
- * We need an extra register for this loop - save the return address and
- * use the LR
+ * We need 2 extra registers for this loop - use r8 and the LR
  */
-	str	lr, [sp, #-4]!
-	mov	ip, r1
+	stmfd	sp!, {r8, lr}
+	mov	r8, r1
 	mov	lr, r1
 
 2:	subs	r2, r2, #64
-	stmgeia	r0!, {r1, r3, ip, lr}	@ 64 bytes at a time.
-	stmgeia	r0!, {r1, r3, ip, lr}
-	stmgeia	r0!, {r1, r3, ip, lr}
-	stmgeia	r0!, {r1, r3, ip, lr}
+	stmgeia	ip!, {r1, r3, r8, lr}	@ 64 bytes at a time.
+	stmgeia	ip!, {r1, r3, r8, lr}
+	stmgeia	ip!, {r1, r3, r8, lr}
+	stmgeia	ip!, {r1, r3, r8, lr}
 	bgt	2b
-	ldmeqfd	sp!, {pc}		@ Now <64 bytes to go.
+	ldmeqfd	sp!, {r8, pc}		@ Now <64 bytes to go.
 /*
  * No need to correct the count; we're only testing bits from now on
  */
 	tst	r2, #32
-	stmneia	r0!, {r1, r3, ip, lr}
-	stmneia	r0!, {r1, r3, ip, lr}
+	stmneia	ip!, {r1, r3, r8, lr}
+	stmneia	ip!, {r1, r3, r8, lr}
 	tst	r2, #16
-	stmneia	r0!, {r1, r3, ip, lr}
-	ldr	lr, [sp], #4
+	stmneia	ip!, {r1, r3, r8, lr}
+	ldmfd	sp!, {r8, lr}
 
 #else
 
@@ -74,54 +77,54 @@ ENTRY(memset)
  * whole cache lines at once.
  */
 
-	stmfd	sp!, {r4-r7, lr}
+	stmfd	sp!, {r4-r8, lr}
 	mov	r4, r1
 	mov	r5, r1
 	mov	r6, r1
 	mov	r7, r1
-	mov	ip, r1
+	mov	r8, r1
 	mov	lr, r1
 
 	cmp	r2, #96
-	tstgt	r0, #31
+	tstgt	ip, #31
 	ble	3f
 
-	and	ip, r0, #31
-	rsb	ip, ip, #32
-	sub	r2, r2, ip
-	movs	ip, ip, lsl #(32 - 4)
-	stmcsia	r0!, {r4, r5, r6, r7}
-	stmmiia	r0!, {r4, r5}
-	tst	ip, #(1 << 30)
-	mov	ip, r1
-	strne	r1, [r0], #4
+	and	r8, ip, #31
+	rsb	r8, r8, #32
+	sub	r2, r2, r8
+	movs	r8, r8, lsl #(32 - 4)
+	stmcsia	ip!, {r4, r5, r6, r7}
+	stmmiia	ip!, {r4, r5}
+	tst	r8, #(1 << 30)
+	mov	r8, r1
+	strne	r1, [ip], #4
 
 3:	subs	r2, r2, #64
-	stmgeia	r0!, {r1, r3-r7, ip, lr}
-	stmgeia	r0!, {r1, r3-r7, ip, lr}
+	stmgeia	ip!, {r1, r3-r8, lr}
+	stmgeia	ip!, {r1, r3-r8, lr}
 	bgt	3b
-	ldmeqfd	sp!, {r4-r7, pc}
+	ldmeqfd	sp!, {r4-r8, pc}
 
 	tst	r2, #32
-	stmneia	r0!, {r1, r3-r7, ip, lr}
+	stmneia	ip!, {r1, r3-r8, lr}
 	tst	r2, #16
-	stmneia	r0!, {r4-r7}
-	ldmfd	sp!, {r4-r7, lr}
+	stmneia	ip!, {r4-r7}
+	ldmfd	sp!, {r4-r8, lr}
 
 #endif
 
 4:	tst	r2, #8
-	stmneia	r0!, {r1, r3}
+	stmneia	ip!, {r1, r3}
 	tst	r2, #4
-	strne	r1, [r0], #4
+	strne	r1, [ip], #4
 /*
  * When we get here, we've got less than 4 bytes to zero.  We
  * may have an unaligned pointer as well.
  */
 5:	tst	r2, #2
-	strneb	r1, [r0], #1
-	strneb	r1, [r0], #1
+	strneb	r1, [ip], #1
+	strneb	r1, [ip], #1
 	tst	r2, #1
-	strneb	r1, [r0], #1
+	strneb	r1, [ip], #1
 	mov	pc, lr
 ENDPROC(memset)

+ 1 - 1
arch/arm/mach-netx/generic.c

@@ -168,7 +168,7 @@ void __init netx_init_irq(void)
 {
 	int irq;
 
-	vic_init(io_p2v(NETX_PA_VIC), 0, ~0, 0);
+	vic_init(io_p2v(NETX_PA_VIC), NETX_IRQ_VIC_START, ~0, 0);
 
 	for (irq = NETX_IRQ_HIF_CHAINED(0); irq <= NETX_IRQ_HIF_LAST; irq++) {
 		irq_set_chip_and_handler(irq, &netx_hif_chip,

+ 32 - 32
arch/arm/mach-netx/include/mach/irqs.h

@@ -17,42 +17,42 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
-#define NETX_IRQ_VIC_START   0
-#define NETX_IRQ_SOFTINT     0
-#define NETX_IRQ_TIMER0      1
-#define NETX_IRQ_TIMER1      2
-#define NETX_IRQ_TIMER2      3
-#define NETX_IRQ_SYSTIME_NS  4
-#define NETX_IRQ_SYSTIME_S   5
-#define NETX_IRQ_GPIO_15     6
-#define NETX_IRQ_WATCHDOG    7
-#define NETX_IRQ_UART0       8
-#define NETX_IRQ_UART1       9
-#define NETX_IRQ_UART2      10
-#define NETX_IRQ_USB        11
-#define NETX_IRQ_SPI        12
-#define NETX_IRQ_I2C        13
-#define NETX_IRQ_LCD        14
-#define NETX_IRQ_HIF        15
-#define NETX_IRQ_GPIO_0_14  16
-#define NETX_IRQ_XPEC0      17
-#define NETX_IRQ_XPEC1      18
-#define NETX_IRQ_XPEC2      19
-#define NETX_IRQ_XPEC3      20
-#define NETX_IRQ_XPEC(no)   (17 + (no))
-#define NETX_IRQ_MSYNC0     21
-#define NETX_IRQ_MSYNC1     22
-#define NETX_IRQ_MSYNC2     23
-#define NETX_IRQ_MSYNC3     24
-#define NETX_IRQ_IRQ_PHY    25
-#define NETX_IRQ_ISO_AREA   26
+#define NETX_IRQ_VIC_START	64
+#define NETX_IRQ_SOFTINT	(NETX_IRQ_VIC_START + 0)
+#define NETX_IRQ_TIMER0		(NETX_IRQ_VIC_START + 1)
+#define NETX_IRQ_TIMER1		(NETX_IRQ_VIC_START + 2)
+#define NETX_IRQ_TIMER2		(NETX_IRQ_VIC_START + 3)
+#define NETX_IRQ_SYSTIME_NS	(NETX_IRQ_VIC_START + 4)
+#define NETX_IRQ_SYSTIME_S	(NETX_IRQ_VIC_START + 5)
+#define NETX_IRQ_GPIO_15	(NETX_IRQ_VIC_START + 6)
+#define NETX_IRQ_WATCHDOG	(NETX_IRQ_VIC_START + 7)
+#define NETX_IRQ_UART0		(NETX_IRQ_VIC_START + 8)
+#define NETX_IRQ_UART1		(NETX_IRQ_VIC_START + 9)
+#define NETX_IRQ_UART2		(NETX_IRQ_VIC_START + 10)
+#define NETX_IRQ_USB		(NETX_IRQ_VIC_START + 11)
+#define NETX_IRQ_SPI		(NETX_IRQ_VIC_START + 12)
+#define NETX_IRQ_I2C		(NETX_IRQ_VIC_START + 13)
+#define NETX_IRQ_LCD		(NETX_IRQ_VIC_START + 14)
+#define NETX_IRQ_HIF		(NETX_IRQ_VIC_START + 15)
+#define NETX_IRQ_GPIO_0_14	(NETX_IRQ_VIC_START + 16)
+#define NETX_IRQ_XPEC0		(NETX_IRQ_VIC_START + 17)
+#define NETX_IRQ_XPEC1		(NETX_IRQ_VIC_START + 18)
+#define NETX_IRQ_XPEC2		(NETX_IRQ_VIC_START + 19)
+#define NETX_IRQ_XPEC3		(NETX_IRQ_VIC_START + 20)
+#define NETX_IRQ_XPEC(no)	(NETX_IRQ_VIC_START + 17 + (no))
+#define NETX_IRQ_MSYNC0		(NETX_IRQ_VIC_START + 21)
+#define NETX_IRQ_MSYNC1		(NETX_IRQ_VIC_START + 22)
+#define NETX_IRQ_MSYNC2		(NETX_IRQ_VIC_START + 23)
+#define NETX_IRQ_MSYNC3		(NETX_IRQ_VIC_START + 24)
+#define NETX_IRQ_IRQ_PHY	(NETX_IRQ_VIC_START + 25)
+#define NETX_IRQ_ISO_AREA	(NETX_IRQ_VIC_START + 26)
 /* int 27 is reserved */
 /* int 28 is reserved */
-#define NETX_IRQ_TIMER3     29
-#define NETX_IRQ_TIMER4     30
+#define NETX_IRQ_TIMER3		(NETX_IRQ_VIC_START + 29)
+#define NETX_IRQ_TIMER4		(NETX_IRQ_VIC_START + 30)
 /* int 31 is reserved */
 
-#define NETX_IRQS 32
+#define NETX_IRQS 		(NETX_IRQ_VIC_START + 32)
 
 /* for multiplexed irqs on gpio 0..14 */
 #define NETX_IRQ_GPIO(x) (NETX_IRQS + (x))

+ 18 - 11
arch/arm/mm/context.c

@@ -152,9 +152,9 @@ static int is_reserved_asid(u64 asid)
 	return 0;
 }
 
-static void new_context(struct mm_struct *mm, unsigned int cpu)
+static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 {
-	u64 asid = mm->context.id;
+	u64 asid = atomic64_read(&mm->context.id);
 	u64 generation = atomic64_read(&asid_generation);
 
 	if (asid != 0 && is_reserved_asid(asid)) {
@@ -181,13 +181,14 @@ static void new_context(struct mm_struct *mm, unsigned int cpu)
 		cpumask_clear(mm_cpumask(mm));
 	}
 
-	mm->context.id = asid;
+	return asid;
 }
 
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 {
 	unsigned long flags;
 	unsigned int cpu = smp_processor_id();
+	u64 asid;
 
 	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
 		__check_vmalloc_seq(mm);
@@ -198,20 +199,26 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 	 */
 	cpu_set_reserved_ttbr0();
 
-	if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
-	    && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id))
+	asid = atomic64_read(&mm->context.id);
+	if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
+	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
 		goto switch_mm_fastpath;
 
 	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
 	/* Check that our ASID belongs to the current generation. */
-	if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
-		new_context(mm, cpu);
-
-	atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
-	cpumask_set_cpu(cpu, mm_cpumask(mm));
+	asid = atomic64_read(&mm->context.id);
+	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
+		asid = new_context(mm, cpu);
+		atomic64_set(&mm->context.id, asid);
+	}
 
-	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
+	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
+		local_flush_bp_all();
 		local_flush_tlb_all();
+	}
+
+	atomic64_set(&per_cpu(active_asids, cpu), asid);
+	cpumask_set_cpu(cpu, mm_cpumask(mm));
 	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
 switch_mm_fastpath:

+ 1 - 0
arch/arm/mm/idmap.c

@@ -141,6 +141,7 @@ void setup_mm_for_reboot(void)
 {
 	/* Switch to the identity mapping. */
 	cpu_switch_mm(idmap_pgd, &init_mm);
+	local_flush_bp_all();
 
 #ifdef CONFIG_CPU_HAS_ASID
 	/*

+ 1 - 1
arch/arm/mm/proc-v7-3level.S

@@ -48,7 +48,7 @@
 ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_MMU
 	mmid	r1, r1				@ get mm->context.id
-	and	r3, r1, #0xff
+	asid	r3, r1
 	mov	r3, r3, lsl #(48 - 32)		@ ASID
 	mcrr	p15, 0, r0, r3, c2		@ set TTB 0
 	isb

+ 1 - 0
arch/ia64/kernel/perfmon.c

@@ -619,6 +619,7 @@ static struct file_system_type pfm_fs_type = {
 	.mount    = pfmfs_mount,
 	.kill_sb  = kill_anon_super,
 };
+MODULE_ALIAS_FS("pfmfs");
 
 DEFINE_PER_CPU(unsigned long, pfm_syst_info);
 DEFINE_PER_CPU(struct task_struct *, pmu_owner);

+ 0 - 3
arch/metag/include/asm/elf.h

@@ -100,9 +100,6 @@ typedef unsigned long elf_fpregset_t;
 
 #define ELF_PLATFORM  (NULL)
 
-#define SET_PERSONALITY(ex) \
-	set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-
 #define STACK_RND_MASK (0)
 
 #ifdef CONFIG_METAG_USER_TCM

+ 1 - 0
arch/metag/mm/Kconfig

@@ -40,6 +40,7 @@ endchoice
 
 config NUMA
 	bool "Non Uniform Memory Access (NUMA) Support"
+	select ARCH_WANT_NUMA_VARIABLE_LOCALITY
 	help
 	  Some Meta systems have MMU-mappable on-chip memories with
 	  lower latencies than main memory. This enables support for

+ 1 - 0
arch/powerpc/platforms/cell/spufs/inode.c

@@ -749,6 +749,7 @@ static struct file_system_type spufs_type = {
 	.mount = spufs_mount,
 	.kill_sb = kill_litter_super,
 };
+MODULE_ALIAS_FS("spufs");
 
 static int __init spufs_init(void)
 {

+ 1 - 0
arch/s390/hypfs/inode.c

@@ -456,6 +456,7 @@ static struct file_system_type hypfs_type = {
 	.mount		= hypfs_mount,
 	.kill_sb	= hypfs_kill_super
 };
+MODULE_ALIAS_FS("s390_hypfs");
 
 static const struct super_operations hypfs_s_ops = {
 	.statfs		= simple_statfs,

+ 1 - 0
arch/s390/include/asm/cpu_mf.h

@@ -12,6 +12,7 @@
 #ifndef _ASM_S390_CPU_MF_H
 #define _ASM_S390_CPU_MF_H
 
+#include <linux/errno.h>
 #include <asm/facility.h>
 
 #define CPU_MF_INT_SF_IAE	(1 << 31)	/* invalid entry address */

+ 3 - 0
arch/tile/include/asm/compat.h

@@ -288,6 +288,9 @@ long compat_sys_sync_file_range2(int fd, unsigned int flags,
 long compat_sys_fallocate(int fd, int mode,
 			  u32 offset_lo, u32 offset_hi,
 			  u32 len_lo, u32 len_hi);
+long compat_sys_llseek(unsigned int fd, unsigned int offset_high,
+		       unsigned int offset_low, loff_t __user * result,
+		       unsigned int origin);
 
 /* Assembly trampoline to avoid clobbering r0. */
 long _compat_sys_rt_sigreturn(void);

+ 29 - 13
arch/tile/kernel/compat.c

@@ -32,50 +32,65 @@
  * adapt the usual convention.
  */
 
-long compat_sys_truncate64(char __user *filename, u32 dummy, u32 low, u32 high)
+COMPAT_SYSCALL_DEFINE4(truncate64, char __user *, filename, u32, dummy,
+                       u32, low, u32, high)
 {
 	return sys_truncate(filename, ((loff_t)high << 32) | low);
 }
 
-long compat_sys_ftruncate64(unsigned int fd, u32 dummy, u32 low, u32 high)
+COMPAT_SYSCALL_DEFINE4(ftruncate64, unsigned int, fd, u32, dummy,
+                       u32, low, u32, high)
 {
 	return sys_ftruncate(fd, ((loff_t)high << 32) | low);
 }
 
-long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count,
-			u32 dummy, u32 low, u32 high)
+COMPAT_SYSCALL_DEFINE6(pread64, unsigned int, fd, char __user *, ubuf,
+                       size_t, count, u32, dummy, u32, low, u32, high)
 {
 	return sys_pread64(fd, ubuf, count, ((loff_t)high << 32) | low);
 }
 
-long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count,
-			 u32 dummy, u32 low, u32 high)
+COMPAT_SYSCALL_DEFINE6(pwrite64, unsigned int, fd, char __user *, ubuf,
+                       size_t, count, u32, dummy, u32, low, u32, high)
 {
 	return sys_pwrite64(fd, ubuf, count, ((loff_t)high << 32) | low);
 }
 
-long compat_sys_lookup_dcookie(u32 low, u32 high, char __user *buf, size_t len)
+COMPAT_SYSCALL_DEFINE4(lookup_dcookie, u32, low, u32, high,
+                       char __user *, buf, size_t, len)
 {
 	return sys_lookup_dcookie(((loff_t)high << 32) | low, buf, len);
 }
 
-long compat_sys_sync_file_range2(int fd, unsigned int flags,
-				 u32 offset_lo, u32 offset_hi,
-				 u32 nbytes_lo, u32 nbytes_hi)
+COMPAT_SYSCALL_DEFINE6(sync_file_range2, int, fd, unsigned int, flags,
+                       u32, offset_lo, u32, offset_hi,
+                       u32, nbytes_lo, u32, nbytes_hi)
 {
 	return sys_sync_file_range(fd, ((loff_t)offset_hi << 32) | offset_lo,
 				   ((loff_t)nbytes_hi << 32) | nbytes_lo,
 				   flags);
 }
 
-long compat_sys_fallocate(int fd, int mode,
-			  u32 offset_lo, u32 offset_hi,
-			  u32 len_lo, u32 len_hi)
+COMPAT_SYSCALL_DEFINE6(fallocate, int, fd, int, mode,
+                       u32, offset_lo, u32, offset_hi,
+                       u32, len_lo, u32, len_hi)
 {
 	return sys_fallocate(fd, mode, ((loff_t)offset_hi << 32) | offset_lo,
 			     ((loff_t)len_hi << 32) | len_lo);
 }
 
+/*
+ * Avoid bug in generic sys_llseek() that specifies offset_high and
+ * offset_low as "unsigned long", thus making it possible to pass
+ * a sign-extended high 32 bits in offset_low.
+ */
+COMPAT_SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned int, offset_high,
+		       unsigned int, offset_low, loff_t __user *, result,
+		       unsigned int, origin)
+{
+	return sys_llseek(fd, offset_high, offset_low, result, origin);
+}
+ 
 /* Provide the compat syscall number to call mapping. */
 #undef __SYSCALL
 #define __SYSCALL(nr, call) [nr] = (call),
@@ -83,6 +98,7 @@ long compat_sys_fallocate(int fd, int mode,
 /* See comments in sys.c */
 #define compat_sys_fadvise64_64 sys32_fadvise64_64
 #define compat_sys_readahead sys32_readahead
+#define sys_llseek compat_sys_llseek
 
 /* Call the assembly trampolines where necessary. */
 #define compat_sys_rt_sigreturn _compat_sys_rt_sigreturn

+ 1 - 1
arch/um/drivers/chan.h

@@ -37,7 +37,7 @@ extern int console_write_chan(struct chan *chan, const char *buf,
 extern int console_open_chan(struct line *line, struct console *co);
 extern void deactivate_chan(struct chan *chan, int irq);
 extern void reactivate_chan(struct chan *chan, int irq);
-extern void chan_enable_winch(struct chan *chan, struct tty_struct *tty);
+extern void chan_enable_winch(struct chan *chan, struct tty_port *port);
 extern int enable_chan(struct line *line);
 extern void close_chan(struct line *line);
 extern int chan_window_size(struct line *line, 

+ 2 - 2
arch/um/drivers/chan_kern.c

@@ -122,10 +122,10 @@ static int open_chan(struct list_head *chans)
 	return err;
 }
 
-void chan_enable_winch(struct chan *chan, struct tty_struct *tty)
+void chan_enable_winch(struct chan *chan, struct tty_port *port)
 {
 	if (chan && chan->primary && chan->ops->winch)
-		register_winch(chan->fd, tty);
+		register_winch(chan->fd, port);
 }
 
 static void line_timer_cb(struct work_struct *work)

+ 6 - 6
arch/um/drivers/chan_user.c

@@ -216,7 +216,7 @@ static int winch_thread(void *arg)
 	}
 }
 
-static int winch_tramp(int fd, struct tty_struct *tty, int *fd_out,
+static int winch_tramp(int fd, struct tty_port *port, int *fd_out,
 		       unsigned long *stack_out)
 {
 	struct winch_data data;
@@ -271,7 +271,7 @@ static int winch_tramp(int fd, struct tty_struct *tty, int *fd_out,
 	return err;
 }
 
-void register_winch(int fd, struct tty_struct *tty)
+void register_winch(int fd, struct tty_port *port)
 {
 	unsigned long stack;
 	int pid, thread, count, thread_fd = -1;
@@ -281,17 +281,17 @@ void register_winch(int fd, struct tty_struct *tty)
 		return;
 
 	pid = tcgetpgrp(fd);
-	if (is_skas_winch(pid, fd, tty)) {
-		register_winch_irq(-1, fd, -1, tty, 0);
+	if (is_skas_winch(pid, fd, port)) {
+		register_winch_irq(-1, fd, -1, port, 0);
 		return;
 	}
 
 	if (pid == -1) {
-		thread = winch_tramp(fd, tty, &thread_fd, &stack);
+		thread = winch_tramp(fd, port, &thread_fd, &stack);
 		if (thread < 0)
 			return;
 
-		register_winch_irq(thread_fd, fd, thread, tty, stack);
+		register_winch_irq(thread_fd, fd, thread, port, stack);
 
 		count = write(thread_fd, &c, sizeof(c));
 		if (count != sizeof(c))

+ 3 - 3
arch/um/drivers/chan_user.h

@@ -38,10 +38,10 @@ extern int generic_window_size(int fd, void *unused, unsigned short *rows_out,
 			       unsigned short *cols_out);
 extern void generic_free(void *data);
 
-struct tty_struct;
-extern void register_winch(int fd,  struct tty_struct *tty);
+struct tty_port;
+extern void register_winch(int fd,  struct tty_port *port);
 extern void register_winch_irq(int fd, int tty_fd, int pid,
-			       struct tty_struct *tty, unsigned long stack);
+			       struct tty_port *port, unsigned long stack);
 
 #define __channel_help(fn, prefix) \
 __uml_help(fn, prefix "[0-9]*=<channel description>\n" \

+ 24 - 18
arch/um/drivers/line.c

@@ -305,7 +305,7 @@ static int line_activate(struct tty_port *port, struct tty_struct *tty)
 		return ret;
 
 	if (!line->sigio) {
-		chan_enable_winch(line->chan_out, tty);
+		chan_enable_winch(line->chan_out, port);
 		line->sigio = 1;
 	}
 
@@ -315,8 +315,22 @@ static int line_activate(struct tty_port *port, struct tty_struct *tty)
 	return 0;
 }
 
+static void unregister_winch(struct tty_struct *tty);
+
+static void line_destruct(struct tty_port *port)
+{
+	struct tty_struct *tty = tty_port_tty_get(port);
+	struct line *line = tty->driver_data;
+
+	if (line->sigio) {
+		unregister_winch(tty);
+		line->sigio = 0;
+	}
+}
+
 static const struct tty_port_operations line_port_ops = {
 	.activate = line_activate,
+	.destruct = line_destruct,
 };
 
 int line_open(struct tty_struct *tty, struct file *filp)
@@ -340,18 +354,6 @@ int line_install(struct tty_driver *driver, struct tty_struct *tty,
 	return 0;
 }
 
-static void unregister_winch(struct tty_struct *tty);
-
-void line_cleanup(struct tty_struct *tty)
-{
-	struct line *line = tty->driver_data;
-
-	if (line->sigio) {
-		unregister_winch(tty);
-		line->sigio = 0;
-	}
-}
-
 void line_close(struct tty_struct *tty, struct file * filp)
 {
 	struct line *line = tty->driver_data;
@@ -601,7 +603,7 @@ struct winch {
 	int fd;
 	int tty_fd;
 	int pid;
-	struct tty_struct *tty;
+	struct tty_port *port;
 	unsigned long stack;
 	struct work_struct work;
 };
@@ -655,7 +657,7 @@ static irqreturn_t winch_interrupt(int irq, void *data)
 			goto out;
 		}
 	}
-	tty = winch->tty;
+	tty = tty_port_tty_get(winch->port);
 	if (tty != NULL) {
 		line = tty->driver_data;
 		if (line != NULL) {
@@ -663,6 +665,7 @@ static irqreturn_t winch_interrupt(int irq, void *data)
 					 &tty->winsize.ws_col);
 			kill_pgrp(tty->pgrp, SIGWINCH, 1);
 		}
+		tty_kref_put(tty);
 	}
  out:
 	if (winch->fd != -1)
@@ -670,7 +673,7 @@ static irqreturn_t winch_interrupt(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty,
+void register_winch_irq(int fd, int tty_fd, int pid, struct tty_port *port,
 			unsigned long stack)
 {
 	struct winch *winch;
@@ -685,7 +688,7 @@ void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty,
 				   .fd  	= fd,
 				   .tty_fd 	= tty_fd,
 				   .pid  	= pid,
-				   .tty 	= tty,
+				   .port 	= port,
 				   .stack	= stack });
 
 	if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt,
@@ -714,15 +717,18 @@ static void unregister_winch(struct tty_struct *tty)
 {
 	struct list_head *ele, *next;
 	struct winch *winch;
+	struct tty_struct *wtty;
 
 	spin_lock(&winch_handler_lock);
 
 	list_for_each_safe(ele, next, &winch_handlers) {
 		winch = list_entry(ele, struct winch, list);
-		if (winch->tty == tty) {
+		wtty = tty_port_tty_get(winch->port);
+		if (wtty == tty) {
 			free_winch(winch);
 			break;
 		}
+		tty_kref_put(wtty);
 	}
 	spin_unlock(&winch_handler_lock);
 }

+ 2 - 0
arch/um/drivers/net_kern.c

@@ -218,6 +218,7 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	spin_lock_irqsave(&lp->lock, flags);
 
 	len = (*lp->write)(lp->fd, skb, lp);
+	skb_tx_timestamp(skb);
 
 	if (len == skb->len) {
 		dev->stats.tx_packets++;
@@ -281,6 +282,7 @@ static void uml_net_get_drvinfo(struct net_device *dev,
 static const struct ethtool_ops uml_net_ethtool_ops = {
 	.get_drvinfo	= uml_net_get_drvinfo,
 	.get_link	= ethtool_op_get_link,
+	.get_ts_info	= ethtool_op_get_ts_info,
 };
 
 static void uml_net_user_timer_expire(unsigned long _conn)

+ 0 - 1
arch/um/drivers/ssl.c

@@ -105,7 +105,6 @@ static const struct tty_operations ssl_ops = {
 	.throttle 		= line_throttle,
 	.unthrottle 		= line_unthrottle,
 	.install		= ssl_install,
-	.cleanup		= line_cleanup,
 	.hangup			= line_hangup,
 };
 

+ 0 - 1
arch/um/drivers/stdio_console.c

@@ -110,7 +110,6 @@ static const struct tty_operations console_ops = {
 	.set_termios 		= line_set_termios,
 	.throttle 		= line_throttle,
 	.unthrottle 		= line_unthrottle,
-	.cleanup		= line_cleanup,
 	.hangup			= line_hangup,
 };
 

+ 1 - 1
arch/um/os-Linux/signal.c

@@ -15,7 +15,7 @@
 #include <sysdep/mcontext.h>
 #include "internal.h"
 
-void (*sig_info[NSIG])(int, siginfo_t *, struct uml_pt_regs *) = {
+void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
 	[SIGTRAP]	= relay_signal,
 	[SIGFPE]	= relay_signal,
 	[SIGILL]	= relay_signal,

+ 2 - 0
arch/um/os-Linux/start_up.c

@@ -15,6 +15,8 @@
 #include <sys/mman.h>
 #include <sys/stat.h>
 #include <sys/wait.h>
+#include <sys/time.h>
+#include <sys/resource.h>
 #include <asm/unistd.h>
 #include <init.h>
 #include <os.h>

+ 18 - 2
arch/x86/include/asm/bootparam_utils.h

@@ -14,13 +14,29 @@
  * analysis of kexec-tools; if other broken bootloaders initialize a
  * different set of fields we will need to figure out how to disambiguate.
  *
+ * Note: efi_info is commonly left uninitialized, but that field has a
+ * private magic, so it is better to leave it unchanged.
  */
 static void sanitize_boot_params(struct boot_params *boot_params)
 {
+	/* 
+	 * IMPORTANT NOTE TO BOOTLOADER AUTHORS: do not simply clear
+	 * this field.  The purpose of this field is to guarantee
+	 * compliance with the x86 boot spec located in
+	 * Documentation/x86/boot.txt .  That spec says that the
+	 * *whole* structure should be cleared, after which only the
+	 * portion defined by struct setup_header (boot_params->hdr)
+	 * should be copied in.
+	 *
+	 * If you're having an issue because the sentinel is set, you
+	 * need to change the whole structure to be cleared, not this
+	 * (or any other) individual field, or you will soon have
+	 * problems again.
+	 */
 	if (boot_params->sentinel) {
-		/*fields in boot_params are not valid, clear them */
+		/* fields in boot_params are left uninitialized, clear them */
 		memset(&boot_params->olpc_ofw_header, 0,
-		       (char *)&boot_params->alt_mem_k -
+		       (char *)&boot_params->efi_info -
 			(char *)&boot_params->olpc_ofw_header);
 		memset(&boot_params->kbd_status, 0,
 		       (char *)&boot_params->hdr -

+ 8 - 2
arch/x86/kernel/setup.c

@@ -171,9 +171,15 @@ static struct resource bss_resource = {
 
 #ifdef CONFIG_X86_32
 /* cpu data as detected by the assembly code in head.S */
-struct cpuinfo_x86 new_cpu_data __cpuinitdata = {0, 0, 0, 0, -1, 1, 0, 0, -1};
+struct cpuinfo_x86 new_cpu_data __cpuinitdata = {
+	.wp_works_ok = -1,
+	.fdiv_bug = -1,
+};
 /* common cpu data for all cpus */
-struct cpuinfo_x86 boot_cpu_data __read_mostly = {0, 0, 0, 0, -1, 1, 0, 0, -1};
+struct cpuinfo_x86 boot_cpu_data __read_mostly = {
+	.wp_works_ok = -1,
+	.fdiv_bug = -1,
+};
 EXPORT_SYMBOL(boot_cpu_data);
 
 unsigned int def_to_bigsmp;

+ 1 - 2
arch/x86/kernel/smpboot.c

@@ -1365,9 +1365,8 @@ static inline void mwait_play_dead(void)
 	unsigned int eax, ebx, ecx, edx;
 	unsigned int highest_cstate = 0;
 	unsigned int highest_subcstate = 0;
-	int i;
 	void *mwait_ptr;
-	struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);
+	int i;
 
 	if (!this_cpu_has(X86_FEATURE_MWAIT))
 		return;

+ 2 - 3
arch/x86/mm/init.c

@@ -410,9 +410,8 @@ void __init init_mem_mapping(void)
 	/* the ISA range is always mapped regardless of memory holes */
 	init_memory_mapping(0, ISA_END_ADDRESS);
 
-	/* xen has big range in reserved near end of ram, skip it at first */
-	addr = memblock_find_in_range(ISA_END_ADDRESS, end, PMD_SIZE,
-			 PAGE_SIZE);
+	/* xen has big range in reserved near end of ram, skip it at first.*/
+	addr = memblock_find_in_range(ISA_END_ADDRESS, end, PMD_SIZE, PMD_SIZE);
 	real_end = addr + PMD_SIZE;
 
 	/* step_size need to be small so pgt_buf from BRK could cover it */

+ 7 - 0
arch/x86/mm/pat.c

@@ -563,6 +563,13 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
 	if (base > __pa(high_memory-1))
 		return 0;
 
+	/*
+	 * some areas in the middle of the kernel identity range
+	 * are not mapped, like the PCI space.
+	 */
+	if (!page_is_ram(base >> PAGE_SHIFT))
+		return 0;
+
 	id_sz = (__pa(high_memory-1) <= base + size) ?
 				__pa(high_memory) - base :
 				size;

+ 9 - 46
drivers/acpi/glue.c

@@ -36,12 +36,11 @@ int register_acpi_bus_type(struct acpi_bus_type *type)
 {
 	if (acpi_disabled)
 		return -ENODEV;
-	if (type && type->bus && type->find_device) {
+	if (type && type->match && type->find_device) {
 		down_write(&bus_type_sem);
 		list_add_tail(&type->list, &bus_type_list);
 		up_write(&bus_type_sem);
-		printk(KERN_INFO PREFIX "bus type %s registered\n",
-		       type->bus->name);
+		printk(KERN_INFO PREFIX "bus type %s registered\n", type->name);
 		return 0;
 	}
 	return -ENODEV;
@@ -56,24 +55,21 @@ int unregister_acpi_bus_type(struct acpi_bus_type *type)
 		down_write(&bus_type_sem);
 		list_del_init(&type->list);
 		up_write(&bus_type_sem);
-		printk(KERN_INFO PREFIX "ACPI bus type %s unregistered\n",
-		       type->bus->name);
+		printk(KERN_INFO PREFIX "bus type %s unregistered\n",
+		       type->name);
 		return 0;
 	}
 	return -ENODEV;
 }
 EXPORT_SYMBOL_GPL(unregister_acpi_bus_type);
 
-static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type)
+static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
 {
 	struct acpi_bus_type *tmp, *ret = NULL;
 
-	if (!type)
-		return NULL;
-
 	down_read(&bus_type_sem);
 	list_for_each_entry(tmp, &bus_type_list, list) {
-		if (tmp->bus == type) {
+		if (tmp->match(dev)) {
 			ret = tmp;
 			break;
 		}
@@ -82,22 +78,6 @@ static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type)
 	return ret;
 }
 
-static int acpi_find_bridge_device(struct device *dev, acpi_handle * handle)
-{
-	struct acpi_bus_type *tmp;
-	int ret = -ENODEV;
-
-	down_read(&bus_type_sem);
-	list_for_each_entry(tmp, &bus_type_list, list) {
-		if (tmp->find_bridge && !tmp->find_bridge(dev, handle)) {
-			ret = 0;
-			break;
-		}
-	}
-	up_read(&bus_type_sem);
-	return ret;
-}
-
 static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used,
 				      void *addr_p, void **ret_p)
 {
@@ -261,29 +241,12 @@ err:
 
 static int acpi_platform_notify(struct device *dev)
 {
-	struct acpi_bus_type *type;
+	struct acpi_bus_type *type = acpi_get_bus_type(dev);
 	acpi_handle handle;
 	int ret;
 
 	ret = acpi_bind_one(dev, NULL);
-	if (ret && (!dev->bus || !dev->parent)) {
-		/* bridge devices genernally haven't bus or parent */
-		ret = acpi_find_bridge_device(dev, &handle);
-		if (!ret) {
-			ret = acpi_bind_one(dev, handle);
-			if (ret)
-				goto out;
-		}
-	}
-
-	type = acpi_get_bus_type(dev->bus);
-	if (ret) {
-		if (!type || !type->find_device) {
-			DBG("No ACPI bus support for %s\n", dev_name(dev));
-			ret = -EINVAL;
-			goto out;
-		}
-
+	if (ret && type) {
 		ret = type->find_device(dev, &handle);
 		if (ret) {
 			DBG("Unable to get handle for %s\n", dev_name(dev));
@@ -316,7 +279,7 @@ static int acpi_platform_notify_remove(struct device *dev)
 {
 	struct acpi_bus_type *type;
 
-	type = acpi_get_bus_type(dev->bus);
+	type = acpi_get_bus_type(dev);
 	if (type && type->cleanup)
 		type->cleanup(dev);
 

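After this rework a subsystem is matched through a match(dev) callback and identified by a plain name string rather than a struct bus_type pointer; the libata hunk further down shows the matching .name conversion. A hedged sketch of a registration against the new interface, with everything named mybus_* invented for illustration and the callback prototypes assumed from how the hunks above call them:

/* Hypothetical client of the reworked struct acpi_bus_type. */
static bool mybus_match(struct device *dev)
{
	return dev->bus == &mybus_bus_type;	/* mybus_bus_type: this subsystem's bus (hypothetical) */
}

static int mybus_find_device(struct device *dev, acpi_handle *handle)
{
	/* look up the ACPI handle that corresponds to dev */
	return -ENODEV;
}

static struct acpi_bus_type mybus_acpi_bus = {
	.name		= "mybus",
	.match		= mybus_match,
	.find_device	= mybus_find_device,
};

/* register_acpi_bus_type(&mybus_acpi_bus) at subsystem init,
 * unregister_acpi_bus_type(&mybus_acpi_bus) on teardown. */
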
+ 1 - 2
drivers/acpi/processor_core.c

@@ -158,8 +158,7 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
 	}
 
 exit:
-	if (buffer.pointer)
-		kfree(buffer.pointer);
+	kfree(buffer.pointer);
 	return apic_id;
 }
 

+ 1 - 1
drivers/acpi/processor_driver.c

@@ -559,7 +559,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
 		return 0;
 #endif
 
-	BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
+	BUG_ON(pr->id >= nr_cpu_ids);
 
 	/*
 	 * Buggy BIOS check

+ 11 - 5
drivers/acpi/sleep.c

@@ -599,7 +599,6 @@ static void acpi_sleep_suspend_setup(void)
 		status = acpi_get_sleep_type_data(i, &type_a, &type_b);
 		if (ACPI_SUCCESS(status)) {
 			sleep_states[i] = 1;
-			pr_cont(" S%d", i);
 		}
 	}
 
@@ -742,7 +741,6 @@ static void acpi_sleep_hibernate_setup(void)
 	hibernation_set_ops(old_suspend_ordering ?
 			&acpi_hibernation_ops_old : &acpi_hibernation_ops);
 	sleep_states[ACPI_STATE_S4] = 1;
-	pr_cont(KERN_CONT " S4");
 	if (nosigcheck)
 		return;
 
@@ -788,6 +786,9 @@ int __init acpi_sleep_init(void)
 {
 	acpi_status status;
 	u8 type_a, type_b;
+	char supported[ACPI_S_STATE_COUNT * 3 + 1];
+	char *pos = supported;
+	int i;
 
 	if (acpi_disabled)
 		return 0;
@@ -795,7 +796,6 @@ int __init acpi_sleep_init(void)
 	acpi_sleep_dmi_check();
 
 	sleep_states[ACPI_STATE_S0] = 1;
-	pr_info(PREFIX "(supports S0");
 
 	acpi_sleep_suspend_setup();
 	acpi_sleep_hibernate_setup();
@@ -803,11 +803,17 @@ int __init acpi_sleep_init(void)
 	status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
 	if (ACPI_SUCCESS(status)) {
 		sleep_states[ACPI_STATE_S5] = 1;
-		pr_cont(" S5");
 		pm_power_off_prepare = acpi_power_off_prepare;
 		pm_power_off = acpi_power_off;
 	}
-	pr_cont(")\n");
+
+	supported[0] = 0;
+	for (i = 0; i < ACPI_S_STATE_COUNT; i++) {
+		if (sleep_states[i])
+			pos += sprintf(pos, " S%d", i);
+	}
+	pr_info(PREFIX "(supports%s)\n", supported);
+
 	/*
 	 * Register the tts_notifier to reboot notifier list so that the _TTS
 	 * object can also be evaluated when the system enters S5.

+ 1 - 6
drivers/ata/libata-acpi.c

@@ -1144,13 +1144,8 @@ static int ata_acpi_find_device(struct device *dev, acpi_handle *handle)
 		return -ENODEV;
 }
 
-static int ata_acpi_find_dummy(struct device *dev, acpi_handle *handle)
-{
-	return -ENODEV;
-}
-
 static struct acpi_bus_type ata_acpi_bus = {
-	.find_bridge = ata_acpi_find_dummy,
+	.name = "ATA",
 	.find_device = ata_acpi_find_device,
 };
 

+ 0 - 2
drivers/base/power/main.c

@@ -99,7 +99,6 @@ void device_pm_add(struct device *dev)
 		dev_warn(dev, "parent %s should not be sleeping\n",
 			dev_name(dev->parent));
 	list_add_tail(&dev->power.entry, &dpm_list);
-	dev_pm_qos_constraints_init(dev);
 	mutex_unlock(&dpm_list_mtx);
 }
 
@@ -113,7 +112,6 @@ void device_pm_remove(struct device *dev)
 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 	complete_all(&dev->power.completion);
 	mutex_lock(&dpm_list_mtx);
-	dev_pm_qos_constraints_destroy(dev);
 	list_del_init(&dev->power.entry);
 	mutex_unlock(&dpm_list_mtx);
 	device_wakeup_disable(dev);

+ 2 - 6
drivers/base/power/power.h

@@ -4,7 +4,7 @@ static inline void device_pm_init_common(struct device *dev)
 {
 	if (!dev->power.early_init) {
 		spin_lock_init(&dev->power.lock);
-		dev->power.power_state = PMSG_INVALID;
+		dev->power.qos = NULL;
 		dev->power.early_init = true;
 	}
 }
@@ -56,14 +56,10 @@ extern void device_pm_move_last(struct device *);
 
 static inline void device_pm_sleep_init(struct device *dev) {}
 
-static inline void device_pm_add(struct device *dev)
-{
-	dev_pm_qos_constraints_init(dev);
-}
+static inline void device_pm_add(struct device *dev) {}
 
 static inline void device_pm_remove(struct device *dev)
 {
-	dev_pm_qos_constraints_destroy(dev);
 	pm_runtime_remove(dev);
 }
 

+ 123 - 94
drivers/base/power/qos.c

@@ -41,6 +41,7 @@
 #include <linux/mutex.h>
 #include <linux/export.h>
 #include <linux/pm_runtime.h>
+#include <linux/err.h>
 
 #include "power.h"
 
@@ -61,7 +62,7 @@ enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
 	struct pm_qos_flags *pqf;
 	s32 val;
 
-	if (!qos)
+	if (IS_ERR_OR_NULL(qos))
 		return PM_QOS_FLAGS_UNDEFINED;
 
 	pqf = &qos->flags;
@@ -101,7 +102,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
  */
 s32 __dev_pm_qos_read_value(struct device *dev)
 {
-	return dev->power.qos ? pm_qos_read_value(&dev->power.qos->latency) : 0;
+	return IS_ERR_OR_NULL(dev->power.qos) ?
+		0 : pm_qos_read_value(&dev->power.qos->latency);
 }
 
 /**
@@ -198,20 +200,8 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
 	return 0;
 }
 
-/**
- * dev_pm_qos_constraints_init - Initalize device's PM QoS constraints pointer.
- * @dev: target device
- *
- * Called from the device PM subsystem during device insertion under
- * device_pm_lock().
- */
-void dev_pm_qos_constraints_init(struct device *dev)
-{
-	mutex_lock(&dev_pm_qos_mtx);
-	dev->power.qos = NULL;
-	dev->power.power_state = PMSG_ON;
-	mutex_unlock(&dev_pm_qos_mtx);
-}
+static void __dev_pm_qos_hide_latency_limit(struct device *dev);
+static void __dev_pm_qos_hide_flags(struct device *dev);
 
 /**
  * dev_pm_qos_constraints_destroy
@@ -226,16 +216,15 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 	struct pm_qos_constraints *c;
 	struct pm_qos_flags *f;
 
+	mutex_lock(&dev_pm_qos_mtx);
+
 	/*
 	 * If the device's PM QoS resume latency limit or PM QoS flags have been
 	 * exposed to user space, they have to be hidden at this point.
 	 */
-	dev_pm_qos_hide_latency_limit(dev);
-	dev_pm_qos_hide_flags(dev);
+	__dev_pm_qos_hide_latency_limit(dev);
+	__dev_pm_qos_hide_flags(dev);
 
-	mutex_lock(&dev_pm_qos_mtx);
-
-	dev->power.power_state = PMSG_INVALID;
 	qos = dev->power.qos;
 	if (!qos)
 		goto out;
@@ -257,7 +246,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 	}
 
 	spin_lock_irq(&dev->power.lock);
-	dev->power.qos = NULL;
+	dev->power.qos = ERR_PTR(-ENODEV);
 	spin_unlock_irq(&dev->power.lock);
 
 	kfree(c->notifiers);
@@ -301,32 +290,19 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
 		 "%s() called for already added request\n", __func__))
 		return -EINVAL;
 
-	req->dev = dev;
-
 	mutex_lock(&dev_pm_qos_mtx);
 
-	if (!dev->power.qos) {
-		if (dev->power.power_state.event == PM_EVENT_INVALID) {
-			/* The device has been removed from the system. */
-			req->dev = NULL;
-			ret = -ENODEV;
-			goto out;
-		} else {
-			/*
-			 * Allocate the constraints data on the first call to
-			 * add_request, i.e. only if the data is not already
-			 * allocated and if the device has not been removed.
-			 */
-			ret = dev_pm_qos_constraints_allocate(dev);
-		}
-	}
+	if (IS_ERR(dev->power.qos))
+		ret = -ENODEV;
+	else if (!dev->power.qos)
+		ret = dev_pm_qos_constraints_allocate(dev);
 
 	if (!ret) {
+		req->dev = dev;
 		req->type = type;
 		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
 	}
 
- out:
 	mutex_unlock(&dev_pm_qos_mtx);
 
 	return ret;
@@ -344,7 +320,14 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
 	s32 curr_value;
 	int ret = 0;
 
-	if (!req->dev->power.qos)
+	if (!req) /*guard against callers passing in null */
+		return -EINVAL;
+
+	if (WARN(!dev_pm_qos_request_active(req),
+		 "%s() called for unknown object\n", __func__))
+		return -EINVAL;
+
+	if (IS_ERR_OR_NULL(req->dev->power.qos))
 		return -ENODEV;
 
 	switch(req->type) {
@@ -386,6 +369,17 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
 {
 	int ret;
 
+	mutex_lock(&dev_pm_qos_mtx);
+	ret = __dev_pm_qos_update_request(req, new_value);
+	mutex_unlock(&dev_pm_qos_mtx);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
+
+static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+{
+	int ret;
+
 	if (!req) /*guard against callers passing in null */
 		return -EINVAL;
 
@@ -393,13 +387,13 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
 		 "%s() called for unknown object\n", __func__))
 		return -EINVAL;
 
-	mutex_lock(&dev_pm_qos_mtx);
-	ret = __dev_pm_qos_update_request(req, new_value);
-	mutex_unlock(&dev_pm_qos_mtx);
+	if (IS_ERR_OR_NULL(req->dev->power.qos))
+		return -ENODEV;
 
+	ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+	memset(req, 0, sizeof(*req));
 	return ret;
 }
-EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
 
 /**
  * dev_pm_qos_remove_request - modifies an existing qos request
@@ -418,26 +412,10 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
  */
 int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
 {
-	int ret = 0;
-
-	if (!req) /*guard against callers passing in null */
-		return -EINVAL;
-
-	if (WARN(!dev_pm_qos_request_active(req),
-		 "%s() called for unknown object\n", __func__))
-		return -EINVAL;
+	int ret;
 
 	mutex_lock(&dev_pm_qos_mtx);
-
-	if (req->dev->power.qos) {
-		ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
-				       PM_QOS_DEFAULT_VALUE);
-		memset(req, 0, sizeof(*req));
-	} else {
-		/* Return if the device has been removed */
-		ret = -ENODEV;
-	}
-
+	ret = __dev_pm_qos_remove_request(req);
 	mutex_unlock(&dev_pm_qos_mtx);
 	return ret;
 }
@@ -462,9 +440,10 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
 
 	mutex_lock(&dev_pm_qos_mtx);
 
-	if (!dev->power.qos)
-		ret = dev->power.power_state.event != PM_EVENT_INVALID ?
-			dev_pm_qos_constraints_allocate(dev) : -ENODEV;
+	if (IS_ERR(dev->power.qos))
+		ret = -ENODEV;
+	else if (!dev->power.qos)
+		ret = dev_pm_qos_constraints_allocate(dev);
 
 	if (!ret)
 		ret = blocking_notifier_chain_register(
@@ -493,7 +472,7 @@ int dev_pm_qos_remove_notifier(struct device *dev,
 	mutex_lock(&dev_pm_qos_mtx);
 
 	/* Silently return if the constraints object is not present. */
-	if (dev->power.qos)
+	if (!IS_ERR_OR_NULL(dev->power.qos))
 		retval = blocking_notifier_chain_unregister(
 				dev->power.qos->latency.notifiers,
 				notifier);
@@ -563,16 +542,20 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
 static void __dev_pm_qos_drop_user_request(struct device *dev,
 					   enum dev_pm_qos_req_type type)
 {
+	struct dev_pm_qos_request *req = NULL;
+
 	switch(type) {
 	case DEV_PM_QOS_LATENCY:
-		dev_pm_qos_remove_request(dev->power.qos->latency_req);
+		req = dev->power.qos->latency_req;
 		dev->power.qos->latency_req = NULL;
 		break;
 	case DEV_PM_QOS_FLAGS:
-		dev_pm_qos_remove_request(dev->power.qos->flags_req);
+		req = dev->power.qos->flags_req;
 		dev->power.qos->flags_req = NULL;
 		break;
 	}
+	__dev_pm_qos_remove_request(req);
+	kfree(req);
 }
 
 /**
@@ -588,36 +571,57 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
 	if (!device_is_registered(dev) || value < 0)
 		return -EINVAL;
 
-	if (dev->power.qos && dev->power.qos->latency_req)
-		return -EEXIST;
-
 	req = kzalloc(sizeof(*req), GFP_KERNEL);
 	if (!req)
 		return -ENOMEM;
 
 	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
-	if (ret < 0)
+	if (ret < 0) {
+		kfree(req);
 		return ret;
+	}
+
+	mutex_lock(&dev_pm_qos_mtx);
+
+	if (IS_ERR_OR_NULL(dev->power.qos))
+		ret = -ENODEV;
+	else if (dev->power.qos->latency_req)
+		ret = -EEXIST;
+
+	if (ret < 0) {
+		__dev_pm_qos_remove_request(req);
+		kfree(req);
+		goto out;
+	}
 
 	dev->power.qos->latency_req = req;
 	ret = pm_qos_sysfs_add_latency(dev);
 	if (ret)
 		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
 
+ out:
+	mutex_unlock(&dev_pm_qos_mtx);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
 
+static void __dev_pm_qos_hide_latency_limit(struct device *dev)
+{
+	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) {
+		pm_qos_sysfs_remove_latency(dev);
+		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+	}
+}
+
 /**
  * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
  * @dev: Device whose PM QoS latency limit is to be hidden from user space.
  */
 void dev_pm_qos_hide_latency_limit(struct device *dev)
 {
-	if (dev->power.qos && dev->power.qos->latency_req) {
-		pm_qos_sysfs_remove_latency(dev);
-		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
-	}
+	mutex_lock(&dev_pm_qos_mtx);
+	__dev_pm_qos_hide_latency_limit(dev);
+	mutex_unlock(&dev_pm_qos_mtx);
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
 
@@ -634,41 +638,61 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
 	if (!device_is_registered(dev))
 		return -EINVAL;
 
-	if (dev->power.qos && dev->power.qos->flags_req)
-		return -EEXIST;
-
 	req = kzalloc(sizeof(*req), GFP_KERNEL);
 	if (!req)
 		return -ENOMEM;
 
-	pm_runtime_get_sync(dev);
 	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
-	if (ret < 0)
-		goto fail;
+	if (ret < 0) {
+		kfree(req);
+		return ret;
+	}
+
+	pm_runtime_get_sync(dev);
+	mutex_lock(&dev_pm_qos_mtx);
+
+	if (IS_ERR_OR_NULL(dev->power.qos))
+		ret = -ENODEV;
+	else if (dev->power.qos->flags_req)
+		ret = -EEXIST;
+
+	if (ret < 0) {
+		__dev_pm_qos_remove_request(req);
+		kfree(req);
+		goto out;
+	}
 
 	dev->power.qos->flags_req = req;
 	ret = pm_qos_sysfs_add_flags(dev);
 	if (ret)
 		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
 
-fail:
+ out:
+	mutex_unlock(&dev_pm_qos_mtx);
 	pm_runtime_put(dev);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
 
+static void __dev_pm_qos_hide_flags(struct device *dev)
+{
+	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) {
+		pm_qos_sysfs_remove_flags(dev);
+		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+	}
+}
+
 /**
  * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
  * @dev: Device whose PM QoS flags are to be hidden from user space.
  */
 void dev_pm_qos_hide_flags(struct device *dev)
 {
-	if (dev->power.qos && dev->power.qos->flags_req) {
-		pm_qos_sysfs_remove_flags(dev);
-		pm_runtime_get_sync(dev);
-		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
-		pm_runtime_put(dev);
-	}
+	pm_runtime_get_sync(dev);
+	mutex_lock(&dev_pm_qos_mtx);
+	__dev_pm_qos_hide_flags(dev);
+	mutex_unlock(&dev_pm_qos_mtx);
+	pm_runtime_put(dev);
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
 
@@ -683,12 +707,14 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
 	s32 value;
 	int ret;
 
-	if (!dev->power.qos || !dev->power.qos->flags_req)
-		return -EINVAL;
-
 	pm_runtime_get_sync(dev);
 	mutex_lock(&dev_pm_qos_mtx);
 
+	if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	value = dev_pm_qos_requested_flags(dev);
 	if (set)
 		value |= mask;
@@ -697,9 +723,12 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
 
 	ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
 
+ out:
 	mutex_unlock(&dev_pm_qos_mtx);
 	pm_runtime_put(dev);
-
 	return ret;
 }
+#else /* !CONFIG_PM_RUNTIME */
+static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
+static void __dev_pm_qos_hide_flags(struct device *dev) {}
 #endif /* CONFIG_PM_RUNTIME */

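The qos pointer now moves through three states: NULL before the first request (allocated lazily), a valid pointer while in use, and ERR_PTR(-ENODEV) once dev_pm_qos_constraints_destroy() has run, which is why the checks above become IS_ERR()/IS_ERR_OR_NULL(). A small generic sketch of that poison-pointer convention; my_obj and the my_slot_* helpers are made-up names, not kernel API:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct my_obj { int value; };

static int my_slot_get(struct my_obj **slot)
{
	if (IS_ERR(*slot))
		return -ENODEV;			/* already torn down */
	if (!*slot) {
		*slot = kzalloc(sizeof(**slot), GFP_KERNEL);
		if (!*slot)
			return -ENOMEM;		/* lazy allocation failed */
	}
	return 0;
}

static void my_slot_destroy(struct my_obj **slot)
{
	if (IS_ERR_OR_NULL(*slot))
		return;
	kfree(*slot);
	*slot = ERR_PTR(-ENODEV);		/* poison instead of NULL */
}
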
+ 1 - 0
drivers/base/power/sysfs.c

@@ -708,6 +708,7 @@ void rpm_sysfs_remove(struct device *dev)
 
 void dpm_sysfs_remove(struct device *dev)
 {
+	dev_pm_qos_constraints_destroy(dev);
 	rpm_sysfs_remove(dev);
 	sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
 	sysfs_remove_group(&dev->kobj, &pm_attr_group);

+ 1 - 0
drivers/base/regmap/regmap-irq.c

@@ -184,6 +184,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
 		if (ret < 0) {
 			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
 				ret);
+			pm_runtime_put(map->dev);
 			return IRQ_NONE;
 		}
 	}

+ 8 - 4
drivers/char/random.c

@@ -852,6 +852,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
 		      int reserved)
 {
 	unsigned long flags;
+	int wakeup_write = 0;
 
 	/* Hold lock while accounting */
 	spin_lock_irqsave(&r->lock, flags);
@@ -873,10 +874,8 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
 		else
 			r->entropy_count = reserved;
 
-		if (r->entropy_count < random_write_wakeup_thresh) {
-			wake_up_interruptible(&random_write_wait);
-			kill_fasync(&fasync, SIGIO, POLL_OUT);
-		}
+		if (r->entropy_count < random_write_wakeup_thresh)
+			wakeup_write = 1;
 	}
 
 	DEBUG_ENT("debiting %zu entropy credits from %s%s\n",
@@ -884,6 +883,11 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
 
 	spin_unlock_irqrestore(&r->lock, flags);
 
+	if (wakeup_write) {
+		wake_up_interruptible(&random_write_wait);
+		kill_fasync(&fasync, SIGIO, POLL_OUT);
+	}
+
 	return nbytes;
 }
 

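The account() change records the wakeup decision while r->lock is held but performs wake_up_interruptible()/kill_fasync() only after dropping it, so the wait-queue and fasync work happens outside the spinlock. A minimal sketch of that decide-under-the-lock, act-after-unlock shape; my_lock, my_waitq, my_level and the threshold are placeholders:

#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

static DEFINE_SPINLOCK(my_lock);
static DECLARE_WAIT_QUEUE_HEAD(my_waitq);
static int my_level;

static void my_consume(int amount)
{
	unsigned long flags;
	bool wakeup = false;

	spin_lock_irqsave(&my_lock, flags);
	my_level -= amount;
	if (my_level < 0)
		my_level = 0;
	if (my_level < 16)			/* placeholder threshold */
		wakeup = true;			/* only record the decision here */
	spin_unlock_irqrestore(&my_lock, flags);

	if (wakeup)				/* act outside the spinlock */
		wake_up_interruptible(&my_waitq);
}
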
+ 1 - 1
drivers/cpufreq/cpufreq_governor.h

@@ -64,7 +64,7 @@ static void *get_cpu_dbs_info_s(int cpu)				\
  * dbs: used as a shortform for demand based switching. It helps to keep variable
  *	names smaller, simpler
  * cdbs: common dbs
- * on_*: On-demand governor
+ * od_*: On-demand governor
  * cs_*: Conservative governor
  */
 

+ 1 - 7
drivers/cpufreq/highbank-cpufreq.c

@@ -28,13 +28,7 @@
 
 static int hb_voltage_change(unsigned int freq)
 {
-	int i;
-	u32 msg[HB_CPUFREQ_IPC_LEN];
-
-	msg[0] = HB_CPUFREQ_CHANGE_NOTE;
-	msg[1] = freq / 1000000;
-	for (i = 2; i < HB_CPUFREQ_IPC_LEN; i++)
-		msg[i] = 0;
+	u32 msg[HB_CPUFREQ_IPC_LEN] = {HB_CPUFREQ_CHANGE_NOTE, freq / 1000000};
 
 	return pl320_ipc_transmit(msg);
 }

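The rewrite leans on C's initializer rule: when an array initializer lists fewer elements than the array holds, the remaining elements are zero-initialized, so the explicit clearing loop was redundant. For instance, with illustrative values only:

	u32 msg[8] = { 0x100, 1500 };	/* msg[2]..msg[7] are implicitly zero */
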
+ 14 - 28
drivers/cpufreq/intel_pstate.c

@@ -662,6 +662,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 
 	cpu = all_cpu_data[policy->cpu];
 
+	if (!policy->cpuinfo.max_freq)
+		return -ENODEV;
+
 	intel_pstate_get_min_max(cpu, &min, &max);
 
 	limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
@@ -747,37 +750,11 @@ static struct cpufreq_driver intel_pstate_driver = {
 	.owner		= THIS_MODULE,
 };
 
-static void intel_pstate_exit(void)
-{
-	int cpu;
-
-	sysfs_remove_group(intel_pstate_kobject,
-				&intel_pstate_attr_group);
-	debugfs_remove_recursive(debugfs_parent);
-
-	cpufreq_unregister_driver(&intel_pstate_driver);
-
-	if (!all_cpu_data)
-		return;
-
-	get_online_cpus();
-	for_each_online_cpu(cpu) {
-		if (all_cpu_data[cpu]) {
-			del_timer_sync(&all_cpu_data[cpu]->timer);
-			kfree(all_cpu_data[cpu]);
-		}
-	}
-
-	put_online_cpus();
-	vfree(all_cpu_data);
-}
-module_exit(intel_pstate_exit);
-
 static int __initdata no_load;
 
 static int __init intel_pstate_init(void)
 {
-	int rc = 0;
+	int cpu, rc = 0;
 	const struct x86_cpu_id *id;
 
 	if (no_load)
@@ -802,7 +779,16 @@ static int __init intel_pstate_init(void)
 	intel_pstate_sysfs_expose_params();
 	return rc;
 out:
-	intel_pstate_exit();
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
+		if (all_cpu_data[cpu]) {
+			del_timer_sync(&all_cpu_data[cpu]->timer);
+			kfree(all_cpu_data[cpu]);
+		}
+	}
+
+	put_online_cpus();
+	vfree(all_cpu_data);
 	return -ENODEV;
 }
 device_initcall(intel_pstate_init);

+ 2 - 3
drivers/firmware/dmi_scan.c

@@ -442,7 +442,6 @@ static int __init dmi_present(const char __iomem *p)
 static int __init smbios_present(const char __iomem *p)
 {
 	u8 buf[32];
-	int offset = 0;
 
 	memcpy_fromio(buf, p, 32);
 	if ((buf[5] < 32) && dmi_checksum(buf, buf[5])) {
@@ -461,9 +460,9 @@ static int __init smbios_present(const char __iomem *p)
 			dmi_ver = 0x0206;
 			break;
 		}
-		offset = 16;
+		return memcmp(p + 16, "_DMI_", 5) || dmi_present(p + 16);
 	}
-	return dmi_present(buf + offset);
+	return 1;
 }
 
 void __init dmi_scan_machine(void)

+ 97 - 34
drivers/firmware/efivars.c

@@ -426,6 +426,44 @@ get_var_data(struct efivars *efivars, struct efi_variable *var)
 	return status;
 }
 
+static efi_status_t
+check_var_size_locked(struct efivars *efivars, u32 attributes,
+			unsigned long size)
+{
+	u64 storage_size, remaining_size, max_size;
+	efi_status_t status;
+	const struct efivar_operations *fops = efivars->ops;
+
+	if (!efivars->ops->query_variable_info)
+		return EFI_UNSUPPORTED;
+
+	status = fops->query_variable_info(attributes, &storage_size,
+					   &remaining_size, &max_size);
+
+	if (status != EFI_SUCCESS)
+		return status;
+
+	if (!storage_size || size > remaining_size || size > max_size ||
+	    (remaining_size - size) < (storage_size / 2))
+		return EFI_OUT_OF_RESOURCES;
+
+	return status;
+}
+
+
+static efi_status_t
+check_var_size(struct efivars *efivars, u32 attributes, unsigned long size)
+{
+	efi_status_t status;
+	unsigned long flags;
+
+	spin_lock_irqsave(&efivars->lock, flags);
+	status = check_var_size_locked(efivars, attributes, size);
+	spin_unlock_irqrestore(&efivars->lock, flags);
+
+	return status;
+}
+
 static ssize_t
 efivar_guid_read(struct efivar_entry *entry, char *buf)
 {
@@ -547,11 +585,16 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
 	}
 
 	spin_lock_irq(&efivars->lock);
-	status = efivars->ops->set_variable(new_var->VariableName,
-					    &new_var->VendorGuid,
-					    new_var->Attributes,
-					    new_var->DataSize,
-					    new_var->Data);
+
+	status = check_var_size_locked(efivars, new_var->Attributes,
+	       new_var->DataSize + utf16_strsize(new_var->VariableName, 1024));
+
+	if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED)
+		status = efivars->ops->set_variable(new_var->VariableName,
+						    &new_var->VendorGuid,
+						    new_var->Attributes,
+						    new_var->DataSize,
+						    new_var->Data);
 
 	spin_unlock_irq(&efivars->lock);
 
@@ -702,8 +745,7 @@ static ssize_t efivarfs_file_write(struct file *file,
 	u32 attributes;
 	struct inode *inode = file->f_mapping->host;
 	unsigned long datasize = count - sizeof(attributes);
-	unsigned long newdatasize;
-	u64 storage_size, remaining_size, max_size;
+	unsigned long newdatasize, varsize;
 	ssize_t bytes = 0;
 
 	if (count < sizeof(attributes))
@@ -722,28 +764,18 @@ static ssize_t efivarfs_file_write(struct file *file,
 	 * amounts of memory. Pick a default size of 64K if
 	 * QueryVariableInfo() isn't supported by the firmware.
 	 */
-	spin_lock_irq(&efivars->lock);
-
-	if (!efivars->ops->query_variable_info)
-		status = EFI_UNSUPPORTED;
-	else {
-		const struct efivar_operations *fops = efivars->ops;
-		status = fops->query_variable_info(attributes, &storage_size,
-						   &remaining_size, &max_size);
-	}
 
-	spin_unlock_irq(&efivars->lock);
+	varsize = datasize + utf16_strsize(var->var.VariableName, 1024);
+	status = check_var_size(efivars, attributes, varsize);
 
 	if (status != EFI_SUCCESS) {
 		if (status != EFI_UNSUPPORTED)
 			return efi_status_to_err(status);
 
-		remaining_size = 65536;
+		if (datasize > 65536)
+			return -ENOSPC;
 	}
 
-	if (datasize > remaining_size)
-		return -ENOSPC;
-
 	data = kmalloc(datasize, GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
@@ -765,6 +797,19 @@ static ssize_t efivarfs_file_write(struct file *file,
 	 */
 	spin_lock_irq(&efivars->lock);
 
+	/*
+	 * Ensure that the available space hasn't shrunk below the safe level
+	 */
+
+	status = check_var_size_locked(efivars, attributes, varsize);
+
+	if (status != EFI_SUCCESS && status != EFI_UNSUPPORTED) {
+		spin_unlock_irq(&efivars->lock);
+		kfree(data);
+
+		return efi_status_to_err(status);
+	}
+
 	status = efivars->ops->set_variable(var->var.VariableName,
 					    &var->var.VendorGuid,
 					    attributes, datasize,
@@ -929,8 +974,8 @@ static bool efivarfs_valid_name(const char *str, int len)
 	if (len < GUID_LEN + 2)
 		return false;
 
-	/* GUID should be right after the first '-' */
-	if (s - 1 != strchr(str, '-'))
+	/* GUID must be preceded by a '-' */
+	if (*(s - 1) != '-')
 		return false;
 
 	/*
@@ -1118,15 +1163,22 @@ static struct dentry_operations efivarfs_d_ops = {
 
 static struct dentry *efivarfs_alloc_dentry(struct dentry *parent, char *name)
 {
+	struct dentry *d;
 	struct qstr q;
+	int err;
 
 	q.name = name;
 	q.len = strlen(name);
 
-	if (efivarfs_d_hash(NULL, NULL, &q))
-		return NULL;
+	err = efivarfs_d_hash(NULL, NULL, &q);
+	if (err)
+		return ERR_PTR(err);
+
+	d = d_alloc(parent, &q);
+	if (d)
+		return d;
 
-	return d_alloc(parent, &q);
+	return ERR_PTR(-ENOMEM);
 }
 
 static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
@@ -1136,6 +1188,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
 	struct efivar_entry *entry, *n;
 	struct efivars *efivars = &__efivars;
 	char *name;
+	int err = -ENOMEM;
 
 	efivarfs_sb = sb;
 
@@ -1186,8 +1239,10 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
 			goto fail_name;
 
 		dentry = efivarfs_alloc_dentry(root, name);
-		if (!dentry)
+		if (IS_ERR(dentry)) {
+			err = PTR_ERR(dentry);
 			goto fail_inode;
+		}
 
 		/* copied by the above to local storage in the dentry. */
 		kfree(name);
@@ -1214,7 +1269,7 @@ fail_inode:
 fail_name:
 	kfree(name);
 fail:
-	return -ENOMEM;
+	return err;
 }
 
 static struct dentry *efivarfs_mount(struct file_system_type *fs_type,
@@ -1234,6 +1289,7 @@ static struct file_system_type efivarfs_type = {
 	.mount   = efivarfs_mount,
 	.kill_sb = efivarfs_kill_sb,
 };
+MODULE_ALIAS_FS("efivarfs");
 
 /*
  * Handle negative dentry.
@@ -1345,7 +1401,6 @@ static int efi_pstore_write(enum pstore_type_id type,
 	efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
 	struct efivars *efivars = psi->data;
 	int i, ret = 0;
-	u64 storage_space, remaining_space, max_variable_size;
 	efi_status_t status = EFI_NOT_FOUND;
 	unsigned long flags;
 
@@ -1365,11 +1420,11 @@ static int efi_pstore_write(enum pstore_type_id type,
 	 * size: a size of logging data
 	 * DUMP_NAME_LEN * 2: a maximum size of variable name
 	 */
-	status = efivars->ops->query_variable_info(PSTORE_EFI_ATTRIBUTES,
-						   &storage_space,
-						   &remaining_space,
-						   &max_variable_size);
-	if (status || remaining_space < size + DUMP_NAME_LEN * 2) {
+
+	status = check_var_size_locked(efivars, PSTORE_EFI_ATTRIBUTES,
+					 size + DUMP_NAME_LEN * 2);
+
+	if (status) {
 		spin_unlock_irqrestore(&efivars->lock, flags);
 		*id = part;
 		return -ENOSPC;
@@ -1544,6 +1599,14 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
 		return -EINVAL;
 	}
 
+	status = check_var_size_locked(efivars, new_var->Attributes,
+	       new_var->DataSize + utf16_strsize(new_var->VariableName, 1024));
+
+	if (status && status != EFI_UNSUPPORTED) {
+		spin_unlock_irq(&efivars->lock);
+		return efi_status_to_err(status);
+	}
+
 	/* now *really* create the variable via EFI */
 	status = efivars->ops->set_variable(new_var->VariableName,
 					    &new_var->VendorGuid,

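check_var_size() is split into a _locked variant plus a thin wrapper that only takes and releases efivars->lock, so paths that already hold the lock (efivar_store_raw(), efi_pstore_write(), efivar_create()) can reuse the same check. A generic sketch of that *_locked helper convention; my_lock, my_budget and the my_check*() names are placeholders:

#include <linux/errno.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);
static unsigned long my_budget = 100;

/* Caller must already hold my_lock. */
static int my_check_locked(unsigned long size)
{
	return size <= my_budget ? 0 : -ENOSPC;
}

/* Convenience wrapper for callers that do not hold the lock. */
static int my_check(unsigned long size)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&my_lock, flags);
	ret = my_check_locked(size);
	spin_unlock_irqrestore(&my_lock, flags);

	return ret;
}
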
+ 18 - 7
drivers/gpu/drm/i915/i915_drv.c

@@ -379,15 +379,15 @@ static const struct pci_device_id pciidlist[] = {		/* aka */
 	INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
 	INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
 	INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
-	INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */
+	INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
+	INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
 	INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
-	INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* CRW GT2 desktop */
-	INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */
+	INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
+	INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
 	INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
-	INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */
-	INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */
+	INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
+	INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
 	INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
-	INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */
 	INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
 	INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
 	INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
@@ -495,6 +495,7 @@ static int i915_drm_freeze(struct drm_device *dev)
 		intel_modeset_disable(dev);
 
 		drm_irq_uninstall(dev);
+		dev_priv->enable_hotplug_processing = false;
 	}
 
 	i915_save_state(dev);
@@ -568,10 +569,20 @@ static int __i915_drm_thaw(struct drm_device *dev)
 		error = i915_gem_init_hw(dev);
 		mutex_unlock(&dev->struct_mutex);
 
+		/* We need working interrupts for modeset enabling ... */
+		drm_irq_install(dev);
+
 		intel_modeset_init_hw(dev);
 		intel_modeset_setup_hw_state(dev, false);
-		drm_irq_install(dev);
+
+		/*
+		 * ... but also need to make sure that hotplug processing
+		 * doesn't cause havoc. Like in the driver load code we don't
+	 * bother with the tiny race here where we might lose hotplug
+		 * notifications.
+		 * */
 		intel_hpd_init(dev);
+		dev_priv->enable_hotplug_processing = true;
 	}
 
 	intel_opregion_init(dev);

+ 24 - 2
drivers/gpu/drm/i915/i915_irq.c

@@ -701,7 +701,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u32 de_iir, gt_iir, de_ier, pm_iir;
+	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
 	irqreturn_t ret = IRQ_NONE;
 	int i;
 
@@ -711,6 +711,15 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
 	de_ier = I915_READ(DEIER);
 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
 
+	/* Disable south interrupts. We'll only write to SDEIIR once, so further
+	 * interrupts will be stored on its back queue, and then we'll be
+	 * able to process them after we restore SDEIER (as soon as we restore
+	 * it, we'll get an interrupt if SDEIIR still has something to process
+	 * due to its back queue). */
+	sde_ier = I915_READ(SDEIER);
+	I915_WRITE(SDEIER, 0);
+	POSTING_READ(SDEIER);
+
 	gt_iir = I915_READ(GTIIR);
 	if (gt_iir) {
 		snb_gt_irq_handler(dev, dev_priv, gt_iir);
@@ -759,6 +768,8 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
 
 	I915_WRITE(DEIER, de_ier);
 	POSTING_READ(DEIER);
+	I915_WRITE(SDEIER, sde_ier);
+	POSTING_READ(SDEIER);
 
 	return ret;
 }
@@ -778,7 +789,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 	struct drm_device *dev = (struct drm_device *) arg;
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	int ret = IRQ_NONE;
-	u32 de_iir, gt_iir, de_ier, pm_iir;
+	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
 
 	atomic_inc(&dev_priv->irq_received);
 
@@ -787,6 +798,15 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
 	POSTING_READ(DEIER);
 
+	/* Disable south interrupts. We'll only write to SDEIIR once, so further
+	 * interrupts will be stored on its back queue, and then we'll be
+	 * able to process them after we restore SDEIER (as soon as we restore
+	 * it, we'll get an interrupt if SDEIIR still has something to process
+	 * due to its back queue). */
+	sde_ier = I915_READ(SDEIER);
+	I915_WRITE(SDEIER, 0);
+	POSTING_READ(SDEIER);
+
 	de_iir = I915_READ(DEIIR);
 	gt_iir = I915_READ(GTIIR);
 	pm_iir = I915_READ(GEN6_PMIIR);
@@ -849,6 +869,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 done:
 	I915_WRITE(DEIER, de_ier);
 	POSTING_READ(DEIER);
+	I915_WRITE(SDEIER, sde_ier);
+	POSTING_READ(SDEIER);
 
 	return ret;
 }

+ 2 - 2
drivers/gpu/drm/i915/i915_reg.h

@@ -1613,9 +1613,9 @@
 #define   ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
 #define   ADPA_USE_VGA_HVPOLARITY (1<<15)
 #define   ADPA_SETS_HVPOLARITY	0
-#define   ADPA_VSYNC_CNTL_DISABLE (1<<11)
+#define   ADPA_VSYNC_CNTL_DISABLE (1<<10)
 #define   ADPA_VSYNC_CNTL_ENABLE 0
-#define   ADPA_HSYNC_CNTL_DISABLE (1<<10)
+#define   ADPA_HSYNC_CNTL_DISABLE (1<<11)
 #define   ADPA_HSYNC_CNTL_ENABLE 0
 #define   ADPA_VSYNC_ACTIVE_HIGH (1<<4)
 #define   ADPA_VSYNC_ACTIVE_LOW	0

+ 1 - 1
drivers/gpu/drm/i915/intel_crt.c

@@ -88,7 +88,7 @@ static void intel_disable_crt(struct intel_encoder *encoder)
 	u32 temp;
 
 	temp = I915_READ(crt->adpa_reg);
-	temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+	temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
 	temp &= ~ADPA_DAC_ENABLE;
 	I915_WRITE(crt->adpa_reg, temp);
 }

+ 1 - 1
drivers/gpu/drm/i915/intel_ddi.c

@@ -1391,8 +1391,8 @@ void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
 	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
 	enum port port = intel_dig_port->port;
-	bool wait;
 	uint32_t val;
+	bool wait = false;
 
 	if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
 		val = I915_READ(DDI_BUF_CTL(port));

+ 30 - 7
drivers/gpu/drm/i915/intel_display.c

@@ -3604,6 +3604,30 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
 	 */
 }
 
+/**
+ * i9xx_fixup_plane - ugly workaround for G45 to fire up the hardware
+ * cursor plane briefly if not already running after enabling the display
+ * plane.
+ * This workaround avoids occasional blank screens when self refresh is
+ * enabled.
+ */
+static void
+g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+	u32 cntl = I915_READ(CURCNTR(pipe));
+
+	if ((cntl & CURSOR_MODE) == 0) {
+		u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
+
+		I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
+		I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
+		intel_wait_for_vblank(dev_priv->dev, pipe);
+		I915_WRITE(CURCNTR(pipe), cntl);
+		I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
+		I915_WRITE(FW_BLC_SELF, fw_bcl_self);
+	}
+}
+
 static void i9xx_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -3629,6 +3653,8 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 
 	intel_enable_pipe(dev_priv, pipe, false);
 	intel_enable_plane(dev_priv, plane, pipe);
+	if (IS_G4X(dev))
+		g4x_fixup_plane(dev_priv, pipe);
 
 	intel_crtc_load_lut(crtc);
 	intel_update_fbc(dev);
@@ -7256,8 +7282,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_framebuffer *intel_fb;
-	struct drm_i915_gem_object *obj;
+	struct drm_framebuffer *old_fb = crtc->fb;
+	struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_unpin_work *work;
 	unsigned long flags;
@@ -7282,8 +7308,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 	work->event = event;
 	work->crtc = crtc;
-	intel_fb = to_intel_framebuffer(crtc->fb);
-	work->old_fb_obj = intel_fb->obj;
+	work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
 	INIT_WORK(&work->work, intel_unpin_work_fn);
 
 	ret = drm_vblank_get(dev, intel_crtc->pipe);
@@ -7303,9 +7328,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	intel_crtc->unpin_work = work;
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
-	intel_fb = to_intel_framebuffer(fb);
-	obj = intel_fb->obj;
-
 	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
 		flush_workqueue(dev_priv->wq);
 
@@ -7340,6 +7362,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 cleanup_pending:
 	atomic_dec(&intel_crtc->unpin_work_count);
+	crtc->fb = old_fb;
 	drm_gem_object_unreference(&work->old_fb_obj->base);
 	drm_gem_object_unreference(&obj->base);
 	mutex_unlock(&dev->struct_mutex);

+ 2 - 1
drivers/gpu/drm/i915/intel_dp.c

@@ -353,7 +353,8 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
 
 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
 	if (has_aux_irq)
-		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10);
+		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
+					  msecs_to_jiffies(10));
 	else
 		done = wait_for_atomic(C, 10) == 0;
 	if (!done)

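wait_event_timeout() takes its timeout in jiffies, so the bare 10 meant ten scheduler ticks, whose real-time length depends on CONFIG_HZ; msecs_to_jiffies(10) pins it to roughly ten milliseconds. A short illustration, with my_waitq and my_condition as placeholders:

	long left = wait_event_timeout(my_waitq, my_condition,
				       msecs_to_jiffies(10));
	if (!left)
		pr_warn("timed out waiting for my_condition\n");
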
+ 1 - 1
drivers/gpu/drm/i915/intel_pm.c

@@ -2574,7 +2574,7 @@ static void gen6_enable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RC_SLEEP, 0);
 	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
 	I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
-	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
+	I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
 	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
 
 	/* Check if we are enabling RC6 */

+ 0 - 1
drivers/gpu/drm/mgag200/mgag200_drv.h

@@ -112,7 +112,6 @@ struct mga_framebuffer {
 struct mga_fbdev {
 	struct drm_fb_helper helper;
 	struct mga_framebuffer mfb;
-	struct list_head fbdev_list;
 	void *sysram;
 	int size;
 	struct ttm_bo_kmap_obj mapping;

+ 1 - 0
drivers/gpu/drm/mgag200/mgag200_i2c.c

@@ -92,6 +92,7 @@ struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
 	int ret;
 	int data, clock;
 
+	WREG_DAC(MGA1064_GEN_IO_CTL2, 1);
 	WREG_DAC(MGA1064_GEN_IO_DATA, 0xff);
 	WREG_DAC(MGA1064_GEN_IO_CTL, 0);
 

+ 27 - 0
drivers/gpu/drm/mgag200/mgag200_mode.c

@@ -1406,6 +1406,14 @@ static int mga_vga_get_modes(struct drm_connector *connector)
 static int mga_vga_mode_valid(struct drm_connector *connector,
 				 struct drm_display_mode *mode)
 {
+	struct drm_device *dev = connector->dev;
+	struct mga_device *mdev = (struct mga_device*)dev->dev_private;
+	struct mga_fbdev *mfbdev = mdev->mfbdev;
+	struct drm_fb_helper *fb_helper = &mfbdev->helper;
+	struct drm_fb_helper_connector *fb_helper_conn = NULL;
+	int bpp = 32;
+	int i = 0;
+
 	/* FIXME: Add bandwidth and g200se limitations */
 
 	if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
@@ -1415,6 +1423,25 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
 		return MODE_BAD;
 	}
 
+	/* Validate the mode input by the user */
+	for (i = 0; i < fb_helper->connector_count; i++) {
+		if (fb_helper->connector_info[i]->connector == connector) {
+			/* Found the helper for this connector */
+			fb_helper_conn = fb_helper->connector_info[i];
+			if (fb_helper_conn->cmdline_mode.specified) {
+				if (fb_helper_conn->cmdline_mode.bpp_specified) {
+					bpp = fb_helper_conn->cmdline_mode.bpp;
+				}
+			}
+		}
+	}
+
+	if ((mode->hdisplay * mode->vdisplay * (bpp/8)) > mdev->mc.vram_size) {
+		if (fb_helper_conn)
+			fb_helper_conn->cmdline_mode.specified = false;
+		return MODE_BAD;
+	}
+
 	return MODE_OK;
 }
 

+ 1 - 1
drivers/gpu/drm/nouveau/core/engine/graph/nve0.c

@@ -350,7 +350,7 @@ nve0_graph_init_gpc_0(struct nvc0_graph_priv *priv)
 		nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
 	}
 
-	nv_wr32(priv, GPC_BCAST(0x1bd4), magicgpc918);
+	nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);
 	nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
 }
 

+ 1 - 1
drivers/gpu/drm/nouveau/core/subdev/bios/init.c

@@ -869,7 +869,7 @@ init_idx_addr_latched(struct nvbios_init *init)
 		init->offset += 2;
 
 		init_wr32(init, dreg, idata);
-		init_mask(init, creg, ~mask, data | idata);
+		init_mask(init, creg, ~mask, data | iaddr);
 	}
 }
 

+ 1 - 0
drivers/gpu/drm/nouveau/core/subdev/i2c/base.c

@@ -142,6 +142,7 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
 	/* drop port's i2c subdev refcount, i2c handles this itself */
 	if (ret == 0) {
 		list_add_tail(&port->head, &i2c->ports);
+		atomic_dec(&parent->refcount);
 		atomic_dec(&engine->refcount);
 	}
 

Some files were not shown because too many files changed in this diff