Merge branch 'akpm' (patches from Andrew Morton)

Merge first patch-bomb from Andrew Morton:
 - Some pidns/fork/exec tweaks
 - OCFS2 updates
 - Most of MM - there remain quite a few memcg parts which depend on
   pending core cgroups changes.  Which might have been already merged -
   I'll check tomorrow...
 - Various misc stuff all over the place
 - A few block bits which I never got around to sending to Jens -
   relatively minor things.
 - MAINTAINERS maintenance
 - A small number of lib/ updates
 - checkpatch updates
 - epoll
 - firmware/dmi-scan
 - Some kprobes work for S390
 - drivers/rtc updates
 - hfsplus feature work
 - vmcore feature work
 - rbtree upgrades
 - AOE updates
 - pktcdvd cleanups
 - PPS
 - memstick
 - w1
 - New "inittmpfs" feature, which does the obvious
 - More IPC work from Davidlohr.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (303 commits)
  lz4: fix compression/decompression signedness mismatch
  ipc: drop ipc_lock_check
  ipc, shm: drop shm_lock_check
  ipc: drop ipc_lock_by_ptr
  ipc, shm: guard against non-existant vma in shmdt(2)
  ipc: document general ipc locking scheme
  ipc,msg: drop msg_unlock
  ipc: rename ids->rw_mutex
  ipc,shm: shorten critical region for shmat
  ipc,shm: cleanup do_shmat pasta
  ipc,shm: shorten critical region for shmctl
  ipc,shm: make shmctl_nolock lockless
  ipc,shm: introduce shmctl_nolock
  ipc: drop ipcctl_pre_down
  ipc,shm: shorten critical region in shmctl_down
  ipc,shm: introduce lockless functions to obtain the ipc object
  initmpfs: use initramfs if rootfstype= or root= specified
  initmpfs: make rootfs use tmpfs when CONFIG_TMPFS enabled
  initmpfs: move rootfs code from fs/ramfs/ to init/
  initmpfs: move bdi setup from init_rootfs to init_ramfs
  ...
Author: Linus Torvalds
Commit: c2d95729e3
100 changed files with 4707 additions and 1001 deletions
  1. Documentation/aoe/udev.txt (+1, -1)
  2. Documentation/block/cmdline-partition.txt (+39, -0)
  3. Documentation/devicetree/bindings/rtc/moxa,moxart-rtc.txt (+17, -0)
  4. Documentation/devicetree/bindings/rtc/rtc-omap.txt (+5, -1)
  5. Documentation/devicetree/bindings/rtc/rtc-palmas.txt (+33, -0)
  6. Documentation/filesystems/proc.txt (+9, -10)
  7. Documentation/filesystems/ramfs-rootfs-initramfs.txt (+4, -0)
  8. Documentation/sysctl/kernel.txt (+1, -0)
  9. Documentation/sysctl/vm.txt (+19, -11)
  10. Documentation/vm/hugetlbpage.txt (+12, -13)
  11. Documentation/vm/soft-dirty.txt (+7, -0)
  12. MAINTAINERS (+53, -72)
  13. arch/alpha/lib/csum_partial_copy.c (+5, -0)
  14. arch/arm/mm/hugetlbpage.c (+5, -0)
  15. arch/arm64/mm/hugetlbpage.c (+5, -0)
  16. arch/ia64/mm/hugetlbpage.c (+5, -0)
  17. arch/metag/mm/hugetlbpage.c (+5, -0)
  18. arch/mips/mm/hugetlbpage.c (+5, -0)
  19. arch/powerpc/mm/hugetlbpage.c (+10, -0)
  20. arch/s390/Kconfig (+2, -1)
  21. arch/s390/include/asm/kprobes.h (+3, -1)
  22. arch/s390/include/asm/sclp.h (+1, -0)
  23. arch/s390/kernel/crash_dump.c (+179, -40)
  24. arch/s390/kernel/kprobes.c (+137, -7)
  25. arch/s390/mm/hugetlbpage.c (+5, -0)
  26. arch/sh/mm/hugetlbpage.c (+5, -0)
  27. arch/sparc/kernel/sys_sparc32.c (+6, -6)
  28. arch/sparc/mm/hugetlbpage.c (+5, -0)
  29. arch/tile/mm/hugetlbpage.c (+5, -0)
  30. arch/x86/include/asm/pgtable.h (+19, -15)
  31. arch/x86/include/asm/pgtable_types.h (+3, -0)
  32. arch/x86/include/asm/tlbflush.h (+31, -6)
  33. arch/x86/kernel/cpu/mtrr/generic.c (+2, -0)
  34. arch/x86/mm/hugetlbpage.c (+8, -0)
  35. arch/x86/mm/tlb.c (+11, -3)
  36. block/Kconfig (+6, -0)
  37. block/Makefile (+1, -0)
  38. block/blk-ioc.c (+1, -1)
  39. block/blk-sysfs.c (+1, -1)
  40. block/cmdline-parser.c (+250, -0)
  41. block/compat_ioctl.c (+1, -1)
  42. block/partitions/Kconfig (+7, -0)
  43. block/partitions/Makefile (+1, -0)
  44. block/partitions/check.c (+4, -0)
  45. block/partitions/cmdline.c (+99, -0)
  46. block/partitions/cmdline.h (+2, -0)
  47. block/partitions/efi.c (+109, -62)
  48. block/partitions/efi.h (+18, -20)
  49. drivers/block/aoe/aoe.h (+3, -1)
  50. drivers/block/aoe/aoeblk.c (+98, -2)
  51. drivers/block/aoe/aoecmd.c (+0, -4)
  52. drivers/block/aoe/aoedev.c (+4, -6)
  53. drivers/block/cciss.c (+7, -0)
  54. drivers/block/mg_disk.c (+1, -1)
  55. drivers/block/osdblk.c (+1, -1)
  56. drivers/block/pktcdvd.c (+140, -138)
  57. drivers/block/rbd.c (+1, -1)
  58. drivers/block/swim.c (+0, -2)
  59. drivers/block/xen-blkback/xenbus.c (+1, -1)
  60. drivers/char/tpm/tpm_tis.c (+24, -36)
  61. drivers/firmware/dmi_scan.c (+29, -44)
  62. drivers/firmware/google/gsmi.c (+1, -1)
  63. drivers/iommu/msm_iommu_dev.c (+0, -2)
  64. drivers/iommu/omap-iommu.c (+0, -2)
  65. drivers/memstick/core/Kconfig (+12, -0)
  66. drivers/memstick/core/Makefile (+1, -1)
  67. drivers/memstick/core/ms_block.c (+2385, -0)
  68. drivers/memstick/core/ms_block.h (+290, -0)
  69. drivers/memstick/host/rtsx_pci_ms.c (+0, -2)
  70. drivers/platform/x86/apple-gmux.c (+14, -4)
  71. drivers/pnp/driver.c (+13, -0)
  72. drivers/pps/clients/pps-gpio.c (+0, -1)
  73. drivers/rtc/Kconfig (+9, -0)
  74. drivers/rtc/Makefile (+1, -0)
  75. drivers/rtc/rtc-cmos.c (+5, -19)
  76. drivers/rtc/rtc-ds1511.c (+5, -12)
  77. drivers/rtc/rtc-ds1553.c (+4, -9)
  78. drivers/rtc/rtc-ds1742.c (+7, -19)
  79. drivers/rtc/rtc-ep93xx.c (+3, -11)
  80. drivers/rtc/rtc-hid-sensor-time.c (+15, -7)
  81. drivers/rtc/rtc-imxdi.c (+4, -12)
  82. drivers/rtc/rtc-lpc32xx.c (+4, -20)
  83. drivers/rtc/rtc-max77686.c (+2, -2)
  84. drivers/rtc/rtc-moxart.c (+330, -0)
  85. drivers/rtc/rtc-mv.c (+4, -13)
  86. drivers/rtc/rtc-mxc.c (+4, -10)
  87. drivers/rtc/rtc-nuc900.c (+1, -1)
  88. drivers/rtc/rtc-omap.c (+52, -8)
  89. drivers/rtc/rtc-palmas.c (+35, -0)
  90. drivers/rtc/rtc-pcf2127.c (+0, -6)
  91. drivers/rtc/rtc-sirfsoc.c (+9, -4)
  92. drivers/rtc/rtc-stk17ta8.c (+5, -10)
  93. drivers/rtc/rtc-tx4939.c (+4, -10)
  94. drivers/s390/char/zcore.c (+3, -3)
  95. drivers/video/acornfb.c (+1, -265)
  96. drivers/video/acornfb.h (+0, -29)
  97. drivers/w1/masters/mxc_w1.c (+0, -2)
  98. drivers/w1/w1.c (+8, -4)
  99. drivers/watchdog/hpwdt.c (+4, -2)
  100. fs/affs/file.c (+1, -1)

+ 1 - 1
Documentation/aoe/udev.txt

@@ -23,4 +23,4 @@ SUBSYSTEM=="aoe", KERNEL=="revalidate",	NAME="etherd/%k", GROUP="disk", MODE="02
 SUBSYSTEM=="aoe", KERNEL=="flush",	NAME="etherd/%k", GROUP="disk", MODE="0220"
 
 # aoe block devices     
-KERNEL=="etherd*",       NAME="%k", GROUP="disk"
+KERNEL=="etherd*",       GROUP="disk"

+ 39 - 0
Documentation/block/cmdline-partition.txt

@@ -0,0 +1,39 @@
+Embedded device command line partition
+=====================================================================
+
+Read block device partition table from command line.
+The partition used for fixed block device (eMMC) embedded device.
+It is no MBR, save storage space. Bootloader can be easily accessed
+by absolute address of data on the block device.
+Users can easily change the partition.
+
+The format for the command line is just like mtdparts:
+
+blkdevparts=<blkdev-def>[;<blkdev-def>]
+  <blkdev-def> := <blkdev-id>:<partdef>[,<partdef>]
+    <partdef> := <size>[@<offset>](part-name)
+
+<blkdev-id>
+    block device disk name, embedded device used fixed block device,
+    it's disk name also fixed. such as: mmcblk0, mmcblk1, mmcblk0boot0.
+
+<size>
+    partition size, in bytes, such as: 512, 1m, 1G.
+
+<offset>
+    partition start address, in bytes.
+
+(part-name)
+    partition name, kernel send uevent with "PARTNAME". application can create
+    a link to block device partition with the name "PARTNAME".
+    user space application can access partition by partition name.
+
+Example:
+    eMMC disk name is "mmcblk0" and "mmcblk0boot0"
+
+  bootargs:
+    'blkdevparts=mmcblk0:1G(data0),1G(data1),-;mmcblk0boot0:1m(boot),-(kernel)'
+
+  dmesg:
+    mmcblk0: p1(data0) p2(data1) p3()
+    mmcblk0boot0: p1(boot) p2(kernel)
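
The same syntax is handled in-kernel by the cmdline_parts helpers added later in this series (block/cmdline-parser.c, further down in this commit). As a rough sketch only — the structure fields and functions are taken from that diff, and dump_blkdevparts() is a hypothetical caller, not part of the series:

/*
 * Sketch: parse a blkdevparts-style string and print the result,
 * using cmdline_parts_parse()/cmdline_parts_free() from this series.
 */
#include <linux/cmdline-parser.h>
#include <linux/printk.h>

static int dump_blkdevparts(const char *cmdline)
{
	struct cmdline_parts *parts, *p;
	struct cmdline_subpart *sp;
	int ret;

	ret = cmdline_parts_parse(&parts, cmdline);
	if (ret)
		return ret;

	for (p = parts; p; p = p->next_parts) {
		pr_info("%s:\n", p->name);
		for (sp = p->subpart; sp; sp = sp->next_subpart)
			pr_info("  %s: from %llu, size %llu%s\n",
				sp->name,
				(unsigned long long)sp->from,
				(unsigned long long)sp->size,
				(sp->flags & PF_RDONLY) ? " (ro)" : "");
	}

	cmdline_parts_free(&parts);
	return 0;
}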

+ 17 - 0
Documentation/devicetree/bindings/rtc/moxa,moxart-rtc.txt

@@ -0,0 +1,17 @@
+MOXA ART real-time clock
+
+Required properties:
+
+- compatible : Should be "moxa,moxart-rtc"
+- gpio-rtc-sclk : RTC sclk gpio, with zero flags
+- gpio-rtc-data : RTC data gpio, with zero flags
+- gpio-rtc-reset : RTC reset gpio, with zero flags
+
+Example:
+
+	rtc: rtc {
+		compatible = "moxa,moxart-rtc";
+		gpio-rtc-sclk = <&gpio 5 0>;
+		gpio-rtc-data = <&gpio 6 0>;
+		gpio-rtc-reset = <&gpio 7 0>;
+	};

+ 5 - 1
Documentation/devicetree/bindings/rtc/rtc-omap.txt

@@ -1,7 +1,11 @@
 TI Real Time Clock
 
 Required properties:
-- compatible: "ti,da830-rtc"
+- compatible:
+	- "ti,da830-rtc"  - for RTC IP used similar to that on DA8xx SoC family.
+	- "ti,am3352-rtc" - for RTC IP used similar to that on AM335x SoC family.
+			    This RTC IP has special WAKE-EN Register to enable
+			    Wakeup generation for event Alarm.
 - reg: Address range of rtc register set
 - interrupts: rtc timer, alarm interrupts in order
 - interrupt-parent: phandle for the interrupt controller

+ 33 - 0
Documentation/devicetree/bindings/rtc/rtc-palmas.txt

@@ -0,0 +1,33 @@
+Palmas RTC controller bindings
+
+Required properties:
+- compatible:
+  - "ti,palmas-rtc" for palma series of the RTC controller
+- interrupt-parent: Parent interrupt device, must be handle of palmas node.
+- interrupts: Interrupt number of RTC submodule on device.
+
+Optional properties:
+
+- ti,backup-battery-chargeable: The Palmas series device like TPS65913 or
+	TPS80036 supports the backup battery for powering the RTC when main
+	battery is removed or in very low power state. The backup battery
+	can be chargeable or non-chargeable. This flag will tells whether
+	battery is chargeable or not. If charging battery then driver can
+	enable the charging.
+- ti,backup-battery-charge-high-current: Enable high current charging in
+	backup battery. Device supports the < 100mA and > 100mA charging.
+	The high current will be > 100mA. Absence of this property will
+	charge battery to lower current i.e. < 100mA.
+
+Example:
+	palmas: tps65913@58 {
+		...
+		palmas_rtc: rtc {
+			compatible = "ti,palmas-rtc";
+			interrupt-parent = <&palmas>;
+			interrupts = <8 0>;
+			ti,backup-battery-chargeable;
+			ti,backup-battery-charge-high-current;
+		};
+		...
+	};

+ 9 - 10
Documentation/filesystems/proc.txt

@@ -854,16 +854,15 @@ Committed_AS: The amount of memory presently allocated on the system.
               The committed memory is a sum of all of the memory which
               has been allocated by processes, even if it has not been
               "used" by them as of yet. A process which malloc()'s 1G
-              of memory, but only touches 300M of it will only show up
-              as using 300M of memory even if it has the address space
-              allocated for the entire 1G. This 1G is memory which has
-              been "committed" to by the VM and can be used at any time
-              by the allocating application. With strict overcommit
-              enabled on the system (mode 2 in 'vm.overcommit_memory'),
-              allocations which would exceed the CommitLimit (detailed
-              above) will not be permitted. This is useful if one needs
-              to guarantee that processes will not fail due to lack of
-              memory once that memory has been successfully allocated.
+              of memory, but only touches 300M of it will show up as
+	      using 1G. This 1G is memory which has been "committed" to
+              by the VM and can be used at any time by the allocating
+              application. With strict overcommit enabled on the system
+              (mode 2 in 'vm.overcommit_memory'),allocations which would
+              exceed the CommitLimit (detailed above) will not be permitted.
+              This is useful if one needs to guarantee that processes will
+              not fail due to lack of memory once that memory has been
+              successfully allocated.
 VmallocTotal: total size of vmalloc memory area
  VmallocUsed: amount of vmalloc area which is used
 VmallocChunk: largest contiguous block of vmalloc area which is free
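
A tiny user-space illustration of the accounting described above (hypothetical, not part of this patch): the full 1G is charged to Committed_AS even though only ~300M ever becomes resident.

/* Hypothetical demo of the Committed_AS accounting described above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t committed = 1UL << 30;	/* 1G of address space */
	size_t touched   = 300UL << 20;	/* only 300M is written */
	char *p = malloc(committed);

	if (!p)
		return 1;
	memset(p, 0, touched);	/* RSS ~300M; Committed_AS accounts the full 1G */
	getchar();		/* pause: compare /proc/meminfo and /proc/self/status */
	free(p);
	return 0;
}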

+ 4 - 0
Documentation/filesystems/ramfs-rootfs-initramfs.txt

@@ -79,6 +79,10 @@ to just make sure certain lists can't become empty.
 Most systems just mount another filesystem over rootfs and ignore it.  The
 amount of space an empty instance of ramfs takes up is tiny.
 
+If CONFIG_TMPFS is enabled, rootfs will use tmpfs instead of ramfs by
+default.  To force ramfs, add "rootfstype=ramfs" to the kernel command
+line.
+
 What is initramfs?
 ------------------
 

+ 1 - 0
Documentation/sysctl/kernel.txt

@@ -182,6 +182,7 @@ core_pattern is used to specify a core dumpfile pattern name.
 	%<NUL>	'%' is dropped
 	%%	output one '%'
 	%p	pid
+	%P	global pid (init PID namespace)
 	%u	uid
 	%g	gid
 	%d	dump mode, matches PR_SET_DUMPABLE and

+ 19 - 11
Documentation/sysctl/vm.txt

@@ -200,17 +200,25 @@ fragmentation index is <= extfrag_threshold. The default value is 500.
 
 hugepages_treat_as_movable
 
-This parameter is only useful when kernelcore= is specified at boot time to
-create ZONE_MOVABLE for pages that may be reclaimed or migrated. Huge pages
-are not movable so are not normally allocated from ZONE_MOVABLE. A non-zero
-value written to hugepages_treat_as_movable allows huge pages to be allocated
-from ZONE_MOVABLE.
-
-Once enabled, the ZONE_MOVABLE is treated as an area of memory the huge
-pages pool can easily grow or shrink within. Assuming that applications are
-not running that mlock() a lot of memory, it is likely the huge pages pool
-can grow to the size of ZONE_MOVABLE by repeatedly entering the desired value
-into nr_hugepages and triggering page reclaim.
+This parameter controls whether we can allocate hugepages from ZONE_MOVABLE
+or not. If set to non-zero, hugepages can be allocated from ZONE_MOVABLE.
+ZONE_MOVABLE is created when kernel boot parameter kernelcore= is specified,
+so this parameter has no effect if used without kernelcore=.
+
+Hugepage migration is now available in some situations which depend on the
+architecture and/or the hugepage size. If a hugepage supports migration,
+allocation from ZONE_MOVABLE is always enabled for the hugepage regardless
+of the value of this parameter.
+IOW, this parameter affects only non-migratable hugepages.
+
+Assuming that hugepages are not migratable in your system, one usecase of
+this parameter is that users can make hugepage pool more extensible by
+enabling the allocation from ZONE_MOVABLE. This is because on ZONE_MOVABLE
+page reclaim/migration/compaction work more and you can get contiguous
+memory more likely. Note that using ZONE_MOVABLE for non-migratable
+hugepages can do harm to other features like memory hotremove (because
+memory hotremove expects that memory blocks on ZONE_MOVABLE are always
+removable,) so it's a trade-off responsible for the users.
 
 ==============================================================
 

+ 12 - 13
Documentation/vm/hugetlbpage.txt

@@ -165,6 +165,7 @@ which function as described above for the default huge page-sized case.
 
 
 Interaction of Task Memory Policy with Huge Page Allocation/Freeing
+===================================================================
 
 Whether huge pages are allocated and freed via the /proc interface or
 the /sysfs interface using the nr_hugepages_mempolicy attribute, the NUMA
@@ -229,6 +230,7 @@ resulting effect on persistent huge page allocation is as follows:
    of huge pages over all on-lines nodes with memory.
 
 Per Node Hugepages Attributes
+=============================
 
 A subset of the contents of the root huge page control directory in sysfs,
 described above, will be replicated under each the system device of each
@@ -258,6 +260,7 @@ applied, from which node the huge page allocation will be attempted.
 
 
 Using Huge Pages
+================
 
 If the user applications are going to request huge pages using mmap system
 call, then it is required that system administrator mount a file system of
@@ -296,20 +299,16 @@ calls, though the mount of filesystem will be required for using mmap calls
 without MAP_HUGETLB.  For an example of how to use mmap with MAP_HUGETLB see
 map_hugetlb.c.
 
-*******************************************************************
+Examples
+========
 
-/*
- * map_hugetlb: see tools/testing/selftests/vm/map_hugetlb.c
- */
+1) map_hugetlb: see tools/testing/selftests/vm/map_hugetlb.c
 
-*******************************************************************
+2) hugepage-shm:  see tools/testing/selftests/vm/hugepage-shm.c
 
-/*
- * hugepage-shm:  see tools/testing/selftests/vm/hugepage-shm.c
- */
+3) hugepage-mmap:  see tools/testing/selftests/vm/hugepage-mmap.c
 
-*******************************************************************
-
-/*
- * hugepage-mmap:  see tools/testing/selftests/vm/hugepage-mmap.c
- */
+4) The libhugetlbfs (http://libhugetlbfs.sourceforge.net) library provides a
+   wide range of userspace tools to help with huge page usability, environment
+   setup, and control. Furthermore it provides useful test cases that should be
+   used when modifying code to ensure no regressions are introduced.
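
For convenience, a condensed sketch of example 1); the authoritative version is tools/testing/selftests/vm/map_hugetlb.c, and this assumes huge pages have already been reserved via nr_hugepages:

/* Minimal MAP_HUGETLB sketch; error handling trimmed for brevity. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define LENGTH (256UL * 1024 * 1024)

#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0x40000	/* arch-specific; value shown is for x86 */
#endif

int main(void)
{
	void *addr = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (addr == MAP_FAILED) {
		perror("mmap");	/* typically means no huge pages reserved */
		return 1;
	}
	memset(addr, 0x5a, LENGTH);	/* touch the huge pages */
	munmap(addr, LENGTH);
	return 0;
}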

+ 7 - 0
Documentation/vm/soft-dirty.txt

@@ -28,6 +28,13 @@ This is so, since the pages are still mapped to physical memory, and thus all
 the kernel does is finds this fact out and puts both writable and soft-dirty
 bits on the PTE.
 
+  While in most cases tracking memory changes by #PF-s is more than enough
+there is still a scenario when we can lose soft dirty bits -- a task
+unmaps a previously mapped memory region and then maps a new one at exactly
+the same place. When unmap is called, the kernel internally clears PTE values
+including soft dirty bits. To notify user space application about such
+memory region renewal the kernel always marks new memory regions (and
+expanded regions) as soft dirty.
 
   This feature is actively used by the checkpoint-restore project. You
 can find more details about it on http://criu.org
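
For reference, a minimal user-space sketch of how the bit can be observed, assuming the documented soft-dirty interfaces (writing "4" to /proc/PID/clear_refs clears the bits; bit 55 of a /proc/PID/pagemap entry reports them):

/* Sketch: clear soft-dirty bits, re-dirty a page, read its pagemap entry. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	char *page = mmap(NULL, psize, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	uint64_t entry;
	int fd;

	if (page == MAP_FAILED)
		return 1;
	page[0] = 1;			/* make sure the page is mapped */

	fd = open("/proc/self/clear_refs", O_WRONLY);
	write(fd, "4", 1);		/* "4" clears the soft-dirty bits */
	close(fd);

	page[0] = 2;			/* write fault sets soft-dirty again */

	fd = open("/proc/self/pagemap", O_RDONLY);
	pread(fd, &entry, sizeof(entry),
	      ((uintptr_t)page / psize) * sizeof(entry));
	close(fd);

	printf("soft-dirty bit (55): %d\n", (int)((entry >> 55) & 1));
	return 0;
}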

+ 53 - 72
MAINTAINERS

@@ -1028,7 +1028,7 @@ F:	arch/arm/mach-orion5x/ts78xx-*
 ARM/MICREL KS8695 ARCHITECTURE
 M:	Greg Ungerer <gerg@uclinux.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-F:	arch/arm/mach-ks8695
+F:	arch/arm/mach-ks8695/
 S:	Odd Fixes
 
 ARM/MIOA701 MACHINE SUPPORT
@@ -1048,7 +1048,6 @@ M:	STEricsson <STEricsson_nomadik_linux@list.st.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-nomadik/
-F:	arch/arm/plat-nomadik/
 F:	drivers/i2c/busses/i2c-nomadik.c
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-nomadik.git
 
@@ -1070,7 +1069,7 @@ F:	drivers/mmc/host/msm_sdcc.h
 F:	drivers/tty/serial/msm_serial.h
 F:	drivers/tty/serial/msm_serial.c
 F:	drivers/*/pm8???-*
-F:	drivers/ssbi/
+F:	drivers/mfd/ssbi/
 F:	include/linux/mfd/pm8xxx/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davidb/linux-msm.git
 S:	Maintained
@@ -1156,7 +1155,6 @@ L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 W:	http://www.fluff.org/ben/linux/
 S:	Maintained
 F:	arch/arm/plat-samsung/
-F:	arch/arm/plat-s3c24xx/
 F:	arch/arm/mach-s3c24*/
 F:	arch/arm/mach-s3c64xx/
 F:	drivers/*/*s3c2410*
@@ -1179,8 +1177,6 @@ L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-s5pv210/mach-aquila.c
 F:	arch/arm/mach-s5pv210/mach-goni.c
-F:	arch/arm/mach-exynos/mach-universal_c210.c
-F:	arch/arm/mach-exynos/mach-nuri.c
 
 ARM/SAMSUNG S5P SERIES 2D GRAPHICS ACCELERATION (G2D) SUPPORT
 M:	Kyungmin Park <kyungmin.park@samsung.com>
@@ -1325,7 +1321,7 @@ F:	drivers/mmc/host/wmt-sdmmc.c
 F:	drivers/pwm/pwm-vt8500.c
 F:	drivers/rtc/rtc-vt8500.c
 F:	drivers/tty/serial/vt8500_serial.c
-F:	drivers/usb/host/ehci-vt8500.c
+F:	drivers/usb/host/ehci-platform.c
 F:	drivers/usb/host/uhci-platform.c
 F:	drivers/video/vt8500lcdfb.*
 F:	drivers/video/wm8505fb*
@@ -1815,6 +1811,17 @@ L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/broadcom/bnx2x/
 
+BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE
+M:	Christian Daudt <csd@broadcom.com>
+T:	git git://git.github.com/broadcom/bcm11351
+S:	Maintained
+F:	arch/arm/mach-bcm/
+F:	arch/arm/boot/dts/bcm113*
+F:	arch/arm/boot/dts/bcm281*
+F:	arch/arm/configs/bcm_defconfig
+F:	drivers/mmc/host/sdhci_bcm_kona.c
+F:	drivers/clocksource/bcm_kona_timer.c
+
 BROADCOM BCM2835 ARM ARCHICTURE
 M:	Stephen Warren <swarren@wwwdotorg.org>
 L:	linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2035,10 +2042,10 @@ W:	http://ceph.com/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
 S:	Supported
 F:	Documentation/filesystems/ceph.txt
-F:	fs/ceph
-F:	net/ceph
-F:	include/linux/ceph
-F:	include/linux/crush
+F:	fs/ceph/
+F:	net/ceph/
+F:	include/linux/ceph/
+F:	include/linux/crush/
 
 CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM:
 L:	linux-usb@vger.kernel.org
@@ -2335,7 +2342,7 @@ CPU POWER MONITORING SUBSYSTEM
 M:	Dominik Brodowski <linux@dominikbrodowski.net>
 M:	Thomas Renninger <trenn@suse.de>
 S:	Maintained
-F:	tools/power/cpupower
+F:	tools/power/cpupower/
 
 CPUSETS
 M:	Li Zefan <lizefan@huawei.com>
@@ -2773,7 +2780,7 @@ L:	intel-gfx@lists.freedesktop.org
 L:	dri-devel@lists.freedesktop.org
 T:	git git://people.freedesktop.org/~danvet/drm-intel
 S:	Supported
-F:	drivers/gpu/drm/i915
+F:	drivers/gpu/drm/i915/
 F:	include/drm/i915*
 F:	include/uapi/drm/i915*
 
@@ -2785,7 +2792,7 @@ M:	Kyungmin Park <kyungmin.park@samsung.com>
 L:	dri-devel@lists.freedesktop.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos.git
 S:	Supported
-F:	drivers/gpu/drm/exynos
+F:	drivers/gpu/drm/exynos/
 F:	include/drm/exynos*
 F:	include/uapi/drm/exynos*
 
@@ -3038,7 +3045,7 @@ M:	Mauro Carvalho Chehab <m.chehab@samsung.com>
 L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
-F:	drivers/edac/ghes-edac.c
+F:	drivers/edac/ghes_edac.c
 
 EDAC-I82443BXGX
 M:	Tim Small <tim@buttersideup.com>
@@ -3644,8 +3651,8 @@ M:	Arnd Bergmann <arnd@arndb.de>
 L:	linux-arch@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic.git
 S:	Maintained
-F:	include/asm-generic
-F:	include/uapi/asm-generic
+F:	include/asm-generic/
+F:	include/uapi/asm-generic/
 
 GENERIC UIO DRIVER FOR PCI DEVICES
 M:	"Michael S. Tsirkin" <mst@redhat.com>
@@ -3687,7 +3694,8 @@ GRE DEMULTIPLEXER DRIVER
 M:	Dmitry Kozlov <xeb@mail.ru>
 L:	netdev@vger.kernel.org
 S:	Maintained
-F:	net/ipv4/gre.c
+F:	net/ipv4/gre_demux.c
+F:	net/ipv4/gre_offload.c
 F:	include/net/gre.h
 
 GRETH 10/100/1G Ethernet MAC device driver
@@ -3765,7 +3773,7 @@ L:	linux-media@vger.kernel.org
 T:	git git://linuxtv.org/media_tree.git
 W:	http://linuxtv.org
 S:	Odd Fixes
-F:	drivers/media/usb/hdpvr
+F:	drivers/media/usb/hdpvr/
 
 HWPOISON MEMORY FAILURE HANDLING
 M:	Andi Kleen <andi@firstfloor.org>
@@ -4574,7 +4582,7 @@ S:	Supported
 W:	http://www.openfabrics.org
 W:	www.open-iscsi.org
 Q:	http://patchwork.kernel.org/project/linux-rdma/list/
-F:	drivers/infiniband/ulp/iser
+F:	drivers/infiniband/ulp/iser/
 
 ISDN SUBSYSTEM
 M:	Karsten Keil <isdn@linux-pingi.de>
@@ -4628,7 +4636,7 @@ W:	http://palosaari.fi/linux/
 Q:	http://patchwork.linuxtv.org/project/linux-media/list/
 T:	git git://linuxtv.org/anttip/media_tree.git
 S:	Maintained
-F:	drivers/media/tuners/it913x*
+F:	drivers/media/tuners/tuner_it913x*
 
 IVTV VIDEO4LINUX DRIVER
 M:	Andy Walls <awalls@md.metrocast.net>
@@ -5964,15 +5972,12 @@ S:	Maintained
 F:	arch/arm/*omap*/*pm*
 F:	drivers/cpufreq/omap-cpufreq.c
 
-OMAP POWERDOMAIN/CLOCKDOMAIN SOC ADAPTATION LAYER SUPPORT
+OMAP POWERDOMAIN SOC ADAPTATION LAYER SUPPORT
 M:	Rajendra Nayak <rnayak@ti.com>
 M:	Paul Walmsley <paul@pwsan.com>
 L:	linux-omap@vger.kernel.org
 S:	Maintained
-F:	arch/arm/mach-omap2/powerdomain2xxx_3xxx.c
-F:	arch/arm/mach-omap2/powerdomain44xx.c
-F:	arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
-F:	arch/arm/mach-omap2/clockdomain44xx.c
+F:	arch/arm/mach-omap2/prm*
 
 OMAP AUDIO SUPPORT
 M:	Peter Ujfalusi <peter.ujfalusi@ti.com>
@@ -6138,7 +6143,7 @@ W:	http://openrisc.net
 L:	linux@lists.openrisc.net (moderated for non-subscribers)
 S:	Maintained
 T:	git git://openrisc.net/~jonas/linux
-F:	arch/openrisc
+F:	arch/openrisc/
 
 OPENVSWITCH
 M:	Jesse Gross <jesse@nicira.com>
@@ -6429,7 +6434,7 @@ M:	Jamie Iles <jamie@jamieiles.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T:	git git://github.com/jamieiles/linux-2.6-ji.git
 S:	Supported
-F:	arch/arm/mach-picoxcell
+F:	arch/arm/mach-picoxcell/
 F:	drivers/*/picoxcell*
 F:	drivers/*/*/picoxcell*
 
@@ -6702,7 +6707,7 @@ F:	drivers/spi/spi-pxa2xx*
 F:	drivers/usb/gadget/pxa2*
 F:	include/sound/pxa2xx-lib.h
 F:	sound/arm/pxa*
-F:	sound/soc/pxa
+F:	sound/soc/pxa/
 
 MMP SUPPORT
 M:	Eric Miao <eric.y.miao@gmail.com>
@@ -7155,7 +7160,7 @@ SAMSUNG AUDIO (ASoC) DRIVERS
 M:	Sangbeom Kim <sbkim73@samsung.com>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:	Supported
-F:	sound/soc/samsung
+F:	sound/soc/samsung/
 
 SAMSUNG FRAMEBUFFER DRIVER
 M:	Jingoo Han <jg1.han@samsung.com>
@@ -7201,7 +7206,7 @@ SERIAL DRIVERS
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L:	linux-serial@vger.kernel.org
 S:	Maintained
-F:	drivers/tty/serial
+F:	drivers/tty/serial/
 
 SYNOPSYS DESIGNWARE DMAC DRIVER
 M:	Viresh Kumar <viresh.linux@gmail.com>
@@ -7236,7 +7241,7 @@ TLG2300 VIDEO4LINUX-2 DRIVER
 M:	Huang Shijie <shijie8@gmail.com>
 M:	Hans Verkuil <hverkuil@xs4all.nl>
 S:	Odd Fixes
-F:	drivers/media/usb/tlg2300
+F:	drivers/media/usb/tlg2300/
 
 SC1200 WDT DRIVER
 M:	Zwane Mwaikambo <zwane@arm.linux.org.uk>
@@ -7497,7 +7502,7 @@ L:	linux-media@vger.kernel.org
 T:	git git://linuxtv.org/media_tree.git
 W:	http://linuxtv.org
 S:	Odd Fixes
-F:	drivers/media/radio/radio-si4713.h
+F:	drivers/media/radio/radio-si4713.c
 
 SIANO DVB DRIVER
 M:	Mauro Carvalho Chehab <m.chehab@samsung.com>
@@ -7506,9 +7511,9 @@ W:	http://linuxtv.org
 T:	git git://linuxtv.org/media_tree.git
 S:	Odd fixes
 F:	drivers/media/common/siano/
-F:	drivers/media/dvb/siano/
 F:	drivers/media/usb/siano/
-F:	drivers/media/mmc/siano
+F:	drivers/media/usb/siano/
+F:	drivers/media/mmc/siano/
 
 SH_VEU V4L2 MEM2MEM DRIVER
 M:	Guennadi Liakhovetski <g.liakhovetski@gmx.de>
@@ -7546,9 +7551,9 @@ P:	Vincent Sanders <vince@simtec.co.uk>
 M:	Simtec Linux Team <linux@simtec.co.uk>
 W:	http://www.simtec.co.uk/products/EB2410ITX/
 S:	Supported
-F:	arch/arm/mach-s3c2410/mach-bast.c
-F:	arch/arm/mach-s3c2410/bast-ide.c
-F:	arch/arm/mach-s3c2410/bast-irq.c
+F:	arch/arm/mach-s3c24xx/mach-bast.c
+F:	arch/arm/mach-s3c24xx/bast-ide.c
+F:	arch/arm/mach-s3c24xx/bast-irq.c
 
 TI DAVINCI MACHINE SUPPORT
 M:	Sekhar Nori <nsekhar@ti.com>
@@ -7557,7 +7562,7 @@ L:	davinci-linux-open-source@linux.davincidsp.com (moderated for non-subscribers
 T:	git git://gitorious.org/linux-davinci/linux-davinci.git
 Q:	http://patchwork.kernel.org/project/linux-davinci/list/
 S:	Supported
-F:	arch/arm/mach-davinci
+F:	arch/arm/mach-davinci/
 F:	drivers/i2c/busses/i2c-davinci.c
 
 TI DAVINCI SERIES MEDIA DRIVER
@@ -7642,7 +7647,7 @@ SMIA AND SMIA++ IMAGE SENSOR DRIVER
 M:	Sakari Ailus <sakari.ailus@iki.fi>
 L:	linux-media@vger.kernel.org
 S:	Maintained
-F:	drivers/media/i2c/smiapp
+F:	drivers/media/i2c/smiapp/
 F:	include/media/smiapp.h
 F:	drivers/media/i2c/smiapp-pll.c
 F:	drivers/media/i2c/smiapp-pll.h
@@ -7745,6 +7750,11 @@ W:	http://tifmxx.berlios.de/
 S:	Maintained
 F:	drivers/memstick/host/tifm_ms.c
 
+SONY MEMORYSTICK STANDARD SUPPORT
+M:	Maxim Levitsky <maximlevitsky@gmail.com>
+S:	Maintained
+F:	drivers/memstick/core/ms_block.*
+
 SOUND
 M:	Jaroslav Kysela <perex@perex.cz>
 M:	Takashi Iwai <tiwai@suse.de>
@@ -7821,35 +7831,7 @@ L:	spear-devel@list.st.com
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:	http://www.st.com/spear
 S:	Maintained
-F:	arch/arm/plat-spear/
-
-SPEAR13XX MACHINE SUPPORT
-M:	Viresh Kumar <viresh.linux@gmail.com>
-M:	Shiraz Hashim <shiraz.hashim@st.com>
-L:	spear-devel@list.st.com
-L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:	http://www.st.com/spear
-S:	Maintained
-F:	arch/arm/mach-spear13xx/
-
-SPEAR3XX MACHINE SUPPORT
-M:	Viresh Kumar <viresh.linux@gmail.com>
-M:	Shiraz Hashim <shiraz.hashim@st.com>
-L:	spear-devel@list.st.com
-L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:	http://www.st.com/spear
-S:	Maintained
-F:	arch/arm/mach-spear3xx/
-
-SPEAR6XX MACHINE SUPPORT
-M:	Rajeev Kumar <rajeev-dlh.kumar@st.com>
-M:	Shiraz Hashim <shiraz.hashim@st.com>
-M:	Viresh Kumar <viresh.linux@gmail.com>
-L:	spear-devel@list.st.com
-L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:	http://www.st.com/spear
-S:	Maintained
-F:	arch/arm/mach-spear6xx/
+F:	arch/arm/mach-spear/
 
 SPEAR CLOCK FRAMEWORK SUPPORT
 M:	Viresh Kumar <viresh.linux@gmail.com>
@@ -8118,7 +8100,7 @@ M:	Vineet Gupta <vgupta@synopsys.com>
 S:	Supported
 F:	arch/arc/
 F:	Documentation/devicetree/bindings/arc/
-F:	drivers/tty/serial/arc-uart.c
+F:	drivers/tty/serial/arc_uart.c
 
 SYSV FILESYSTEM
 M:	Christoph Hellwig <hch@infradead.org>
@@ -8808,7 +8790,6 @@ L:	linux-usb@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
 S:	Maintained
 F:	drivers/usb/phy/
-F:	drivers/usb/otg/
 
 USB PRINTER DRIVER (usblp)
 M:	Pete Zaitcev <zaitcev@redhat.com>
@@ -9339,7 +9320,7 @@ M:	Matthew Garrett <matthew.garrett@nebula.com>
 L:	platform-driver-x86@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.git
 S:	Maintained
-F:	drivers/platform/x86
+F:	drivers/platform/x86/
 
 X86 MCE INFRASTRUCTURE
 M:	Tony Luck <tony.luck@intel.com>

+ 5 - 0
arch/alpha/lib/csum_partial_copy.c

@@ -338,6 +338,11 @@ csum_partial_copy_from_user(const void __user *src, void *dst, int len,
 	unsigned long doff = 7 & (unsigned long) dst;
 
 	if (len) {
+		if (!access_ok(VERIFY_READ, src, len)) {
+			*errp = -EFAULT;
+			memset(dst, 0, len);
+			return sum;
+		}
 		if (!doff) {
 			if (!soff)
 				checksum = csum_partial_cfu_aligned(

+ 5 - 0
arch/arm/mm/hugetlbpage.c

@@ -56,3 +56,8 @@ int pmd_huge(pmd_t pmd)
 {
 	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
 }
+
+int pmd_huge_support(void)
+{
+	return 1;
+}

+ 5 - 0
arch/arm64/mm/hugetlbpage.c

@@ -54,6 +54,11 @@ int pud_huge(pud_t pud)
 	return !(pud_val(pud) & PUD_TABLE_BIT);
 }
 
+int pmd_huge_support(void)
+{
+	return 1;
+}
+
 static __init int setup_hugepagesz(char *opt)
 {
 	unsigned long ps = memparse(opt, &opt);

+ 5 - 0
arch/ia64/mm/hugetlbpage.c

@@ -114,6 +114,11 @@ int pud_huge(pud_t pud)
 	return 0;
 }
 
+int pmd_huge_support(void)
+{
+	return 0;
+}
+
 struct page *
 follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
 {

+ 5 - 0
arch/metag/mm/hugetlbpage.c

@@ -110,6 +110,11 @@ int pud_huge(pud_t pud)
 	return 0;
 }
 
+int pmd_huge_support(void)
+{
+	return 1;
+}
+
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 			     pmd_t *pmd, int write)
 {

+ 5 - 0
arch/mips/mm/hugetlbpage.c

@@ -85,6 +85,11 @@ int pud_huge(pud_t pud)
 	return (pud_val(pud) & _PAGE_HUGE) != 0;
 }
 
+int pmd_huge_support(void)
+{
+	return 1;
+}
+
 struct page *
 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 		pmd_t *pmd, int write)

+ 10 - 0
arch/powerpc/mm/hugetlbpage.c

@@ -86,6 +86,11 @@ int pgd_huge(pgd_t pgd)
 	 */
 	return ((pgd_val(pgd) & 0x3) != 0x0);
 }
+
+int pmd_huge_support(void)
+{
+	return 1;
+}
 #else
 int pmd_huge(pmd_t pmd)
 {
@@ -101,6 +106,11 @@ int pgd_huge(pgd_t pgd)
 {
 	return 0;
 }
+
+int pmd_huge_support(void)
+{
+	return 0;
+}
 #endif
 
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)

+ 2 - 1
arch/s390/Kconfig

@@ -526,6 +526,7 @@ config CRASH_DUMP
 	bool "kernel crash dumps"
 	depends on 64BIT && SMP
 	select KEXEC
+	select ZFCPDUMP
 	help
 	  Generate crash dump after being started by kexec.
 	  Crash dump kernels are loaded in the main kernel with kexec-tools
@@ -536,7 +537,7 @@ config CRASH_DUMP
 config ZFCPDUMP
 	def_bool n
 	prompt "zfcpdump support"
-	select SMP
+	depends on SMP
 	help
 	  Select this option if you want to build an zfcpdump enabled kernel.
 	  Refer to <file:Documentation/s390/zfcpdump.txt> for more details on this.

+ 3 - 1
arch/s390/include/asm/kprobes.h

@@ -31,6 +31,8 @@
 #include <linux/ptrace.h>
 #include <linux/percpu.h>
 
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+
 struct pt_regs;
 struct kprobe;
 
@@ -57,7 +59,7 @@ typedef u16 kprobe_opcode_t;
 /* Architecture specific copy of original instruction */
 struct arch_specific_insn {
 	/* copy of original instruction */
-	kprobe_opcode_t insn[MAX_INSN_SIZE];
+	kprobe_opcode_t *insn;
 };
 
 struct prev_kprobe {

+ 1 - 0
arch/s390/include/asm/sclp.h

@@ -56,5 +56,6 @@ bool sclp_has_linemode(void);
 bool sclp_has_vt220(void);
 int sclp_pci_configure(u32 fid);
 int sclp_pci_deconfigure(u32 fid);
+int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode);
 
 #endif /* _ASM_S390_SCLP_H */

+ 179 - 40
arch/s390/kernel/crash_dump.c

@@ -16,6 +16,7 @@
 #include <asm/os_info.h>
 #include <asm/elf.h>
 #include <asm/ipl.h>
+#include <asm/sclp.h>
 
 #define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y)))
 #define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
@@ -64,22 +65,46 @@ static ssize_t copy_page_real(void *buf, void *src, size_t csize)
 }
 
 /*
- * Copy one page from "oldmem"
+ * Pointer to ELF header in new kernel
+ */
+static void *elfcorehdr_newmem;
+
+/*
+ * Copy one page from zfcpdump "oldmem"
+ *
+ * For pages below ZFCPDUMP_HSA_SIZE memory from the HSA is copied. Otherwise
+ * real memory copy is used.
+ */
+static ssize_t copy_oldmem_page_zfcpdump(char *buf, size_t csize,
+					 unsigned long src, int userbuf)
+{
+	int rc;
+
+	if (src < ZFCPDUMP_HSA_SIZE) {
+		rc = memcpy_hsa(buf, src, csize, userbuf);
+	} else {
+		if (userbuf)
+			rc = copy_to_user_real((void __force __user *) buf,
+					       (void *) src, csize);
+		else
+			rc = memcpy_real(buf, (void *) src, csize);
+	}
+	return rc ? rc : csize;
+}
+
+/*
+ * Copy one page from kdump "oldmem"
  *
  * For the kdump reserved memory this functions performs a swap operation:
  *  - [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE] is mapped to [0 - OLDMEM_SIZE].
  *  - [0 - OLDMEM_SIZE] is mapped to [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE]
  */
-ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
-			 size_t csize, unsigned long offset, int userbuf)
+static ssize_t copy_oldmem_page_kdump(char *buf, size_t csize,
+				      unsigned long src, int userbuf)
+
 {
-	unsigned long src;
 	int rc;
 
-	if (!csize)
-		return 0;
-
-	src = (pfn << PAGE_SHIFT) + offset;
 	if (src < OLDMEM_SIZE)
 		src += OLDMEM_BASE;
 	else if (src > OLDMEM_BASE &&
@@ -90,7 +115,88 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 				       (void *) src, csize);
 	else
 		rc = copy_page_real(buf, (void *) src, csize);
-	return (rc == 0) ? csize : rc;
+	return (rc == 0) ? rc : csize;
+}
+
+/*
+ * Copy one page from "oldmem"
+ */
+ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
+			 unsigned long offset, int userbuf)
+{
+	unsigned long src;
+
+	if (!csize)
+		return 0;
+	src = (pfn << PAGE_SHIFT) + offset;
+	if (OLDMEM_BASE)
+		return copy_oldmem_page_kdump(buf, csize, src, userbuf);
+	else
+		return copy_oldmem_page_zfcpdump(buf, csize, src, userbuf);
+}
+
+/*
+ * Remap "oldmem" for kdump
+ *
+ * For the kdump reserved memory this functions performs a swap operation:
+ * [0 - OLDMEM_SIZE] is mapped to [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE]
+ */
+static int remap_oldmem_pfn_range_kdump(struct vm_area_struct *vma,
+					unsigned long from, unsigned long pfn,
+					unsigned long size, pgprot_t prot)
+{
+	unsigned long size_old;
+	int rc;
+
+	if (pfn < OLDMEM_SIZE >> PAGE_SHIFT) {
+		size_old = min(size, OLDMEM_SIZE - (pfn << PAGE_SHIFT));
+		rc = remap_pfn_range(vma, from,
+				     pfn + (OLDMEM_BASE >> PAGE_SHIFT),
+				     size_old, prot);
+		if (rc || size == size_old)
+			return rc;
+		size -= size_old;
+		from += size_old;
+		pfn += size_old >> PAGE_SHIFT;
+	}
+	return remap_pfn_range(vma, from, pfn, size, prot);
+}
+
+/*
+ * Remap "oldmem" for zfcpdump
+ *
+ * We only map available memory above ZFCPDUMP_HSA_SIZE. Memory below
+ * ZFCPDUMP_HSA_SIZE is read on demand using the copy_oldmem_page() function.
+ */
+static int remap_oldmem_pfn_range_zfcpdump(struct vm_area_struct *vma,
+					   unsigned long from,
+					   unsigned long pfn,
+					   unsigned long size, pgprot_t prot)
+{
+	unsigned long size_hsa;
+
+	if (pfn < ZFCPDUMP_HSA_SIZE >> PAGE_SHIFT) {
+		size_hsa = min(size, ZFCPDUMP_HSA_SIZE - (pfn << PAGE_SHIFT));
+		if (size == size_hsa)
+			return 0;
+		size -= size_hsa;
+		from += size_hsa;
+		pfn += size_hsa >> PAGE_SHIFT;
+	}
+	return remap_pfn_range(vma, from, pfn, size, prot);
+}
+
+/*
+ * Remap "oldmem" for kdump or zfcpdump
+ */
+int remap_oldmem_pfn_range(struct vm_area_struct *vma, unsigned long from,
+			   unsigned long pfn, unsigned long size, pgprot_t prot)
+{
+	if (OLDMEM_BASE)
+		return remap_oldmem_pfn_range_kdump(vma, from, pfn, size, prot);
+	else
+		return remap_oldmem_pfn_range_zfcpdump(vma, from, pfn, size,
+						       prot);
 }
 
 /*
@@ -101,11 +207,21 @@ int copy_from_oldmem(void *dest, void *src, size_t count)
 	unsigned long copied = 0;
 	int rc;
 
-	if ((unsigned long) src < OLDMEM_SIZE) {
-		copied = min(count, OLDMEM_SIZE - (unsigned long) src);
-		rc = memcpy_real(dest, src + OLDMEM_BASE, copied);
-		if (rc)
-			return rc;
+	if (OLDMEM_BASE) {
+		if ((unsigned long) src < OLDMEM_SIZE) {
+			copied = min(count, OLDMEM_SIZE - (unsigned long) src);
+			rc = memcpy_real(dest, src + OLDMEM_BASE, copied);
+			if (rc)
+				return rc;
+		}
+	} else {
+		if ((unsigned long) src < ZFCPDUMP_HSA_SIZE) {
+			copied = min(count,
+				     ZFCPDUMP_HSA_SIZE - (unsigned long) src);
+			rc = memcpy_hsa(dest, (unsigned long) src, copied, 0);
+			if (rc)
+				return rc;
+		}
 	}
 	return memcpy_real(dest + copied, src + copied, count - copied);
 }
@@ -367,14 +483,6 @@ static int get_mem_chunk_cnt(void)
 	return cnt;
 }
 
-/*
- * Relocate pointer in order to allow vmcore code access the data
- */
-static inline unsigned long relocate(unsigned long addr)
-{
-	return OLDMEM_BASE + addr;
-}
-
 /*
  * Initialize ELF loads (new kernel)
  */
@@ -426,7 +534,7 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
 	ptr = nt_vmcoreinfo(ptr);
 	memset(phdr, 0, sizeof(*phdr));
 	phdr->p_type = PT_NOTE;
-	phdr->p_offset = relocate(notes_offset);
+	phdr->p_offset = notes_offset;
 	phdr->p_filesz = (unsigned long) PTR_SUB(ptr, ptr_start);
 	phdr->p_memsz = phdr->p_filesz;
 	return ptr;
@@ -435,7 +543,7 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
 /*
  * Create ELF core header (new kernel)
  */
-static void s390_elf_corehdr_create(char **elfcorebuf, size_t *elfcorebuf_sz)
+int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
 {
 	Elf64_Phdr *phdr_notes, *phdr_loads;
 	int mem_chunk_cnt;
@@ -443,6 +551,12 @@ static void s390_elf_corehdr_create(char **elfcorebuf, size_t *elfcorebuf_sz)
 	u32 alloc_size;
 	u64 hdr_off;
 
+	/* If we are not in kdump or zfcpdump mode return */
+	if (!OLDMEM_BASE && ipl_info.type != IPL_TYPE_FCP_DUMP)
+		return 0;
+	/* If elfcorehdr= has been passed via cmdline, we use that one */
+	if (elfcorehdr_addr != ELFCORE_ADDR_MAX)
+		return 0;
 	mem_chunk_cnt = get_mem_chunk_cnt();
 
 	alloc_size = 0x1000 + get_cpu_cnt() * 0x300 +
@@ -460,27 +574,52 @@ static void s390_elf_corehdr_create(char **elfcorebuf, size_t *elfcorebuf_sz)
 	ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off);
 	/* Init loads */
 	hdr_off = PTR_DIFF(ptr, hdr);
-	loads_init(phdr_loads, ((unsigned long) hdr) + hdr_off);
-	*elfcorebuf_sz = hdr_off;
-	*elfcorebuf = (void *) relocate((unsigned long) hdr);
-	BUG_ON(*elfcorebuf_sz > alloc_size);
+	loads_init(phdr_loads, hdr_off);
+	*addr = (unsigned long long) hdr;
+	elfcorehdr_newmem = hdr;
+	*size = (unsigned long long) hdr_off;
+	BUG_ON(elfcorehdr_size > alloc_size);
+	return 0;
 }
 
 /*
- * Create kdump ELF core header in new kernel, if it has not been passed via
- * the "elfcorehdr" kernel parameter
+ * Free ELF core header (new kernel)
  */
-static int setup_kdump_elfcorehdr(void)
+void elfcorehdr_free(unsigned long long addr)
 {
-	size_t elfcorebuf_sz;
-	char *elfcorebuf;
+	if (!elfcorehdr_newmem)
+		return;
+	kfree((void *)(unsigned long)addr);
+}
 
-	if (!OLDMEM_BASE || is_kdump_kernel())
-		return -EINVAL;
-	s390_elf_corehdr_create(&elfcorebuf, &elfcorebuf_sz);
-	elfcorehdr_addr = (unsigned long long) elfcorebuf;
-	elfcorehdr_size = elfcorebuf_sz;
-	return 0;
+/*
+ * Read from ELF header
+ */
+ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
+{
+	void *src = (void *)(unsigned long)*ppos;
+
+	src = elfcorehdr_newmem ? src : src - OLDMEM_BASE;
+	memcpy(buf, src, count);
+	*ppos += count;
+	return count;
 }
 
-subsys_initcall(setup_kdump_elfcorehdr);
+/*
+ * Read from ELF notes data
+ */
+ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
+{
+	void *src = (void *)(unsigned long)*ppos;
+	int rc;
+
+	if (elfcorehdr_newmem) {
+		memcpy(buf, src, count);
+	} else {
+		rc = copy_from_oldmem(buf, src, count);
+		if (rc)
+			return rc;
+	}
+	*ppos += count;
+	return count;
+}

+ 137 - 7
arch/s390/kernel/kprobes.c

@@ -37,6 +37,26 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
 struct kretprobe_blackpoint kretprobe_blacklist[] = { };
 
+DEFINE_INSN_CACHE_OPS(dmainsn);
+
+static void *alloc_dmainsn_page(void)
+{
+	return (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
+}
+
+static void free_dmainsn_page(void *page)
+{
+	free_page((unsigned long)page);
+}
+
+struct kprobe_insn_cache kprobe_dmainsn_slots = {
+	.mutex = __MUTEX_INITIALIZER(kprobe_dmainsn_slots.mutex),
+	.alloc = alloc_dmainsn_page,
+	.free = free_dmainsn_page,
+	.pages = LIST_HEAD_INIT(kprobe_dmainsn_slots.pages),
+	.insn_size = MAX_INSN_SIZE,
+};
+
 static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
 {
 	switch (insn[0] >> 8) {
@@ -100,9 +120,8 @@ static int __kprobes get_fixup_type(kprobe_opcode_t *insn)
 			fixup |= FIXUP_RETURN_REGISTER;
 		break;
 	case 0xc0:
-		if ((insn[0] & 0x0f) == 0x00 ||	/* larl  */
-		    (insn[0] & 0x0f) == 0x05)	/* brasl */
-		fixup |= FIXUP_RETURN_REGISTER;
+		if ((insn[0] & 0x0f) == 0x05)	/* brasl */
+			fixup |= FIXUP_RETURN_REGISTER;
 		break;
 	case 0xeb:
 		switch (insn[2] & 0xff) {
@@ -134,18 +153,128 @@ static int __kprobes get_fixup_type(kprobe_opcode_t *insn)
 	return fixup;
 }
 
+static int __kprobes is_insn_relative_long(kprobe_opcode_t *insn)
+{
+	/* Check if we have a RIL-b or RIL-c format instruction which
+	 * we need to modify in order to avoid instruction emulation. */
+	switch (insn[0] >> 8) {
+	case 0xc0:
+		if ((insn[0] & 0x0f) == 0x00) /* larl */
+			return true;
+		break;
+	case 0xc4:
+		switch (insn[0] & 0x0f) {
+		case 0x02: /* llhrl  */
+		case 0x04: /* lghrl  */
+		case 0x05: /* lhrl   */
+		case 0x06: /* llghrl */
+		case 0x07: /* sthrl  */
+		case 0x08: /* lgrl   */
+		case 0x0b: /* stgrl  */
+		case 0x0c: /* lgfrl  */
+		case 0x0d: /* lrl    */
+		case 0x0e: /* llgfrl */
+		case 0x0f: /* strl   */
+			return true;
+		}
+		break;
+	case 0xc6:
+		switch (insn[0] & 0x0f) {
+		case 0x00: /* exrl   */
+		case 0x02: /* pfdrl  */
+		case 0x04: /* cghrl  */
+		case 0x05: /* chrl   */
+		case 0x06: /* clghrl */
+		case 0x07: /* clhrl  */
+		case 0x08: /* cgrl   */
+		case 0x0a: /* clgrl  */
+		case 0x0c: /* cgfrl  */
+		case 0x0d: /* crl    */
+		case 0x0e: /* clgfrl */
+		case 0x0f: /* clrl   */
+			return true;
+		}
+		break;
+	}
+	return false;
+}
+
+static void __kprobes copy_instruction(struct kprobe *p)
+{
+	s64 disp, new_disp;
+	u64 addr, new_addr;
+
+	memcpy(p->ainsn.insn, p->addr, ((p->opcode >> 14) + 3) & -2);
+	if (!is_insn_relative_long(p->ainsn.insn))
+		return;
+	/*
+	 * For pc-relative instructions in RIL-b or RIL-c format patch the
+	 * RI2 displacement field. We have already made sure that the insn
+	 * slot for the patched instruction is within the same 2GB area
+	 * as the original instruction (either kernel image or module area).
+	 * Therefore the new displacement will always fit.
+	 */
+	disp = *(s32 *)&p->ainsn.insn[1];
+	addr = (u64)(unsigned long)p->addr;
+	new_addr = (u64)(unsigned long)p->ainsn.insn;
+	new_disp = ((addr + (disp * 2)) - new_addr) / 2;
+	*(s32 *)&p->ainsn.insn[1] = new_disp;
+}
+
+static inline int is_kernel_addr(void *addr)
+{
+	return addr < (void *)_end;
+}
+
+static inline int is_module_addr(void *addr)
+{
+#ifdef CONFIG_64BIT
+	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
+	if (addr < (void *)MODULES_VADDR)
+		return 0;
+	if (addr > (void *)MODULES_END)
+		return 0;
+#endif
+	return 1;
+}
+
+static int __kprobes s390_get_insn_slot(struct kprobe *p)
+{
+	/*
+	 * Get an insn slot that is within the same 2GB area like the original
+	 * instruction. That way instructions with a 32bit signed displacement
+	 * field can be patched and executed within the insn slot.
+	 */
+	p->ainsn.insn = NULL;
+	if (is_kernel_addr(p->addr))
+		p->ainsn.insn = get_dmainsn_slot();
+	if (is_module_addr(p->addr))
+		p->ainsn.insn = get_insn_slot();
+	return p->ainsn.insn ? 0 : -ENOMEM;
+}
+
+static void __kprobes s390_free_insn_slot(struct kprobe *p)
+{
+	if (!p->ainsn.insn)
+		return;
+	if (is_kernel_addr(p->addr))
+		free_dmainsn_slot(p->ainsn.insn, 0);
+	else
+		free_insn_slot(p->ainsn.insn, 0);
+	p->ainsn.insn = NULL;
+}
+
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
 	if ((unsigned long) p->addr & 0x01)
 		return -EINVAL;
-
 	/* Make sure the probe isn't going on a difficult instruction */
 	if (is_prohibited_opcode(p->addr))
 		return -EINVAL;
-
+	if (s390_get_insn_slot(p))
+		return -ENOMEM;
 	p->opcode = *p->addr;
-	memcpy(p->ainsn.insn, p->addr, ((p->opcode >> 14) + 3) & -2);
-
+	copy_instruction(p);
 	return 0;
 }
 
@@ -186,6 +315,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
+	s390_free_insn_slot(p);
 }
 
 static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb,
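
The displacement fix-up performed by copy_instruction() above is plain arithmetic; a stand-alone sketch with made-up addresses shows that the patched copy still resolves to the original target:

/* Sketch of the RI2 displacement fix-up done by copy_instruction().
 * All addresses below are hypothetical. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr     = 0x500000;	/* original pc-relative instruction */
	uint64_t new_addr = 0x700000;	/* its copy in the insn slot        */
	int64_t  disp     = 0x1234;	/* RI2 field, counted in halfwords  */

	uint64_t target   = addr + disp * 2;
	int64_t  new_disp = ((int64_t)target - (int64_t)new_addr) / 2;

	/* The copy must still reference the same absolute address. */
	assert(new_addr + new_disp * 2 == target);
	printf("disp %lld -> %lld, target %#llx\n",
	       (long long)disp, (long long)new_disp,
	       (unsigned long long)target);
	return 0;
}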

+ 5 - 0
arch/s390/mm/hugetlbpage.c

@@ -223,6 +223,11 @@ int pud_huge(pud_t pud)
 	return 0;
 }
 
+int pmd_huge_support(void)
+{
+	return 1;
+}
+
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 			     pmd_t *pmdp, int write)
 {

+ 5 - 0
arch/sh/mm/hugetlbpage.c

@@ -83,6 +83,11 @@ int pud_huge(pud_t pud)
 	return 0;
 }
 
+int pmd_huge_support(void)
+{
+	return 0;
+}
+
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 			     pmd_t *pmd, int write)
 {

+ 6 - 6
arch/sparc/kernel/sys_sparc32.c

@@ -169,10 +169,10 @@ COMPAT_SYSCALL_DEFINE5(rt_sigaction, int, sig,
 		new_ka.ka_restorer = restorer;
 		ret = get_user(u_handler, &act->sa_handler);
 		new_ka.sa.sa_handler =  compat_ptr(u_handler);
-		ret |= __copy_from_user(&set32, &act->sa_mask, sizeof(compat_sigset_t));
+		ret |= copy_from_user(&set32, &act->sa_mask, sizeof(compat_sigset_t));
 		sigset_from_compat(&new_ka.sa.sa_mask, &set32);
-		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
-		ret |= __get_user(u_restorer, &act->sa_restorer);
+		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
+		ret |= get_user(u_restorer, &act->sa_restorer);
 		new_ka.sa.sa_restorer = compat_ptr(u_restorer);
                 if (ret)
                 	return -EFAULT;
@@ -183,9 +183,9 @@ COMPAT_SYSCALL_DEFINE5(rt_sigaction, int, sig,
 	if (!ret && oact) {
 		sigset_to_compat(&set32, &old_ka.sa.sa_mask);
 		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler);
-		ret |= __copy_to_user(&oact->sa_mask, &set32, sizeof(compat_sigset_t));
-		ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
-		ret |= __put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer);
+		ret |= copy_to_user(&oact->sa_mask, &set32, sizeof(compat_sigset_t));
+		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer);
 		if (ret)
 			ret = -EFAULT;
         }

+ 5 - 0
arch/sparc/mm/hugetlbpage.c

@@ -234,6 +234,11 @@ int pud_huge(pud_t pud)
 	return 0;
 }
 
+int pmd_huge_support(void)
+{
+	return 0;
+}
+
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 			     pmd_t *pmd, int write)
 {

+ 5 - 0
arch/tile/mm/hugetlbpage.c

@@ -166,6 +166,11 @@ int pud_huge(pud_t pud)
 	return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
 }
 
+int pmd_huge_support(void)
+{
+	return 1;
+}
+
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 			     pmd_t *pmd, int write)
 {

+ 19 - 15
arch/x86/include/asm/pgtable.h

@@ -315,21 +315,6 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
 	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
 }
 
-static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
-{
-	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
-}
-
-static inline int pte_swp_soft_dirty(pte_t pte)
-{
-	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
-}
-
-static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
-{
-	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
-}
-
 static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
 {
 	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
@@ -446,6 +431,7 @@ pte_t *populate_extra_pte(unsigned long vaddr);
 
 #ifndef __ASSEMBLY__
 #include <linux/mm_types.h>
+#include <linux/mmdebug.h>
 #include <linux/log2.h>
 
 static inline int pte_none(pte_t pte)
@@ -864,6 +850,24 @@ static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
 {
 }
 
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+	VM_BUG_ON(pte_present(pte));
+	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+	VM_BUG_ON(pte_present(pte));
+	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+	VM_BUG_ON(pte_present(pte));
+	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
+}
+
 #include <asm-generic/pgtable.h>
 #endif	/* __ASSEMBLY__ */
 

+ 3 - 0
arch/x86/include/asm/pgtable_types.h

@@ -75,6 +75,9 @@
  * with swap entry format. On x86 bits 6 and 7 are *not* involved
  * into swap entry computation, but bit 6 is used for nonlinear
  * file mapping, so we borrow bit 7 for soft dirty tracking.
+ *
+ * Please note that this bit must be treated as swap dirty page
+ * mark if and only if the PTE has present bit clear!
  */
 #ifdef CONFIG_MEM_SOFT_DIRTY
 #define _PAGE_SWP_SOFT_DIRTY	_PAGE_PSE

+ 31 - 6
arch/x86/include/asm/tlbflush.h

@@ -62,6 +62,7 @@ static inline void __flush_tlb_all(void)
 
 static inline void __flush_tlb_one(unsigned long addr)
 {
+	count_vm_event(NR_TLB_LOCAL_FLUSH_ONE);
 	__flush_tlb_single(addr);
 }
 
@@ -84,14 +85,38 @@ static inline void __flush_tlb_one(unsigned long addr)
 
 #ifndef CONFIG_SMP
 
-#define flush_tlb() __flush_tlb()
-#define flush_tlb_all() __flush_tlb_all()
-#define local_flush_tlb() __flush_tlb()
+/* "_up" is for UniProcessor.
+ *
+ * This is a helper for other header functions.  *Not* intended to be called
+ * directly.  All global TLB flushes need to either call this, or to bump the
+ * vm statistics themselves.
+ */
+static inline void __flush_tlb_up(void)
+{
+	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	__flush_tlb();
+}
+
+static inline void flush_tlb_all(void)
+{
+	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	__flush_tlb_all();
+}
+
+static inline void flush_tlb(void)
+{
+	__flush_tlb_up();
+}
+
+static inline void local_flush_tlb(void)
+{
+	__flush_tlb_up();
+}
 
 static inline void flush_tlb_mm(struct mm_struct *mm)
 {
 	if (mm == current->active_mm)
-		__flush_tlb();
+		__flush_tlb_up();
 }
 
 static inline void flush_tlb_page(struct vm_area_struct *vma,
@@ -105,14 +130,14 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
 	if (vma->vm_mm == current->active_mm)
-		__flush_tlb();
+		__flush_tlb_up();
 }
 
 static inline void flush_tlb_mm_range(struct mm_struct *mm,
 	   unsigned long start, unsigned long end, unsigned long vmflag)
 {
 	if (mm == current->active_mm)
-		__flush_tlb();
+		__flush_tlb_up();
 }
 
 static inline void native_flush_tlb_others(const struct cpumask *cpumask,

+ 2 - 0
arch/x86/kernel/cpu/mtrr/generic.c

@@ -683,6 +683,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
 	}
 
 	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
+	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
 	__flush_tlb();
 
 	/* Save MTRR state */
@@ -696,6 +697,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
 static void post_set(void) __releases(set_atomicity_lock)
 {
 	/* Flush TLBs (no need to flush caches - they are disabled) */
+	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
 	__flush_tlb();
 
 	/* Intel (P6) standard MTRRs */

+ 8 - 0
arch/x86/mm/hugetlbpage.c

@@ -59,6 +59,10 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 	return NULL;
 }
 
+int pmd_huge_support(void)
+{
+	return 0;
+}
 #else
 
 struct page *
@@ -77,6 +81,10 @@ int pud_huge(pud_t pud)
 	return !!(pud_val(pud) & _PAGE_PSE);
 }
 
+int pmd_huge_support(void)
+{
+	return 1;
+}
 #endif
 
 /* x86_64 also uses this file */

+ 11 - 3
arch/x86/mm/tlb.c

@@ -103,6 +103,7 @@ static void flush_tlb_func(void *info)
 	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
 		return;
 
+	count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
 		if (f->flush_end == TLB_FLUSH_ALL)
 			local_flush_tlb();
@@ -130,6 +131,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 	info.flush_start = start;
 	info.flush_end = end;
 
+	count_vm_event(NR_TLB_REMOTE_FLUSH);
 	if (is_uv_system()) {
 		unsigned int cpu;
 
@@ -149,6 +151,7 @@ void flush_tlb_current_task(void)
 
 	preempt_disable();
 
+	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
 	local_flush_tlb();
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
 		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
@@ -211,16 +214,19 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 	act_entries = mm->total_vm > tlb_entries ? tlb_entries : mm->total_vm;
 
 	/* tlb_flushall_shift is on balance point, details in commit log */
-	if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)
+	if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift) {
+		count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
 		local_flush_tlb();
-	else {
+	} else {
 		if (has_large_page(mm, start, end)) {
 			local_flush_tlb();
 			goto flush_all;
 		}
 		/* flush range by one by one 'invlpg' */
-		for (addr = start; addr < end;	addr += PAGE_SIZE)
+		for (addr = start; addr < end;	addr += PAGE_SIZE) {
+			count_vm_event(NR_TLB_LOCAL_FLUSH_ONE);
 			__flush_tlb_single(addr);
+		}
 
 		if (cpumask_any_but(mm_cpumask(mm),
 				smp_processor_id()) < nr_cpu_ids)
@@ -256,6 +262,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
 
 static void do_flush_tlb_all(void *info)
 {
+	count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
 	__flush_tlb_all();
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
 		leave_mm(smp_processor_id());
@@ -263,6 +270,7 @@ static void do_flush_tlb_all(void *info)
 
 void flush_tlb_all(void)
 {
+	count_vm_event(NR_TLB_REMOTE_FLUSH);
 	on_each_cpu(do_flush_tlb_all, NULL, 1);
 }
 

+ 6 - 0
block/Kconfig

@@ -99,6 +99,12 @@ config BLK_DEV_THROTTLING
 
 	See Documentation/cgroups/blkio-controller.txt for more information.
 
+config CMDLINE_PARSER
+	bool "Block device command line partition parser"
+	default n
+	---help---
+	Parsing command line, get the partitions information.
+
 menu "Partition Types"
 
 source "block/partitions/Kconfig"

+ 1 - 0
block/Makefile

@@ -18,3 +18,4 @@ obj-$(CONFIG_IOSCHED_CFQ)	+= cfq-iosched.o
 
 obj-$(CONFIG_BLOCK_COMPAT)	+= compat_ioctl.o
 obj-$(CONFIG_BLK_DEV_INTEGRITY)	+= blk-integrity.o
+obj-$(CONFIG_CMDLINE_PARSER)	+= cmdline-parser.o

+ 1 - 1
block/blk-ioc.c

@@ -367,7 +367,7 @@ struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
 	if (!icq)
 		return NULL;
 
-	if (radix_tree_preload(gfp_mask) < 0) {
+	if (radix_tree_maybe_preload(gfp_mask) < 0) {
 		kmem_cache_free(et->icq_cache, icq);
 		return NULL;
 	}

+ 1 - 1
block/blk-sysfs.c

@@ -29,7 +29,7 @@ queue_var_store(unsigned long *var, const char *page, size_t count)
 	int err;
 	unsigned long v;
 
-	err = strict_strtoul(page, 10, &v);
+	err = kstrtoul(page, 10, &v);
 	if (err || v > UINT_MAX)
 		return -EINVAL;
 

+ 250 - 0
block/cmdline-parser.c

@@ -0,0 +1,250 @@
+/*
+ * Parse command line, get partition information
+ *
+ * Written by Cai Zhiyong <caizhiyong@huawei.com>
+ *
+ */
+#include <linux/buffer_head.h>
+#include <linux/module.h>
+#include <linux/cmdline-parser.h>
+
+static int parse_subpart(struct cmdline_subpart **subpart, char *partdef)
+{
+	int ret = 0;
+	struct cmdline_subpart *new_subpart;
+
+	*subpart = NULL;
+
+	new_subpart = kzalloc(sizeof(struct cmdline_subpart), GFP_KERNEL);
+	if (!new_subpart)
+		return -ENOMEM;
+
+	if (*partdef == '-') {
+		new_subpart->size = (sector_t)(~0ULL);
+		partdef++;
+	} else {
+		new_subpart->size = (sector_t)memparse(partdef, &partdef);
+		if (new_subpart->size < (sector_t)PAGE_SIZE) {
+			pr_warn("cmdline partition size is invalid.");
+			ret = -EINVAL;
+			goto fail;
+		}
+	}
+
+	if (*partdef == '@') {
+		partdef++;
+		new_subpart->from = (sector_t)memparse(partdef, &partdef);
+	} else {
+		new_subpart->from = (sector_t)(~0ULL);
+	}
+
+	if (*partdef == '(') {
+		int length;
+		char *next = strchr(++partdef, ')');
+
+		if (!next) {
+			pr_warn("cmdline partition format is invalid.");
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		length = min_t(int, next - partdef,
+			       sizeof(new_subpart->name) - 1);
+		strncpy(new_subpart->name, partdef, length);
+		new_subpart->name[length] = '\0';
+
+		partdef = ++next;
+	} else
+		new_subpart->name[0] = '\0';
+
+	new_subpart->flags = 0;
+
+	if (!strncmp(partdef, "ro", 2)) {
+		new_subpart->flags |= PF_RDONLY;
+		partdef += 2;
+	}
+
+	if (!strncmp(partdef, "lk", 2)) {
+		new_subpart->flags |= PF_POWERUP_LOCK;
+		partdef += 2;
+	}
+
+	*subpart = new_subpart;
+	return 0;
+fail:
+	kfree(new_subpart);
+	return ret;
+}
+
+static void free_subpart(struct cmdline_parts *parts)
+{
+	struct cmdline_subpart *subpart;
+
+	while (parts->subpart) {
+		subpart = parts->subpart;
+		parts->subpart = subpart->next_subpart;
+		kfree(subpart);
+	}
+}
+
+static int parse_parts(struct cmdline_parts **parts, const char *bdevdef)
+{
+	int ret = -EINVAL;
+	char *next;
+	int length;
+	struct cmdline_subpart **next_subpart;
+	struct cmdline_parts *newparts;
+	char buf[BDEVNAME_SIZE + 32 + 4];
+
+	*parts = NULL;
+
+	newparts = kzalloc(sizeof(struct cmdline_parts), GFP_KERNEL);
+	if (!newparts)
+		return -ENOMEM;
+
+	next = strchr(bdevdef, ':');
+	if (!next) {
+		pr_warn("cmdline partition has no block device.");
+		goto fail;
+	}
+
+	length = min_t(int, next - bdevdef, sizeof(newparts->name) - 1);
+	strncpy(newparts->name, bdevdef, length);
+	newparts->name[length] = '\0';
+	newparts->nr_subparts = 0;
+
+	next_subpart = &newparts->subpart;
+
+	while (next && *(++next)) {
+		bdevdef = next;
+		next = strchr(bdevdef, ',');
+
+		length = (!next) ? (sizeof(buf) - 1) :
+			min_t(int, next - bdevdef, sizeof(buf) - 1);
+
+		strncpy(buf, bdevdef, length);
+		buf[length] = '\0';
+
+		ret = parse_subpart(next_subpart, buf);
+		if (ret)
+			goto fail;
+
+		newparts->nr_subparts++;
+		next_subpart = &(*next_subpart)->next_subpart;
+	}
+
+	if (!newparts->subpart) {
+		pr_warn("cmdline partition has no valid partition.");
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	*parts = newparts;
+
+	return 0;
+fail:
+	free_subpart(newparts);
+	kfree(newparts);
+	return ret;
+}
+
+void cmdline_parts_free(struct cmdline_parts **parts)
+{
+	struct cmdline_parts *next_parts;
+
+	while (*parts) {
+		next_parts = (*parts)->next_parts;
+		free_subpart(*parts);
+		kfree(*parts);
+		*parts = next_parts;
+	}
+}
+
+int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline)
+{
+	int ret;
+	char *buf;
+	char *pbuf;
+	char *next;
+	struct cmdline_parts **next_parts;
+
+	*parts = NULL;
+
+	next = pbuf = buf = kstrdup(cmdline, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	next_parts = parts;
+
+	while (next && *pbuf) {
+		next = strchr(pbuf, ';');
+		if (next)
+			*next = '\0';
+
+		ret = parse_parts(next_parts, pbuf);
+		if (ret)
+			goto fail;
+
+		if (next)
+			pbuf = ++next;
+
+		next_parts = &(*next_parts)->next_parts;
+	}
+
+	if (!*parts) {
+		pr_warn("cmdline partition has no valid partition.");
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	ret = 0;
+done:
+	kfree(buf);
+	return ret;
+
+fail:
+	cmdline_parts_free(parts);
+	goto done;
+}
+
+struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
+					 const char *bdev)
+{
+	while (parts && strncmp(bdev, parts->name, sizeof(parts->name)))
+		parts = parts->next_parts;
+	return parts;
+}
+
+/*
+ *  add_part() return values:
+ *    0 - success.
+ *    1 - no more partitions can be added.
+ */
+void cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
+		       int slot,
+		       int (*add_part)(int, struct cmdline_subpart *, void *),
+		       void *param)
+
+{
+	sector_t from = 0;
+	struct cmdline_subpart *subpart;
+
+	for (subpart = parts->subpart; subpart;
+	     subpart = subpart->next_subpart, slot++) {
+		if (subpart->from == (sector_t)(~0ULL))
+			subpart->from = from;
+		else
+			from = subpart->from;
+
+		if (from >= disk_size)
+			break;
+
+		if (subpart->size > (disk_size - from))
+			subpart->size = disk_size - from;
+
+		from += subpart->size;
+
+		if (add_part(slot, subpart, param))
+			break;
+	}
+}
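To make the grammar accepted by parse_subpart()/parse_parts() concrete, here is a hedged kernel-side sketch of feeding the parser a blkdevparts-style string and walking one device's partitions. The example string and device names are made up; only the cmdline_parts_* API and the struct fields come from the code above. On a real system the same string (sizes and offsets in bytes) would be passed as blkdevparts=... on the boot command line, see the __setup() hook in block/partitions/cmdline.c below.

#include <linux/kernel.h>
#include <linux/cmdline-parser.h>

/* Hypothetical input: <bdev>:<size>[@<from>](<name>)[ro][lk][,...][;<bdev>...] */
static const char example[] =
	"mmcblk0:1G(boot)ro,512M@2G(recovery),-(userdata);mmcblk1:-(export)";

/* Callback invoked once per subpartition; returning non-zero stops the walk. */
static int show_part(int slot, struct cmdline_subpart *subpart, void *param)
{
	pr_info("slot %d: %s from %llu size %llu bytes\n", slot,
		subpart->name, (unsigned long long)subpart->from,
		(unsigned long long)subpart->size);
	return 0;
}

/* disk_size is in bytes, matching the cmdline.c caller below. */
static void example_usage(sector_t disk_size)
{
	struct cmdline_parts *parts, *p;

	if (cmdline_parts_parse(&parts, example))
		return;

	p = cmdline_parts_find(parts, "mmcblk0");
	if (p)
		cmdline_parts_set(p, disk_size, 1, show_part, NULL);

	cmdline_parts_free(&parts);
}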

+ 1 - 1
block/compat_ioctl.c

@@ -70,7 +70,7 @@ static int compat_hdio_getgeo(struct gendisk *disk, struct block_device *bdev,
 		return ret;
 
 	ret = copy_to_user(ugeo, &geo, 4);
-	ret |= __put_user(geo.start, &ugeo->start);
+	ret |= put_user(geo.start, &ugeo->start);
 	if (ret)
 		ret = -EFAULT;
 

+ 7 - 0
block/partitions/Kconfig

@@ -260,3 +260,10 @@ config SYSV68_PARTITION
 	  partition table format used by Motorola Delta machines (using
 	  sysv68).
 	  Otherwise, say N.
+
+config CMDLINE_PARTITION
+	bool "Command line partition support" if PARTITION_ADVANCED
+	select CMDLINE_PARSER
+	help
+	  Say Y here if you want to read the partition table from bootargs.
+	  The format for the command line is just like mtdparts.

+ 1 - 0
block/partitions/Makefile

@@ -8,6 +8,7 @@ obj-$(CONFIG_ACORN_PARTITION) += acorn.o
 obj-$(CONFIG_AMIGA_PARTITION) += amiga.o
 obj-$(CONFIG_ATARI_PARTITION) += atari.o
 obj-$(CONFIG_AIX_PARTITION) += aix.o
+obj-$(CONFIG_CMDLINE_PARTITION) += cmdline.o
 obj-$(CONFIG_MAC_PARTITION) += mac.o
 obj-$(CONFIG_LDM_PARTITION) += ldm.o
 obj-$(CONFIG_MSDOS_PARTITION) += msdos.o

+ 4 - 0
block/partitions/check.c

@@ -34,6 +34,7 @@
 #include "efi.h"
 #include "karma.h"
 #include "sysv68.h"
+#include "cmdline.h"
 
 int warn_no_part = 1; /*This is ugly: should make genhd removable media aware*/
 
@@ -65,6 +66,9 @@ static int (*check_part[])(struct parsed_partitions *) = {
 	adfspart_check_ADFS,
 #endif
 
+#ifdef CONFIG_CMDLINE_PARTITION
+	cmdline_partition,
+#endif
 #ifdef CONFIG_EFI_PARTITION
 	efi_partition,		/* this must come before msdos */
 #endif

+ 99 - 0
block/partitions/cmdline.c

@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2013 HUAWEI
+ * Author: Cai Zhiyong <caizhiyong@huawei.com>
+ *
+ * Read block device partition table from command line.
+ * This is intended for fixed block devices (e.g. eMMC) in embedded systems.
+ * No MBR is needed, which saves storage space; the bootloader can access
+ * data on the block device by absolute address, and users can easily
+ * change the partition layout.
+ *
+ * The format for the command line is just like mtdparts.
+ *
+ * For the detailed format, see "Documentation/block/cmdline-partition.txt".
+ *
+ */
+
+#include <linux/cmdline-parser.h>
+
+#include "check.h"
+#include "cmdline.h"
+
+static char *cmdline;
+static struct cmdline_parts *bdev_parts;
+
+static int add_part(int slot, struct cmdline_subpart *subpart, void *param)
+{
+	int label_min;
+	struct partition_meta_info *info;
+	char tmp[sizeof(info->volname) + 4];
+	struct parsed_partitions *state = (struct parsed_partitions *)param;
+
+	if (slot >= state->limit)
+		return 1;
+
+	put_partition(state, slot, subpart->from >> 9,
+		      subpart->size >> 9);
+
+	info = &state->parts[slot].info;
+
+	label_min = min_t(int, sizeof(info->volname) - 1,
+			  sizeof(subpart->name));
+	strncpy(info->volname, subpart->name, label_min);
+	info->volname[label_min] = '\0';
+
+	snprintf(tmp, sizeof(tmp), "(%s)", info->volname);
+	strlcat(state->pp_buf, tmp, PAGE_SIZE);
+
+	state->parts[slot].has_info = true;
+
+	return 0;
+}
+
+static int __init cmdline_parts_setup(char *s)
+{
+	cmdline = s;
+	return 1;
+}
+__setup("blkdevparts=", cmdline_parts_setup);
+
+/*
+ * Purpose: allocate cmdline partitions.
+ * Returns:
+ * -1 if unable to read the partition table
+ *  0 if this isn't our partition table
+ *  1 if successful
+ */
+int cmdline_partition(struct parsed_partitions *state)
+{
+	sector_t disk_size;
+	char bdev[BDEVNAME_SIZE];
+	struct cmdline_parts *parts;
+
+	if (cmdline) {
+		if (bdev_parts)
+			cmdline_parts_free(&bdev_parts);
+
+		if (cmdline_parts_parse(&bdev_parts, cmdline)) {
+			cmdline = NULL;
+			return -1;
+		}
+		cmdline = NULL;
+	}
+
+	if (!bdev_parts)
+		return 0;
+
+	bdevname(state->bdev, bdev);
+	parts = cmdline_parts_find(bdev_parts, bdev);
+	if (!parts)
+		return 0;
+
+	disk_size = get_capacity(state->bdev->bd_disk) << 9;
+
+	cmdline_parts_set(parts, disk_size, 1, add_part, (void *)state);
+
+	strlcat(state->pp_buf, "\n", PAGE_SIZE);
+
+	return 1;
+}

+ 2 - 0
block/partitions/cmdline.h

@@ -0,0 +1,2 @@
+
+int cmdline_partition(struct parsed_partitions *state);

+ 109 - 62
block/partitions/efi.c

@@ -25,6 +25,9 @@
  * TODO:
  *
  * Changelog:
+ * Mon August 5th, 2013 Davidlohr Bueso <davidlohr@hp.com>
+ * - detect hybrid MBRs, tighter pMBR checking & cleanups.
+ *
  * Mon Nov 09 2004 Matt Domsch <Matt_Domsch@dell.com>
  * - test for valid PMBR and valid PGPT before ever reading
  *   AGPT, allow override with 'gpt' kernel command line option.
@@ -149,34 +152,80 @@ static u64 last_lba(struct block_device *bdev)
 		       bdev_logical_block_size(bdev)) - 1ULL;
 }
 
-static inline int
-pmbr_part_valid(struct partition *part)
+static inline int pmbr_part_valid(gpt_mbr_record *part)
 {
-        if (part->sys_ind == EFI_PMBR_OSTYPE_EFI_GPT &&
-            le32_to_cpu(part->start_sect) == 1UL)
-                return 1;
-        return 0;
+	if (part->os_type != EFI_PMBR_OSTYPE_EFI_GPT)
+		goto invalid;
+
+	/* set to 0x00000001 (i.e., the LBA of the GPT Partition Header) */
+	if (le32_to_cpu(part->starting_lba) != GPT_PRIMARY_PARTITION_TABLE_LBA)
+		goto invalid;
+
+	return GPT_MBR_PROTECTIVE;
+invalid:
+	return 0;
 }
 
 /**
  * is_pmbr_valid(): test Protective MBR for validity
  * @mbr: pointer to a legacy mbr structure
+ * @total_sectors: amount of sectors in the device
  *
- * Description: Returns 1 if PMBR is valid, 0 otherwise.
- * Validity depends on two things:
+ * Description: Checks for a valid protective or hybrid
+ * master boot record (MBR). The validity of a pMBR depends
+ * on all of the following properties:
  *  1) MSDOS signature is in the last two bytes of the MBR
  *  2) One partition of type 0xEE is found
+ *
+ * In addition, a hybrid MBR will have up to three additional
+ * primary partitions, which point to the same space that's
+ * marked out by up to three GPT partitions.
+ *
+ * Returns 0 upon invalid MBR, or GPT_MBR_PROTECTIVE or
+ * GPT_MBR_HYBRID depending on the device layout.
  */
-static int
-is_pmbr_valid(legacy_mbr *mbr)
+static int is_pmbr_valid(legacy_mbr *mbr, sector_t total_sectors)
 {
-	int i;
+	int i, part = 0, ret = 0; /* invalid by default */
+
 	if (!mbr || le16_to_cpu(mbr->signature) != MSDOS_MBR_SIGNATURE)
-                return 0;
+		goto done;
+
+	for (i = 0; i < 4; i++) {
+		ret = pmbr_part_valid(&mbr->partition_record[i]);
+		if (ret == GPT_MBR_PROTECTIVE) {
+			part = i;
+			/*
+			 * Ok, we at least know that there's a protective MBR,
+			 * now check if there are other partition types for
+			 * hybrid MBR.
+			 */
+			goto check_hybrid;
+		}
+	}
+
+	if (ret != GPT_MBR_PROTECTIVE)
+		goto done;
+check_hybrid:
 	for (i = 0; i < 4; i++)
-		if (pmbr_part_valid(&mbr->partition_record[i]))
-                        return 1;
-	return 0;
+		if ((mbr->partition_record[i].os_type !=
+			EFI_PMBR_OSTYPE_EFI_GPT) &&
+		    (mbr->partition_record[i].os_type != 0x00))
+			ret = GPT_MBR_HYBRID;
+
+	/*
+	 * Protective MBRs take up the lesser of the whole disk
+	 * or 2 TiB (32bit LBA), ignoring the rest of the disk.
+	 *
+	 * Hybrid MBRs do not necessarily comply with this.
+	 */
+	if (ret == GPT_MBR_PROTECTIVE) {
+		if (le32_to_cpu(mbr->partition_record[part].size_in_lba) !=
+		    min((uint32_t) total_sectors - 1, 0xFFFFFFFF))
+			ret = 0;
+	}
+done:
+	return ret;
 }
 
 /**
@@ -243,8 +292,7 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
 		return NULL;
 
 	if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
-                     (u8 *) pte,
-		     count) < count) {
+			(u8 *) pte, count) < count) {
 		kfree(pte);
                 pte=NULL;
 		return NULL;
@@ -364,7 +412,12 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
 			 (unsigned long long)lastlba);
 		goto fail;
 	}
-
+	if (le64_to_cpu((*gpt)->last_usable_lba) < le64_to_cpu((*gpt)->first_usable_lba)) {
+		pr_debug("GPT: last_usable_lba incorrect: %lld > %lld\n",
+			 (unsigned long long)le64_to_cpu((*gpt)->last_usable_lba),
+			 (unsigned long long)le64_to_cpu((*gpt)->first_usable_lba));
+		goto fail;
+	}
 	/* Check that sizeof_partition_entry has the correct value */
 	if (le32_to_cpu((*gpt)->sizeof_partition_entry) != sizeof(gpt_entry)) {
 		pr_debug("GUID Partitition Entry Size check failed.\n");
@@ -429,44 +482,42 @@ compare_gpts(gpt_header *pgpt, gpt_header *agpt, u64 lastlba)
 	if (!pgpt || !agpt)
 		return;
 	if (le64_to_cpu(pgpt->my_lba) != le64_to_cpu(agpt->alternate_lba)) {
-		printk(KERN_WARNING
-		       "GPT:Primary header LBA != Alt. header alternate_lba\n");
-		printk(KERN_WARNING "GPT:%lld != %lld\n",
+		pr_warn("GPT:Primary header LBA != Alt. header alternate_lba\n");
+		pr_warn("GPT:%lld != %lld\n",
 		       (unsigned long long)le64_to_cpu(pgpt->my_lba),
                        (unsigned long long)le64_to_cpu(agpt->alternate_lba));
 		error_found++;
 	}
 	if (le64_to_cpu(pgpt->alternate_lba) != le64_to_cpu(agpt->my_lba)) {
-		printk(KERN_WARNING
-		       "GPT:Primary header alternate_lba != Alt. header my_lba\n");
-		printk(KERN_WARNING "GPT:%lld != %lld\n",
+		pr_warn("GPT:Primary header alternate_lba != Alt. header my_lba\n");
+		pr_warn("GPT:%lld != %lld\n",
 		       (unsigned long long)le64_to_cpu(pgpt->alternate_lba),
                        (unsigned long long)le64_to_cpu(agpt->my_lba));
 		error_found++;
 	}
 	if (le64_to_cpu(pgpt->first_usable_lba) !=
             le64_to_cpu(agpt->first_usable_lba)) {
-		printk(KERN_WARNING "GPT:first_usable_lbas don't match.\n");
-		printk(KERN_WARNING "GPT:%lld != %lld\n",
+		pr_warn("GPT:first_usable_lbas don't match.\n");
+		pr_warn("GPT:%lld != %lld\n",
 		       (unsigned long long)le64_to_cpu(pgpt->first_usable_lba),
                        (unsigned long long)le64_to_cpu(agpt->first_usable_lba));
 		error_found++;
 	}
 	if (le64_to_cpu(pgpt->last_usable_lba) !=
             le64_to_cpu(agpt->last_usable_lba)) {
-		printk(KERN_WARNING "GPT:last_usable_lbas don't match.\n");
-		printk(KERN_WARNING "GPT:%lld != %lld\n",
+		pr_warn("GPT:last_usable_lbas don't match.\n");
+		pr_warn("GPT:%lld != %lld\n",
 		       (unsigned long long)le64_to_cpu(pgpt->last_usable_lba),
                        (unsigned long long)le64_to_cpu(agpt->last_usable_lba));
 		error_found++;
 	}
 	if (efi_guidcmp(pgpt->disk_guid, agpt->disk_guid)) {
-		printk(KERN_WARNING "GPT:disk_guids don't match.\n");
+		pr_warn("GPT:disk_guids don't match.\n");
 		error_found++;
 	}
 	if (le32_to_cpu(pgpt->num_partition_entries) !=
             le32_to_cpu(agpt->num_partition_entries)) {
-		printk(KERN_WARNING "GPT:num_partition_entries don't match: "
+		pr_warn("GPT:num_partition_entries don't match: "
 		       "0x%x != 0x%x\n",
 		       le32_to_cpu(pgpt->num_partition_entries),
 		       le32_to_cpu(agpt->num_partition_entries));
@@ -474,8 +525,7 @@ compare_gpts(gpt_header *pgpt, gpt_header *agpt, u64 lastlba)
 	}
 	if (le32_to_cpu(pgpt->sizeof_partition_entry) !=
             le32_to_cpu(agpt->sizeof_partition_entry)) {
-		printk(KERN_WARNING
-		       "GPT:sizeof_partition_entry values don't match: "
+		pr_warn("GPT:sizeof_partition_entry values don't match: "
 		       "0x%x != 0x%x\n",
                        le32_to_cpu(pgpt->sizeof_partition_entry),
 		       le32_to_cpu(agpt->sizeof_partition_entry));
@@ -483,34 +533,30 @@ compare_gpts(gpt_header *pgpt, gpt_header *agpt, u64 lastlba)
 	}
 	if (le32_to_cpu(pgpt->partition_entry_array_crc32) !=
             le32_to_cpu(agpt->partition_entry_array_crc32)) {
-		printk(KERN_WARNING
-		       "GPT:partition_entry_array_crc32 values don't match: "
+		pr_warn("GPT:partition_entry_array_crc32 values don't match: "
 		       "0x%x != 0x%x\n",
                        le32_to_cpu(pgpt->partition_entry_array_crc32),
 		       le32_to_cpu(agpt->partition_entry_array_crc32));
 		error_found++;
 	}
 	if (le64_to_cpu(pgpt->alternate_lba) != lastlba) {
-		printk(KERN_WARNING
-		       "GPT:Primary header thinks Alt. header is not at the end of the disk.\n");
-		printk(KERN_WARNING "GPT:%lld != %lld\n",
+		pr_warn("GPT:Primary header thinks Alt. header is not at the end of the disk.\n");
+		pr_warn("GPT:%lld != %lld\n",
 			(unsigned long long)le64_to_cpu(pgpt->alternate_lba),
 			(unsigned long long)lastlba);
 		error_found++;
 	}
 
 	if (le64_to_cpu(agpt->my_lba) != lastlba) {
-		printk(KERN_WARNING
-		       "GPT:Alternate GPT header not at the end of the disk.\n");
-		printk(KERN_WARNING "GPT:%lld != %lld\n",
+		pr_warn("GPT:Alternate GPT header not at the end of the disk.\n");
+		pr_warn("GPT:%lld != %lld\n",
 			(unsigned long long)le64_to_cpu(agpt->my_lba),
 			(unsigned long long)lastlba);
 		error_found++;
 	}
 
 	if (error_found)
-		printk(KERN_WARNING
-		       "GPT: Use GNU Parted to correct GPT errors.\n");
+		pr_warn("GPT: Use GNU Parted to correct GPT errors.\n");
 	return;
 }
 
@@ -536,6 +582,7 @@ static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt,
 	gpt_header *pgpt = NULL, *agpt = NULL;
 	gpt_entry *pptes = NULL, *aptes = NULL;
 	legacy_mbr *legacymbr;
+	sector_t total_sectors = i_size_read(state->bdev->bd_inode) >> 9;
 	u64 lastlba;
 
 	if (!ptes)
@@ -543,17 +590,22 @@ static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt,
 
 	lastlba = last_lba(state->bdev);
         if (!force_gpt) {
-                /* This will be added to the EFI Spec. per Intel after v1.02. */
-                legacymbr = kzalloc(sizeof (*legacymbr), GFP_KERNEL);
-                if (legacymbr) {
-                        read_lba(state, 0, (u8 *) legacymbr,
-				 sizeof (*legacymbr));
-                        good_pmbr = is_pmbr_valid(legacymbr);
-                        kfree(legacymbr);
-                }
-                if (!good_pmbr)
-                        goto fail;
-        }
+		/* This will be added to the EFI Spec. per Intel after v1.02. */
+		legacymbr = kzalloc(sizeof(*legacymbr), GFP_KERNEL);
+		if (!legacymbr)
+			goto fail;
+
+		read_lba(state, 0, (u8 *)legacymbr, sizeof(*legacymbr));
+		good_pmbr = is_pmbr_valid(legacymbr, total_sectors);
+		kfree(legacymbr);
+
+		if (!good_pmbr)
+			goto fail;
+
+		pr_debug("Device has a %s MBR\n",
+			 good_pmbr == GPT_MBR_PROTECTIVE ?
+						"protective" : "hybrid");
+	}
 
 	good_pgpt = is_gpt_valid(state, GPT_PRIMARY_PARTITION_TABLE_LBA,
 				 &pgpt, &pptes);
@@ -576,11 +628,8 @@ static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt,
                 *ptes = pptes;
                 kfree(agpt);
                 kfree(aptes);
-                if (!good_agpt) {
-                        printk(KERN_WARNING 
-			       "Alternate GPT is invalid, "
-                               "using primary GPT.\n");
-                }
+		if (!good_agpt)
+                        pr_warn("Alternate GPT is invalid, using primary GPT.\n");
                 return 1;
         }
         else if (good_agpt) {
@@ -588,8 +637,7 @@ static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt,
                 *ptes = aptes;
                 kfree(pgpt);
                 kfree(pptes);
-                printk(KERN_WARNING 
-                       "Primary GPT is invalid, using alternate GPT.\n");
+		pr_warn("Primary GPT is invalid, using alternate GPT.\n");
                 return 1;
         }
 
@@ -651,8 +699,7 @@ int efi_partition(struct parsed_partitions *state)
 		put_partition(state, i+1, start * ssz, size * ssz);
 
 		/* If this is a RAID volume, tell md */
-		if (!efi_guidcmp(ptes[i].partition_type_guid,
-				 PARTITION_LINUX_RAID_GUID))
+		if (!efi_guidcmp(ptes[i].partition_type_guid, PARTITION_LINUX_RAID_GUID))
 			state->parts[i + 1].flags = ADDPART_FLAG_RAID;
 
 		info = &state->parts[i + 1].info;
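One consequence of the tightened check above: for a protective (non-hybrid) MBR, the single 0xEE entry must claim the lesser of the whole disk or the 32-bit LBA limit. A small hedged illustration of the expected size_in_lba for two disk sizes; the helper and the numbers are illustrative only, not part of the kernel code:

#include <stdint.h>
#include <stdio.h>

/* Expected size_in_lba of a protective MBR entry: the lesser of
 * (total_sectors - 1) and 0xFFFFFFFF, per the rule described above.
 */
static uint32_t expected_pmbr_size(uint64_t total_sectors)
{
	return total_sectors - 1 < 0xFFFFFFFFULL ?
		(uint32_t)(total_sectors - 1) : 0xFFFFFFFFU;
}

int main(void)
{
	/* 1 TiB disk, 512-byte sectors: 2^31 sectors, fits in 32-bit LBA */
	printf("1 TiB: 0x%08x\n", expected_pmbr_size(1ULL << 31));
	/* 4 TiB disk: 2^33 sectors, beyond 32-bit LBA, pinned at 0xFFFFFFFF */
	printf("4 TiB: 0x%08x\n", expected_pmbr_size(1ULL << 33));
	return 0;
}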

+ 18 - 20
block/partitions/efi.h

@@ -37,6 +37,9 @@
 #define EFI_PMBR_OSTYPE_EFI 0xEF
 #define EFI_PMBR_OSTYPE_EFI_GPT 0xEE
 
+#define GPT_MBR_PROTECTIVE  1
+#define GPT_MBR_HYBRID      2
+
 #define GPT_HEADER_SIGNATURE 0x5452415020494645ULL
 #define GPT_HEADER_REVISION_V1 0x00010000
 #define GPT_PRIMARY_PARTITION_TABLE_LBA 1
@@ -101,11 +104,25 @@ typedef struct _gpt_entry {
 	efi_char16_t partition_name[72 / sizeof (efi_char16_t)];
 } __attribute__ ((packed)) gpt_entry;
 
+typedef struct _gpt_mbr_record {
+	u8	boot_indicator; /* unused by EFI, set to 0x80 for bootable */
+	u8	start_head;     /* unused by EFI, pt start in CHS */
+	u8	start_sector;   /* unused by EFI, pt start in CHS */
+	u8	start_track;
+	u8	os_type;        /* EFI and legacy non-EFI OS types */
+	u8	end_head;       /* unused by EFI, pt end in CHS */
+	u8	end_sector;     /* unused by EFI, pt end in CHS */
+	u8	end_track;      /* unused by EFI, pt end in CHS */
+	__le32	starting_lba;   /* used by EFI - start addr of the on disk pt */
+	__le32	size_in_lba;    /* used by EFI - size of pt in LBA */
+} __packed gpt_mbr_record;
+
+
 typedef struct _legacy_mbr {
 	u8 boot_code[440];
 	__le32 unique_mbr_signature;
 	__le16 unknown;
-	struct partition partition_record[4];
+	gpt_mbr_record partition_record[4];
 	__le16 signature;
 } __attribute__ ((packed)) legacy_mbr;
 
@@ -113,22 +130,3 @@ typedef struct _legacy_mbr {
 extern int efi_partition(struct parsed_partitions *state);
 
 #endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * --------------------------------------------------------------------------
- * Local variables:
- * c-indent-level: 4 
- * c-brace-imaginary-offset: 0
- * c-brace-offset: -4
- * c-argdecl-indent: 4
- * c-label-offset: -4
- * c-continued-statement-offset: 4
- * c-continued-brace-offset: 0
- * indent-tabs-mode: nil
- * tab-width: 8
- * End:
- */

+ 3 - 1
drivers/block/aoe/aoe.h

@@ -1,5 +1,5 @@
 /* Copyright (c) 2013 Coraid, Inc.  See COPYING for GPL terms. */
-#define VERSION "83"
+#define VERSION "85"
 #define AOE_MAJOR 152
 #define DEVICE_NAME "aoe"
 
@@ -169,6 +169,7 @@ struct aoedev {
 	ulong ref;
 	struct work_struct work;/* disk create work struct */
 	struct gendisk *gd;
+	struct dentry *debugfs;
 	struct request_queue *blkq;
 	struct hd_geometry geo;
 	sector_t ssize;
@@ -206,6 +207,7 @@ struct ktstate {
 int aoeblk_init(void);
 void aoeblk_exit(void);
 void aoeblk_gdalloc(void *);
+void aoedisk_rm_debugfs(struct aoedev *d);
 void aoedisk_rm_sysfs(struct aoedev *d);
 
 int aoechr_init(void);

+ 98 - 2
drivers/block/aoe/aoeblk.c

@@ -1,4 +1,4 @@
-/* Copyright (c) 2012 Coraid, Inc.  See COPYING for GPL terms. */
+/* Copyright (c) 2013 Coraid, Inc.  See COPYING for GPL terms. */
 /*
  * aoeblk.c
  * block device routines
@@ -17,11 +17,13 @@
 #include <linux/mutex.h>
 #include <linux/export.h>
 #include <linux/moduleparam.h>
+#include <linux/debugfs.h>
 #include <scsi/sg.h>
 #include "aoe.h"
 
 static DEFINE_MUTEX(aoeblk_mutex);
 static struct kmem_cache *buf_pool_cache;
+static struct dentry *aoe_debugfs_dir;
 
 /* GPFS needs a larger value than the default. */
 static int aoe_maxsectors;
@@ -108,6 +110,55 @@ static ssize_t aoedisk_show_payload(struct device *dev,
 	return snprintf(page, PAGE_SIZE, "%lu\n", d->maxbcnt);
 }
 
+static int aoedisk_debugfs_show(struct seq_file *s, void *ignored)
+{
+	struct aoedev *d;
+	struct aoetgt **t, **te;
+	struct aoeif *ifp, *ife;
+	unsigned long flags;
+	char c;
+
+	d = s->private;
+	seq_printf(s, "rttavg: %d rttdev: %d\n",
+		d->rttavg >> RTTSCALE,
+		d->rttdev >> RTTDSCALE);
+	seq_printf(s, "nskbpool: %d\n", skb_queue_len(&d->skbpool));
+	seq_printf(s, "kicked: %ld\n", d->kicked);
+	seq_printf(s, "maxbcnt: %ld\n", d->maxbcnt);
+	seq_printf(s, "ref: %ld\n", d->ref);
+
+	spin_lock_irqsave(&d->lock, flags);
+	t = d->targets;
+	te = t + d->ntargets;
+	for (; t < te && *t; t++) {
+		c = '\t';
+		seq_printf(s, "falloc: %ld\n", (*t)->falloc);
+		seq_printf(s, "ffree: %p\n",
+			list_empty(&(*t)->ffree) ? NULL : (*t)->ffree.next);
+		seq_printf(s, "%pm:%d:%d:%d\n", (*t)->addr, (*t)->nout,
+			(*t)->maxout, (*t)->nframes);
+		seq_printf(s, "\tssthresh:%d\n", (*t)->ssthresh);
+		seq_printf(s, "\ttaint:%d\n", (*t)->taint);
+		seq_printf(s, "\tr:%d\n", (*t)->rpkts);
+		seq_printf(s, "\tw:%d\n", (*t)->wpkts);
+		ifp = (*t)->ifs;
+		ife = ifp + ARRAY_SIZE((*t)->ifs);
+		for (; ifp->nd && ifp < ife; ifp++) {
+			seq_printf(s, "%c%s", c, ifp->nd->name);
+			c = ',';
+		}
+		seq_puts(s, "\n");
+	}
+	spin_unlock_irqrestore(&d->lock, flags);
+
+	return 0;
+}
+
+static int aoe_debugfs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, aoedisk_debugfs_show, inode->i_private);
+}
+
 static DEVICE_ATTR(state, S_IRUGO, aoedisk_show_state, NULL);
 static DEVICE_ATTR(mac, S_IRUGO, aoedisk_show_mac, NULL);
 static DEVICE_ATTR(netif, S_IRUGO, aoedisk_show_netif, NULL);
@@ -130,6 +181,44 @@ static const struct attribute_group attr_group = {
 	.attrs = aoe_attrs,
 };
 
+static const struct file_operations aoe_debugfs_fops = {
+	.open = aoe_debugfs_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static void
+aoedisk_add_debugfs(struct aoedev *d)
+{
+	struct dentry *entry;
+	char *p;
+
+	if (aoe_debugfs_dir == NULL)
+		return;
+	p = strchr(d->gd->disk_name, '/');
+	if (p == NULL)
+		p = d->gd->disk_name;
+	else
+		p++;
+	BUG_ON(*p == '\0');
+	entry = debugfs_create_file(p, 0444, aoe_debugfs_dir, d,
+				    &aoe_debugfs_fops);
+	if (IS_ERR_OR_NULL(entry)) {
+		pr_info("aoe: cannot create debugfs file for %s\n",
+			d->gd->disk_name);
+		return;
+	}
+	BUG_ON(d->debugfs);
+	d->debugfs = entry;
+}
+void
+aoedisk_rm_debugfs(struct aoedev *d)
+{
+	debugfs_remove(d->debugfs);
+	d->debugfs = NULL;
+}
+
 static int
 aoedisk_add_sysfs(struct aoedev *d)
 {
@@ -330,6 +419,7 @@ aoeblk_gdalloc(void *vp)
 
 	add_disk(gd);
 	aoedisk_add_sysfs(d);
+	aoedisk_add_debugfs(d);
 
 	spin_lock_irqsave(&d->lock, flags);
 	WARN_ON(!(d->flags & DEVFL_GD_NOW));
@@ -351,6 +441,8 @@ err:
 void
 aoeblk_exit(void)
 {
+	debugfs_remove_recursive(aoe_debugfs_dir);
+	aoe_debugfs_dir = NULL;
 	kmem_cache_destroy(buf_pool_cache);
 }
 
@@ -362,7 +454,11 @@ aoeblk_init(void)
 					   0, 0, NULL);
 	if (buf_pool_cache == NULL)
 		return -ENOMEM;
-
+	aoe_debugfs_dir = debugfs_create_dir("aoe", NULL);
+	if (IS_ERR_OR_NULL(aoe_debugfs_dir)) {
+		pr_info("aoe: cannot create debugfs directory\n");
+		aoe_debugfs_dir = NULL;
+	}
 	return 0;
 }
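Since aoeblk_init() now creates an "aoe" directory in debugfs and aoedisk_add_debugfs() adds one file per disk (named after the part of the disk name following any '/'), the new statistics can be read from user space. A hedged sketch, assuming debugfs is mounted at the conventional /sys/kernel/debug and that a disk named e1.1 exists; both are illustrative:

#include <stdio.h>

int main(void)
{
	char buf[256];
	FILE *f = fopen("/sys/kernel/debug/aoe/e1.1", "r");

	if (!f) {
		perror("open");
		return 1;
	}
	/* Dumps rttavg/rttdev, nskbpool, and per-target frame/interface info */
	while (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);
	fclose(f);
	return 0;
}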
 

+ 0 - 4
drivers/block/aoe/aoecmd.c

@@ -380,7 +380,6 @@ aoecmd_ata_rw(struct aoedev *d)
 {
 	struct frame *f;
 	struct buf *buf;
-	struct aoetgt *t;
 	struct sk_buff *skb;
 	struct sk_buff_head queue;
 	ulong bcnt, fbcnt;
@@ -391,7 +390,6 @@ aoecmd_ata_rw(struct aoedev *d)
 	f = newframe(d);
 	if (f == NULL)
 		return 0;
-	t = *d->tgt;
 	bcnt = d->maxbcnt;
 	if (bcnt == 0)
 		bcnt = DEFAULTBCNT;
@@ -485,7 +483,6 @@ resend(struct aoedev *d, struct frame *f)
 	struct sk_buff *skb;
 	struct sk_buff_head queue;
 	struct aoe_hdr *h;
-	struct aoe_atahdr *ah;
 	struct aoetgt *t;
 	char buf[128];
 	u32 n;
@@ -500,7 +497,6 @@ resend(struct aoedev *d, struct frame *f)
 		return;
 	}
 	h = (struct aoe_hdr *) skb_mac_header(skb);
-	ah = (struct aoe_atahdr *) (h+1);
 
 	if (!(f->flags & FFL_PROBE)) {
 		snprintf(buf, sizeof(buf),

+ 4 - 6
drivers/block/aoe/aoedev.c

@@ -12,6 +12,7 @@
 #include <linux/bitmap.h>
 #include <linux/kdev_t.h>
 #include <linux/moduleparam.h>
+#include <linux/string.h>
 #include "aoe.h"
 
 static void dummy_timer(ulong);
@@ -241,16 +242,12 @@ aoedev_downdev(struct aoedev *d)
 static int
 user_req(char *s, size_t slen, struct aoedev *d)
 {
-	char *p;
+	const char *p;
 	size_t lim;
 
 	if (!d->gd)
 		return 0;
-	p = strrchr(d->gd->disk_name, '/');
-	if (!p)
-		p = d->gd->disk_name;
-	else
-		p += 1;
+	p = kbasename(d->gd->disk_name);
 	lim = sizeof(d->gd->disk_name);
 	lim -= p - d->gd->disk_name;
 	if (slen < lim)
@@ -278,6 +275,7 @@ freedev(struct aoedev *d)
 
 	del_timer_sync(&d->timer);
 	if (d->gd) {
+		aoedisk_rm_debugfs(d);
 		aoedisk_rm_sysfs(d);
 		del_gendisk(d->gd);
 		put_disk(d->gd);

+ 7 - 0
drivers/block/cciss.c

@@ -4257,6 +4257,13 @@ static void cciss_find_board_params(ctlr_info_t *h)
 	cciss_get_max_perf_mode_cmds(h);
 	h->nr_cmds = h->max_commands - 4 - cciss_tape_cmds;
 	h->maxsgentries = readl(&(h->cfgtable->MaxSGElements));
+	/*
+	 * The P600 may exhibit poor performance under some workloads
+	 * if we use the value in the configuration table. Limit this
+	 * controller to MAXSGENTRIES (32) instead.
+	 */
+	if (h->board_id == 0x3225103C)
+		h->maxsgentries = MAXSGENTRIES;
 	/*
 	 * Limit in-command s/g elements to 32 to save dma'able memory.
 	 * However, the spec says if 0, use 31

+ 1 - 1
drivers/block/mg_disk.c

@@ -636,7 +636,7 @@ ok_to_write:
 		mg_request(host->breq);
 }
 
-void mg_times_out(unsigned long data)
+static void mg_times_out(unsigned long data)
 {
 	struct mg_host *host = (struct mg_host *)data;
 	char *name;

+ 1 - 1
drivers/block/osdblk.c

@@ -598,7 +598,7 @@ static ssize_t class_osdblk_remove(struct class *c,
 	unsigned long ul;
 	struct list_head *tmp;
 
-	rc = strict_strtoul(buf, 10, &ul);
+	rc = kstrtoul(buf, 10, &ul);
 	if (rc)
 		return rc;
 

+ 140 - 138
drivers/block/pktcdvd.c

@@ -44,6 +44,8 @@
  *
  *************************************************************************/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/pktcdvd.h>
 #include <linux/module.h>
 #include <linux/types.h>
@@ -69,23 +71,24 @@
 
 #define DRIVER_NAME	"pktcdvd"
 
-#if PACKET_DEBUG
-#define DPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
-#else
-#define DPRINTK(fmt, args...)
-#endif
-
-#if PACKET_DEBUG > 1
-#define VPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
-#else
-#define VPRINTK(fmt, args...)
-#endif
+#define pkt_err(pd, fmt, ...)						\
+	pr_err("%s: " fmt, pd->name, ##__VA_ARGS__)
+#define pkt_notice(pd, fmt, ...)					\
+	pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__)
+#define pkt_info(pd, fmt, ...)						\
+	pr_info("%s: " fmt, pd->name, ##__VA_ARGS__)
+
+#define pkt_dbg(level, pd, fmt, ...)					\
+do {									\
+	if (level == 2 && PACKET_DEBUG >= 2)				\
+		pr_notice("%s: %s():" fmt,				\
+			  pd->name, __func__, ##__VA_ARGS__);		\
+	else if (level == 1 && PACKET_DEBUG >= 1)			\
+		pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__);		\
+} while (0)
 
 #define MAX_SPEED 0xffff
 
-#define ZONE(sector, pd) (((sector) + (pd)->offset) & \
-			~(sector_t)((pd)->settings.size - 1))
-
 static DEFINE_MUTEX(pktcdvd_mutex);
 static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
 static struct proc_dir_entry *pkt_proc;
@@ -103,7 +106,10 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
 static int pkt_remove_dev(dev_t pkt_dev);
 static int pkt_seq_show(struct seq_file *m, void *p);
 
-
+static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
+{
+	return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
+}
 
 /*
  * create and register a pktcdvd kernel object.
@@ -424,7 +430,7 @@ static int pkt_sysfs_init(void)
 	if (ret) {
 		kfree(class_pktcdvd);
 		class_pktcdvd = NULL;
-		printk(DRIVER_NAME": failed to create class pktcdvd\n");
+		pr_err("failed to create class pktcdvd\n");
 		return ret;
 	}
 	return 0;
@@ -517,7 +523,7 @@ static void pkt_bio_finished(struct pktcdvd_device *pd)
 {
 	BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
 	if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
-		VPRINTK(DRIVER_NAME": queue empty\n");
+		pkt_dbg(2, pd, "queue empty\n");
 		atomic_set(&pd->iosched.attention, 1);
 		wake_up(&pd->wqueue);
 	}
@@ -734,36 +740,33 @@ out:
 	return ret;
 }
 
+static const char *sense_key_string(__u8 index)
+{
+	static const char * const info[] = {
+		"No sense", "Recovered error", "Not ready",
+		"Medium error", "Hardware error", "Illegal request",
+		"Unit attention", "Data protect", "Blank check",
+	};
+
+	return index < ARRAY_SIZE(info) ? info[index] : "INVALID";
+}
+
 /*
  * A generic sense dump / resolve mechanism should be implemented across
  * all ATAPI + SCSI devices.
  */
-static void pkt_dump_sense(struct packet_command *cgc)
+static void pkt_dump_sense(struct pktcdvd_device *pd,
+			   struct packet_command *cgc)
 {
-	static char *info[9] = { "No sense", "Recovered error", "Not ready",
-				 "Medium error", "Hardware error", "Illegal request",
-				 "Unit attention", "Data protect", "Blank check" };
-	int i;
 	struct request_sense *sense = cgc->sense;
 
-	printk(DRIVER_NAME":");
-	for (i = 0; i < CDROM_PACKET_SIZE; i++)
-		printk(" %02x", cgc->cmd[i]);
-	printk(" - ");
-
-	if (sense == NULL) {
-		printk("no sense\n");
-		return;
-	}
-
-	printk("sense %02x.%02x.%02x", sense->sense_key, sense->asc, sense->ascq);
-
-	if (sense->sense_key > 8) {
-		printk(" (INVALID)\n");
-		return;
-	}
-
-	printk(" (%s)\n", info[sense->sense_key]);
+	if (sense)
+		pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n",
+			CDROM_PACKET_SIZE, cgc->cmd,
+			sense->sense_key, sense->asc, sense->ascq,
+			sense_key_string(sense->sense_key));
+	else
+		pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
 }
 
 /*
@@ -806,7 +809,7 @@ static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
 	cgc.cmd[5] = write_speed & 0xff;
 
 	if ((ret = pkt_generic_packet(pd, &cgc)))
-		pkt_dump_sense(&cgc);
+		pkt_dump_sense(pd, &cgc);
 
 	return ret;
 }
@@ -872,7 +875,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
 				need_write_seek = 0;
 			if (need_write_seek && reads_queued) {
 				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
-					VPRINTK(DRIVER_NAME": write, waiting\n");
+					pkt_dbg(2, pd, "write, waiting\n");
 					break;
 				}
 				pkt_flush_cache(pd);
@@ -881,7 +884,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
 		} else {
 			if (!reads_queued && writes_queued) {
 				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
-					VPRINTK(DRIVER_NAME": read, waiting\n");
+					pkt_dbg(2, pd, "read, waiting\n");
 					break;
 				}
 				pd->iosched.writing = 1;
@@ -943,7 +946,7 @@ static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_que
 		set_bit(PACKET_MERGE_SEGS, &pd->flags);
 		return 0;
 	} else {
-		printk(DRIVER_NAME": cdrom max_phys_segments too small\n");
+		pkt_err(pd, "cdrom max_phys_segments too small\n");
 		return -EIO;
 	}
 }
@@ -987,8 +990,9 @@ static void pkt_end_io_read(struct bio *bio, int err)
 	struct pktcdvd_device *pd = pkt->pd;
 	BUG_ON(!pd);
 
-	VPRINTK("pkt_end_io_read: bio=%p sec0=%llx sec=%llx err=%d\n", bio,
-		(unsigned long long)pkt->sector, (unsigned long long)bio->bi_sector, err);
+	pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
+		bio, (unsigned long long)pkt->sector,
+		(unsigned long long)bio->bi_sector, err);
 
 	if (err)
 		atomic_inc(&pkt->io_errors);
@@ -1005,7 +1009,7 @@ static void pkt_end_io_packet_write(struct bio *bio, int err)
 	struct pktcdvd_device *pd = pkt->pd;
 	BUG_ON(!pd);
 
-	VPRINTK("pkt_end_io_packet_write: id=%d, err=%d\n", pkt->id, err);
+	pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, err);
 
 	pd->stats.pkt_ended++;
 
@@ -1047,7 +1051,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
 	spin_unlock(&pkt->lock);
 
 	if (pkt->cache_valid) {
-		VPRINTK("pkt_gather_data: zone %llx cached\n",
+		pkt_dbg(2, pd, "zone %llx cached\n",
 			(unsigned long long)pkt->sector);
 		goto out_account;
 	}
@@ -1070,7 +1074,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
 
 		p = (f * CD_FRAMESIZE) / PAGE_SIZE;
 		offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
-		VPRINTK("pkt_gather_data: Adding frame %d, page:%p offs:%d\n",
+		pkt_dbg(2, pd, "Adding frame %d, page:%p offs:%d\n",
 			f, pkt->pages[p], offset);
 		if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
 			BUG();
@@ -1082,7 +1086,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
 	}
 
 out_account:
-	VPRINTK("pkt_gather_data: need %d frames for zone %llx\n",
+	pkt_dbg(2, pd, "need %d frames for zone %llx\n",
 		frames_read, (unsigned long long)pkt->sector);
 	pd->stats.pkt_started++;
 	pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
@@ -1183,7 +1187,8 @@ static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state
 		"IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
 	};
 	enum packet_data_state old_state = pkt->state;
-	VPRINTK("pkt %2d : s=%6llx %s -> %s\n", pkt->id, (unsigned long long)pkt->sector,
+	pkt_dbg(2, pd, "pkt %2d : s=%6llx %s -> %s\n",
+		pkt->id, (unsigned long long)pkt->sector,
 		state_name[old_state], state_name[state]);
 #endif
 	pkt->state = state;
@@ -1202,12 +1207,10 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
 	struct rb_node *n;
 	int wakeup;
 
-	VPRINTK("handle_queue\n");
-
 	atomic_set(&pd->scan_queue, 0);
 
 	if (list_empty(&pd->cdrw.pkt_free_list)) {
-		VPRINTK("handle_queue: no pkt\n");
+		pkt_dbg(2, pd, "no pkt\n");
 		return 0;
 	}
 
@@ -1224,7 +1227,7 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
 	node = first_node;
 	while (node) {
 		bio = node->bio;
-		zone = ZONE(bio->bi_sector, pd);
+		zone = get_zone(bio->bi_sector, pd);
 		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
 			if (p->sector == zone) {
 				bio = NULL;
@@ -1244,7 +1247,7 @@ try_next_bio:
 	}
 	spin_unlock(&pd->lock);
 	if (!bio) {
-		VPRINTK("handle_queue: no bio\n");
+		pkt_dbg(2, pd, "no bio\n");
 		return 0;
 	}
 
@@ -1260,12 +1263,12 @@ try_next_bio:
 	 * to this packet.
 	 */
 	spin_lock(&pd->lock);
-	VPRINTK("pkt_handle_queue: looking for zone %llx\n", (unsigned long long)zone);
+	pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
 	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
 		bio = node->bio;
-		VPRINTK("pkt_handle_queue: found zone=%llx\n",
-			(unsigned long long)ZONE(bio->bi_sector, pd));
-		if (ZONE(bio->bi_sector, pd) != zone)
+		pkt_dbg(2, pd, "found zone=%llx\n",
+			(unsigned long long)get_zone(bio->bi_sector, pd));
+		if (get_zone(bio->bi_sector, pd) != zone)
 			break;
 		pkt_rbtree_erase(pd, node);
 		spin_lock(&pkt->lock);
@@ -1316,7 +1319,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
 		if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
 			BUG();
 	}
-	VPRINTK(DRIVER_NAME": vcnt=%d\n", pkt->w_bio->bi_vcnt);
+	pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt);
 
 	/*
 	 * Fill-in bvec with data from orig_bios.
@@ -1327,7 +1330,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
 	pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
 	spin_unlock(&pkt->lock);
 
-	VPRINTK("pkt_start_write: Writing %d frames for zone %llx\n",
+	pkt_dbg(2, pd, "Writing %d frames for zone %llx\n",
 		pkt->write_size, (unsigned long long)pkt->sector);
 
 	if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
@@ -1359,7 +1362,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
 {
 	int uptodate;
 
-	VPRINTK("run_state_machine: pkt %d\n", pkt->id);
+	pkt_dbg(2, pd, "pkt %d\n", pkt->id);
 
 	for (;;) {
 		switch (pkt->state) {
@@ -1398,7 +1401,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
 			if (pkt_start_recovery(pkt)) {
 				pkt_start_write(pd, pkt);
 			} else {
-				VPRINTK("No recovery possible\n");
+				pkt_dbg(2, pd, "No recovery possible\n");
 				pkt_set_state(pkt, PACKET_FINISHED_STATE);
 			}
 			break;
@@ -1419,8 +1422,6 @@ static void pkt_handle_packets(struct pktcdvd_device *pd)
 {
 	struct packet_data *pkt, *next;
 
-	VPRINTK("pkt_handle_packets\n");
-
 	/*
 	 * Run state machine for active packets
 	 */
@@ -1502,9 +1503,9 @@ static int kcdrwd(void *foobar)
 			if (PACKET_DEBUG > 1) {
 				int states[PACKET_NUM_STATES];
 				pkt_count_states(pd, states);
-				VPRINTK("kcdrwd: i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
-					states[0], states[1], states[2], states[3],
-					states[4], states[5]);
+				pkt_dbg(2, pd, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
+					states[0], states[1], states[2],
+					states[3], states[4], states[5]);
 			}
 
 			min_sleep_time = MAX_SCHEDULE_TIMEOUT;
@@ -1513,9 +1514,9 @@ static int kcdrwd(void *foobar)
 					min_sleep_time = pkt->sleep_time;
 			}
 
-			VPRINTK("kcdrwd: sleeping\n");
+			pkt_dbg(2, pd, "sleeping\n");
 			residue = schedule_timeout(min_sleep_time);
-			VPRINTK("kcdrwd: wake up\n");
+			pkt_dbg(2, pd, "wake up\n");
 
 			/* make swsusp happy with our thread */
 			try_to_freeze();
@@ -1563,9 +1564,10 @@ work_to_do:
 
 static void pkt_print_settings(struct pktcdvd_device *pd)
 {
-	printk(DRIVER_NAME": %s packets, ", pd->settings.fp ? "Fixed" : "Variable");
-	printk("%u blocks, ", pd->settings.size >> 2);
-	printk("Mode-%c disc\n", pd->settings.block_mode == 8 ? '1' : '2');
+	pkt_info(pd, "%s packets, %u blocks, Mode-%c disc\n",
+		 pd->settings.fp ? "Fixed" : "Variable",
+		 pd->settings.size >> 2,
+		 pd->settings.block_mode == 8 ? '1' : '2');
 }
 
 static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
@@ -1699,7 +1701,7 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
 	init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
 	cgc.sense = &sense;
 	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
-		pkt_dump_sense(&cgc);
+		pkt_dump_sense(pd, &cgc);
 		return ret;
 	}
 
@@ -1714,7 +1716,7 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
 	init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
 	cgc.sense = &sense;
 	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
-		pkt_dump_sense(&cgc);
+		pkt_dump_sense(pd, &cgc);
 		return ret;
 	}
 
@@ -1749,14 +1751,14 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
 		/*
 		 * paranoia
 		 */
-		printk(DRIVER_NAME": write mode wrong %d\n", wp->data_block_type);
+		pkt_err(pd, "write mode wrong %d\n", wp->data_block_type);
 		return 1;
 	}
 	wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
 
 	cgc.buflen = cgc.cmd[8] = size;
 	if ((ret = pkt_mode_select(pd, &cgc))) {
-		pkt_dump_sense(&cgc);
+		pkt_dump_sense(pd, &cgc);
 		return ret;
 	}
 
@@ -1793,7 +1795,7 @@ static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
 	if (ti->rt == 1 && ti->blank == 0)
 		return 1;
 
-	printk(DRIVER_NAME": bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
+	pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
 	return 0;
 }
 
@@ -1811,7 +1813,8 @@ static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
 		case 0x12: /* DVD-RAM */
 			return 1;
 		default:
-			VPRINTK(DRIVER_NAME": Wrong disc profile (%x)\n", pd->mmc3_profile);
+			pkt_dbg(2, pd, "Wrong disc profile (%x)\n",
+				pd->mmc3_profile);
 			return 0;
 	}
 
@@ -1820,22 +1823,22 @@ static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
 	 * but i'm not sure, should we leave this to user apps? probably.
 	 */
 	if (di->disc_type == 0xff) {
-		printk(DRIVER_NAME": Unknown disc. No track?\n");
+		pkt_notice(pd, "unknown disc - no track?\n");
 		return 0;
 	}
 
 	if (di->disc_type != 0x20 && di->disc_type != 0) {
-		printk(DRIVER_NAME": Wrong disc type (%x)\n", di->disc_type);
+		pkt_err(pd, "wrong disc type (%x)\n", di->disc_type);
 		return 0;
 	}
 
 	if (di->erasable == 0) {
-		printk(DRIVER_NAME": Disc not erasable\n");
+		pkt_notice(pd, "disc not erasable\n");
 		return 0;
 	}
 
 	if (di->border_status == PACKET_SESSION_RESERVED) {
-		printk(DRIVER_NAME": Can't write to last track (reserved)\n");
+		pkt_err(pd, "can't write to last track (reserved)\n");
 		return 0;
 	}
 
@@ -1860,7 +1863,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
 	memset(&ti, 0, sizeof(track_information));
 
 	if ((ret = pkt_get_disc_info(pd, &di))) {
-		printk("failed get_disc\n");
+		pkt_err(pd, "failed get_disc\n");
 		return ret;
 	}
 
@@ -1871,12 +1874,12 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
 
 	track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
 	if ((ret = pkt_get_track_info(pd, track, 1, &ti))) {
-		printk(DRIVER_NAME": failed get_track\n");
+		pkt_err(pd, "failed get_track\n");
 		return ret;
 	}
 
 	if (!pkt_writable_track(pd, &ti)) {
-		printk(DRIVER_NAME": can't write to this track\n");
+		pkt_err(pd, "can't write to this track\n");
 		return -EROFS;
 	}
 
@@ -1886,11 +1889,11 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
 	 */
 	pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
 	if (pd->settings.size == 0) {
-		printk(DRIVER_NAME": detected zero packet size!\n");
+		pkt_notice(pd, "detected zero packet size!\n");
 		return -ENXIO;
 	}
 	if (pd->settings.size > PACKET_MAX_SECTORS) {
-		printk(DRIVER_NAME": packet size is too big\n");
+		pkt_err(pd, "packet size is too big\n");
 		return -EROFS;
 	}
 	pd->settings.fp = ti.fp;
@@ -1932,7 +1935,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
 			pd->settings.block_mode = PACKET_BLOCK_MODE2;
 			break;
 		default:
-			printk(DRIVER_NAME": unknown data mode\n");
+			pkt_err(pd, "unknown data mode\n");
 			return -EROFS;
 	}
 	return 0;
@@ -1966,10 +1969,10 @@ static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
 	cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
 	ret = pkt_mode_select(pd, &cgc);
 	if (ret) {
-		printk(DRIVER_NAME": write caching control failed\n");
-		pkt_dump_sense(&cgc);
+		pkt_err(pd, "write caching control failed\n");
+		pkt_dump_sense(pd, &cgc);
 	} else if (!ret && set)
-		printk(DRIVER_NAME": enabled write caching on %s\n", pd->name);
+		pkt_notice(pd, "enabled write caching\n");
 	return ret;
 }
 
@@ -2005,7 +2008,7 @@ static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
 			     sizeof(struct mode_page_header);
 		ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
 		if (ret) {
-			pkt_dump_sense(&cgc);
+			pkt_dump_sense(pd, &cgc);
 			return ret;
 		}
 	}
@@ -2064,7 +2067,7 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
 	cgc.cmd[8] = 2;
 	ret = pkt_generic_packet(pd, &cgc);
 	if (ret) {
-		pkt_dump_sense(&cgc);
+		pkt_dump_sense(pd, &cgc);
 		return ret;
 	}
 	size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
@@ -2079,16 +2082,16 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
 	cgc.cmd[8] = size;
 	ret = pkt_generic_packet(pd, &cgc);
 	if (ret) {
-		pkt_dump_sense(&cgc);
+		pkt_dump_sense(pd, &cgc);
 		return ret;
 	}
 
 	if (!(buf[6] & 0x40)) {
-		printk(DRIVER_NAME": Disc type is not CD-RW\n");
+		pkt_notice(pd, "disc type is not CD-RW\n");
 		return 1;
 	}
 	if (!(buf[6] & 0x4)) {
-		printk(DRIVER_NAME": A1 values on media are not valid, maybe not CDRW?\n");
+		pkt_notice(pd, "A1 values on media are not valid, maybe not CDRW?\n");
 		return 1;
 	}
 
@@ -2108,14 +2111,14 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
 			*speed = us_clv_to_speed[sp];
 			break;
 		default:
-			printk(DRIVER_NAME": Unknown disc sub-type %d\n",st);
+			pkt_notice(pd, "unknown disc sub-type %d\n", st);
 			return 1;
 	}
 	if (*speed) {
-		printk(DRIVER_NAME": Max. media speed: %d\n",*speed);
+		pkt_info(pd, "maximum media speed: %d\n", *speed);
 		return 0;
 	} else {
-		printk(DRIVER_NAME": Unknown speed %d for sub-type %d\n",sp,st);
+		pkt_notice(pd, "unknown speed %d for sub-type %d\n", sp, st);
 		return 1;
 	}
 }
@@ -2126,7 +2129,7 @@ static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
 	struct request_sense sense;
 	int ret;
 
-	VPRINTK(DRIVER_NAME": Performing OPC\n");
+	pkt_dbg(2, pd, "Performing OPC\n");
 
 	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
 	cgc.sense = &sense;
@@ -2134,7 +2137,7 @@ static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
 	cgc.cmd[0] = GPCMD_SEND_OPC;
 	cgc.cmd[1] = 1;
 	if ((ret = pkt_generic_packet(pd, &cgc)))
-		pkt_dump_sense(&cgc);
+		pkt_dump_sense(pd, &cgc);
 	return ret;
 }
 
@@ -2144,12 +2147,12 @@ static int pkt_open_write(struct pktcdvd_device *pd)
 	unsigned int write_speed, media_write_speed, read_speed;
 
 	if ((ret = pkt_probe_settings(pd))) {
-		VPRINTK(DRIVER_NAME": %s failed probe\n", pd->name);
+		pkt_dbg(2, pd, "failed probe\n");
 		return ret;
 	}
 
 	if ((ret = pkt_set_write_settings(pd))) {
-		DPRINTK(DRIVER_NAME": %s failed saving write settings\n", pd->name);
+		pkt_dbg(1, pd, "failed saving write settings\n");
 		return -EIO;
 	}
 
@@ -2161,26 +2164,26 @@ static int pkt_open_write(struct pktcdvd_device *pd)
 		case 0x13: /* DVD-RW */
 		case 0x1a: /* DVD+RW */
 		case 0x12: /* DVD-RAM */
-			DPRINTK(DRIVER_NAME": write speed %ukB/s\n", write_speed);
+			pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed);
 			break;
 		default:
 			if ((ret = pkt_media_speed(pd, &media_write_speed)))
 				media_write_speed = 16;
 			write_speed = min(write_speed, media_write_speed * 177);
-			DPRINTK(DRIVER_NAME": write speed %ux\n", write_speed / 176);
+			pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176);
 			break;
 	}
 	read_speed = write_speed;
 
 	if ((ret = pkt_set_speed(pd, write_speed, read_speed))) {
-		DPRINTK(DRIVER_NAME": %s couldn't set write speed\n", pd->name);
+		pkt_dbg(1, pd, "couldn't set write speed\n");
 		return -EIO;
 	}
 	pd->write_speed = write_speed;
 	pd->read_speed = read_speed;
 
 	if ((ret = pkt_perform_opc(pd))) {
-		DPRINTK(DRIVER_NAME": %s Optimum Power Calibration failed\n", pd->name);
+		pkt_dbg(1, pd, "Optimum Power Calibration failed\n");
 	}
 
 	return 0;
@@ -2205,7 +2208,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
 		goto out;
 
 	if ((ret = pkt_get_last_written(pd, &lba))) {
-		printk(DRIVER_NAME": pkt_get_last_written failed\n");
+		pkt_err(pd, "pkt_get_last_written failed\n");
 		goto out_putdev;
 	}
 
@@ -2235,11 +2238,11 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
 
 	if (write) {
 		if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
-			printk(DRIVER_NAME": not enough memory for buffers\n");
+			pkt_err(pd, "not enough memory for buffers\n");
 			ret = -ENOMEM;
 			goto out_putdev;
 		}
-		printk(DRIVER_NAME": %lukB available on disc\n", lba << 1);
+		pkt_info(pd, "%lukB available on disc\n", lba << 1);
 	}
 
 	return 0;
@@ -2257,7 +2260,7 @@ out:
 static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
 {
 	if (flush && pkt_flush_cache(pd))
-		DPRINTK(DRIVER_NAME": %s not flushing cache\n", pd->name);
+		pkt_dbg(1, pd, "not flushing cache\n");
 
 	pkt_lock_door(pd, 0);
 
@@ -2279,8 +2282,6 @@ static int pkt_open(struct block_device *bdev, fmode_t mode)
 	struct pktcdvd_device *pd = NULL;
 	int ret;
 
-	VPRINTK(DRIVER_NAME": entering open\n");
-
 	mutex_lock(&pktcdvd_mutex);
 	mutex_lock(&ctl_mutex);
 	pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
@@ -2315,7 +2316,6 @@ static int pkt_open(struct block_device *bdev, fmode_t mode)
 out_dec:
 	pd->refcnt--;
 out:
-	VPRINTK(DRIVER_NAME": failed open (%d)\n", ret);
 	mutex_unlock(&ctl_mutex);
 	mutex_unlock(&pktcdvd_mutex);
 	return ret;
@@ -2360,7 +2360,8 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
 
 	pd = q->queuedata;
 	if (!pd) {
-		printk(DRIVER_NAME": %s incorrect request queue\n", bdevname(bio->bi_bdev, b));
+		pr_err("%s incorrect request queue\n",
+		       bdevname(bio->bi_bdev, b));
 		goto end_io;
 	}
 
@@ -2382,20 +2383,20 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
 	}
 
 	if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
-		printk(DRIVER_NAME": WRITE for ro device %s (%llu)\n",
-			pd->name, (unsigned long long)bio->bi_sector);
+		pkt_notice(pd, "WRITE for ro device (%llu)\n",
+			   (unsigned long long)bio->bi_sector);
 		goto end_io;
 	}
 
 	if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
-		printk(DRIVER_NAME": wrong bio size\n");
+		pkt_err(pd, "wrong bio size\n");
 		goto end_io;
 	}
 
 	blk_queue_bounce(q, &bio);
 
-	zone = ZONE(bio->bi_sector, pd);
-	VPRINTK("pkt_make_request: start = %6llx stop = %6llx\n",
+	zone = get_zone(bio->bi_sector, pd);
+	pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
 		(unsigned long long)bio->bi_sector,
 		(unsigned long long)bio_end_sector(bio));
 
@@ -2405,7 +2406,7 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
 		sector_t last_zone;
 		int first_sectors;
 
-		last_zone = ZONE(bio_end_sector(bio) - 1, pd);
+		last_zone = get_zone(bio_end_sector(bio) - 1, pd);
 		if (last_zone != zone) {
 			BUG_ON(last_zone != zone + pd->settings.size);
 			first_sectors = last_zone - bio->bi_sector;
@@ -2500,7 +2501,7 @@ static int pkt_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
 			  struct bio_vec *bvec)
 {
 	struct pktcdvd_device *pd = q->queuedata;
-	sector_t zone = ZONE(bmd->bi_sector, pd);
+	sector_t zone = get_zone(bmd->bi_sector, pd);
 	int used = ((bmd->bi_sector - zone) << 9) + bmd->bi_size;
 	int remaining = (pd->settings.size << 9) - used;
 	int remaining2;
@@ -2609,7 +2610,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
 	struct block_device *bdev;
 
 	if (pd->pkt_dev == dev) {
-		printk(DRIVER_NAME": Recursive setup not allowed\n");
+		pkt_err(pd, "recursive setup not allowed\n");
 		return -EBUSY;
 	}
 	for (i = 0; i < MAX_WRITERS; i++) {
@@ -2617,11 +2618,12 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
 		if (!pd2)
 			continue;
 		if (pd2->bdev->bd_dev == dev) {
-			printk(DRIVER_NAME": %s already setup\n", bdevname(pd2->bdev, b));
+			pkt_err(pd, "%s already setup\n",
+				bdevname(pd2->bdev, b));
 			return -EBUSY;
 		}
 		if (pd2->pkt_dev == dev) {
-			printk(DRIVER_NAME": Can't chain pktcdvd devices\n");
+			pkt_err(pd, "can't chain pktcdvd devices\n");
 			return -EBUSY;
 		}
 	}
@@ -2644,13 +2646,13 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
 	atomic_set(&pd->cdrw.pending_bios, 0);
 	pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
 	if (IS_ERR(pd->cdrw.thread)) {
-		printk(DRIVER_NAME": can't start kernel thread\n");
+		pkt_err(pd, "can't start kernel thread\n");
 		ret = -ENOMEM;
 		goto out_mem;
 	}
 
 	proc_create_data(pd->name, 0, pkt_proc, &pkt_proc_fops, pd);
-	DPRINTK(DRIVER_NAME": writer %s mapped to %s\n", pd->name, bdevname(bdev, b));
+	pkt_dbg(1, pd, "writer mapped to %s\n", bdevname(bdev, b));
 	return 0;
 
 out_mem:
@@ -2665,8 +2667,8 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
 	struct pktcdvd_device *pd = bdev->bd_disk->private_data;
 	int ret;
 
-	VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd,
-		MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
+	pkt_dbg(2, pd, "cmd %x, dev %d:%d\n",
+		cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
 
 	mutex_lock(&pktcdvd_mutex);
 	switch (cmd) {
@@ -2690,7 +2692,7 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
 		break;
 
 	default:
-		VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd);
+		pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd);
 		ret = -ENOTTY;
 	}
 	mutex_unlock(&pktcdvd_mutex);
@@ -2743,7 +2745,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
 		if (!pkt_devs[idx])
 			break;
 	if (idx == MAX_WRITERS) {
-		printk(DRIVER_NAME": max %d writers supported\n", MAX_WRITERS);
+		pr_err("max %d writers supported\n", MAX_WRITERS);
 		ret = -EBUSY;
 		goto out_mutex;
 	}
@@ -2818,7 +2820,7 @@ out_mem:
 	kfree(pd);
 out_mutex:
 	mutex_unlock(&ctl_mutex);
-	printk(DRIVER_NAME": setup of pktcdvd device failed\n");
+	pr_err("setup of pktcdvd device failed\n");
 	return ret;
 }
 
@@ -2839,7 +2841,7 @@ static int pkt_remove_dev(dev_t pkt_dev)
 			break;
 	}
 	if (idx == MAX_WRITERS) {
-		DPRINTK(DRIVER_NAME": dev not setup\n");
+		pr_debug("dev not setup\n");
 		ret = -ENXIO;
 		goto out;
 	}
@@ -2859,7 +2861,7 @@ static int pkt_remove_dev(dev_t pkt_dev)
 	blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);
 
 	remove_proc_entry(pd->name, pkt_proc);
-	DPRINTK(DRIVER_NAME": writer %s unmapped\n", pd->name);
+	pkt_dbg(1, pd, "writer unmapped\n");
 
 	del_gendisk(pd->disk);
 	blk_cleanup_queue(pd->disk->queue);
@@ -2969,7 +2971,7 @@ static int __init pkt_init(void)
 
 	ret = register_blkdev(pktdev_major, DRIVER_NAME);
 	if (ret < 0) {
-		printk(DRIVER_NAME": Unable to register block device\n");
+		pr_err("unable to register block device\n");
 		goto out2;
 	}
 	if (!pktdev_major)
@@ -2983,7 +2985,7 @@ static int __init pkt_init(void)
 
 	ret = misc_register(&pkt_misc);
 	if (ret) {
-		printk(DRIVER_NAME": Unable to register misc device\n");
+		pr_err("unable to register misc device\n");
 		goto out_misc;
 	}
 

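The pktcdvd hunks above replace bare printk(DRIVER_NAME": ...") calls with the per-device pkt_err()/pkt_dbg() helpers, which prepend the device name (and, for pkt_dbg, honour a verbosity level), so the messages no longer need to carry the driver prefix or pd->name themselves. The helper definitions are outside this excerpt; the sketch below only shows roughly the shape such wrappers take — the pktdebug verbosity knob and the exact formatting are assumptions, not the driver's actual macros.

/* Rough sketch only; the real pkt_err()/pkt_dbg() in pktcdvd may differ. */
#define pkt_err(pd, fmt, ...)						\
	pr_err("%s: " fmt, (pd)->name, ##__VA_ARGS__)

#define pkt_dbg(level, pd, fmt, ...)					\
do {									\
	if ((level) <= pktdebug)	/* assumed module debug level */\
		pr_notice("%s: %s(): " fmt,				\
			  (pd)->name, __func__, ##__VA_ARGS__);		\
} while (0)
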
+ 1 - 1
drivers/block/rbd.c

@@ -5132,7 +5132,7 @@ static ssize_t rbd_remove(struct bus_type *bus,
 	bool already = false;
 	int ret;
 
-	ret = strict_strtoul(buf, 10, &ul);
+	ret = kstrtoul(buf, 10, &ul);
 	if (ret)
 		return ret;
 

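This rbd hunk — like the xen-blkback and gsmi hunks further down — converts the deprecated strict_strtoul() to kstrtoul(). Both return 0 on success and a negative errno on failure, so the existing "if (ret) return ret;" error handling carries over unchanged. A minimal usage sketch follows; the sysfs store callback and its names are hypothetical, only kstrtoul() itself is from the series.

#include <linux/device.h>
#include <linux/kernel.h>	/* kstrtoul() */

static ssize_t example_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	unsigned long val;
	int ret;

	/* base 10 here; pass 0 to auto-detect a 0x/0 prefix, as xen-blkback does */
	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;	/* -EINVAL or -ERANGE */

	/* ... use val ... */
	return count;
}
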
+ 0 - 2
drivers/block/swim.c

@@ -924,7 +924,6 @@ static int swim_probe(struct platform_device *dev)
 	return 0;
 
 out_kfree:
-	platform_set_drvdata(dev, NULL);
 	kfree(swd);
 out_iounmap:
 	iounmap(swim_base);
@@ -962,7 +961,6 @@ static int swim_remove(struct platform_device *dev)
 	if (res)
 		release_mem_region(res->start, resource_size(res));
 
-	platform_set_drvdata(dev, NULL);
 	kfree(swd);
 
 	return 0;

+ 1 - 1
drivers/block/xen-blkback/xenbus.c

@@ -620,7 +620,7 @@ static void backend_changed(struct xenbus_watch *watch,
 	}
 
 	/* Front end dir is a number, which is used as the handle. */
-	err = strict_strtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
+	err = kstrtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
 	if (err)
 		return;
 

+ 24 - 36
drivers/char/tpm/tpm_tis.c

@@ -766,6 +766,25 @@ static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
 }
 #endif
 
+#ifdef CONFIG_PM_SLEEP
+static int tpm_tis_resume(struct device *dev)
+{
+	struct tpm_chip *chip = dev_get_drvdata(dev);
+	int ret;
+
+	if (chip->vendor.irq)
+		tpm_tis_reenable_interrupts(chip);
+
+	ret = tpm_pm_resume(dev);
+	if (!ret)
+		tpm_do_selftest(chip);
+
+	return ret;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
+
 #ifdef CONFIG_PNP
 static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
 				      const struct pnp_device_id *pnp_id)
@@ -787,26 +806,6 @@ static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
 	return tpm_tis_init(&pnp_dev->dev, start, len, irq);
 }
 
-static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
-{
-	return tpm_pm_suspend(&dev->dev);
-}
-
-static int tpm_tis_pnp_resume(struct pnp_dev *dev)
-{
-	struct tpm_chip *chip = pnp_get_drvdata(dev);
-	int ret;
-
-	if (chip->vendor.irq)
-		tpm_tis_reenable_interrupts(chip);
-
-	ret = tpm_pm_resume(&dev->dev);
-	if (!ret)
-		tpm_do_selftest(chip);
-
-	return ret;
-}
-
 static struct pnp_device_id tpm_pnp_tbl[] = {
 	{"PNP0C31", 0},		/* TPM */
 	{"ATM1200", 0},		/* Atmel */
@@ -835,9 +834,12 @@ static struct pnp_driver tis_pnp_driver = {
 	.name = "tpm_tis",
 	.id_table = tpm_pnp_tbl,
 	.probe = tpm_tis_pnp_init,
-	.suspend = tpm_tis_pnp_suspend,
-	.resume = tpm_tis_pnp_resume,
 	.remove = tpm_tis_pnp_remove,
+#ifdef CONFIG_PM_SLEEP
+	.driver	= {
+		.pm = &tpm_tis_pm,
+	},
+#endif
 };
 
 #define TIS_HID_USR_IDX sizeof(tpm_pnp_tbl)/sizeof(struct pnp_device_id) -2
@@ -846,20 +848,6 @@ module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
 MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
 #endif
 
-#ifdef CONFIG_PM_SLEEP
-static int tpm_tis_resume(struct device *dev)
-{
-	struct tpm_chip *chip = dev_get_drvdata(dev);
-
-	if (chip->vendor.irq)
-		tpm_tis_reenable_interrupts(chip);
-
-	return tpm_pm_resume(dev);
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
-
 static struct platform_driver tis_drv = {
 	.driver = {
 		.name = "tpm_tis",

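The tpm_tis hunks above fold the PNP-specific suspend/resume pair into the single tpm_pm_suspend()/tpm_tis_resume() pair already used for the platform device, expose them through one dev_pm_ops built with SIMPLE_DEV_PM_OPS(), and hook that into the PNP driver via .driver.pm. The condensed sketch below shows the general pattern; the foo_* names are illustrative, not from tpm_tis.

#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	/* quiesce the hardware */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* re-initialize the hardware */
	return 0;
}
#endif

/* Defines a struct dev_pm_ops wiring both callbacks for system sleep */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.pm   = &foo_pm_ops,	/* shared by any bus glue for the device */
	},
};
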
+ 29 - 44
drivers/firmware/dmi_scan.c

@@ -14,7 +14,7 @@
  * of and an antecedent to, SMBIOS, which stands for System
  * Management BIOS.  See further: http://www.dmtf.org/standards
  */
-static char dmi_empty_string[] = "        ";
+static const char dmi_empty_string[] = "        ";
 
 static u16 __initdata dmi_ver;
 /*
@@ -49,7 +49,7 @@ static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s)
 	return "";
 }
 
-static char * __init dmi_string(const struct dmi_header *dm, u8 s)
+static const char * __init dmi_string(const struct dmi_header *dm, u8 s)
 {
 	const char *bp = dmi_string_nosave(dm, s);
 	char *str;
@@ -62,8 +62,6 @@ static char * __init dmi_string(const struct dmi_header *dm, u8 s)
 	str = dmi_alloc(len);
 	if (str != NULL)
 		strcpy(str, bp);
-	else
-		printk(KERN_ERR "dmi_string: cannot allocate %Zu bytes.\n", len);
 
 	return str;
 }
@@ -133,17 +131,18 @@ static int __init dmi_checksum(const u8 *buf, u8 len)
 	return sum == 0;
 }
 
-static char *dmi_ident[DMI_STRING_MAX];
+static const char *dmi_ident[DMI_STRING_MAX];
 static LIST_HEAD(dmi_devices);
 int dmi_available;
 
 /*
  *	Save a DMI string
  */
-static void __init dmi_save_ident(const struct dmi_header *dm, int slot, int string)
+static void __init dmi_save_ident(const struct dmi_header *dm, int slot,
+		int string)
 {
-	const char *d = (const char*) dm;
-	char *p;
+	const char *d = (const char *) dm;
+	const char *p;
 
 	if (dmi_ident[slot])
 		return;
@@ -155,9 +154,10 @@ static void __init dmi_save_ident(const struct dmi_header *dm, int slot, int str
 	dmi_ident[slot] = p;
 }
 
-static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int index)
+static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
+		int index)
 {
-	const u8 *d = (u8*) dm + index;
+	const u8 *d = (u8 *) dm + index;
 	char *s;
 	int is_ff = 1, is_00 = 1, i;
 
@@ -188,12 +188,13 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int inde
 	else
 		sprintf(s, "%pUB", d);
 
-        dmi_ident[slot] = s;
+	dmi_ident[slot] = s;
 }
 
-static void __init dmi_save_type(const struct dmi_header *dm, int slot, int index)
+static void __init dmi_save_type(const struct dmi_header *dm, int slot,
+		int index)
 {
-	const u8 *d = (u8*) dm + index;
+	const u8 *d = (u8 *) dm + index;
 	char *s;
 
 	if (dmi_ident[slot])
@@ -216,10 +217,8 @@ static void __init dmi_save_one_device(int type, const char *name)
 		return;
 
 	dev = dmi_alloc(sizeof(*dev) + strlen(name) + 1);
-	if (!dev) {
-		printk(KERN_ERR "dmi_save_one_device: out of memory.\n");
+	if (!dev)
 		return;
-	}
 
 	dev->type = type;
 	strcpy((char *)(dev + 1), name);
@@ -249,17 +248,14 @@ static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm)
 	struct dmi_device *dev;
 
 	for (i = 1; i <= count; i++) {
-		char *devname = dmi_string(dm, i);
+		const char *devname = dmi_string(dm, i);
 
 		if (devname == dmi_empty_string)
 			continue;
 
 		dev = dmi_alloc(sizeof(*dev));
-		if (!dev) {
-			printk(KERN_ERR
-			   "dmi_save_oem_strings_devices: out of memory.\n");
+		if (!dev)
 			break;
-		}
 
 		dev->type = DMI_DEV_TYPE_OEM_STRING;
 		dev->name = devname;
@@ -272,21 +268,17 @@ static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm)
 static void __init dmi_save_ipmi_device(const struct dmi_header *dm)
 {
 	struct dmi_device *dev;
-	void * data;
+	void *data;
 
 	data = dmi_alloc(dm->length);
-	if (data == NULL) {
-		printk(KERN_ERR "dmi_save_ipmi_device: out of memory.\n");
+	if (data == NULL)
 		return;
-	}
 
 	memcpy(data, dm, dm->length);
 
 	dev = dmi_alloc(sizeof(*dev));
-	if (!dev) {
-		printk(KERN_ERR "dmi_save_ipmi_device: out of memory.\n");
+	if (!dev)
 		return;
-	}
 
 	dev->type = DMI_DEV_TYPE_IPMI;
 	dev->name = "IPMI controller";
@@ -301,10 +293,9 @@ static void __init dmi_save_dev_onboard(int instance, int segment, int bus,
 	struct dmi_dev_onboard *onboard_dev;
 
 	onboard_dev = dmi_alloc(sizeof(*onboard_dev) + strlen(name) + 1);
-	if (!onboard_dev) {
-		printk(KERN_ERR "dmi_save_dev_onboard: out of memory.\n");
+	if (!onboard_dev)
 		return;
-	}
+
 	onboard_dev->instance = instance;
 	onboard_dev->segment = segment;
 	onboard_dev->bus = bus;
@@ -320,7 +311,7 @@ static void __init dmi_save_dev_onboard(int instance, int segment, int bus,
 
 static void __init dmi_save_extended_devices(const struct dmi_header *dm)
 {
-	const u8 *d = (u8*) dm + 5;
+	const u8 *d = (u8 *) dm + 5;
 
 	/* Skip disabled device */
 	if ((*d & 0x80) == 0)
@@ -338,7 +329,7 @@ static void __init dmi_save_extended_devices(const struct dmi_header *dm)
  */
 static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
 {
-	switch(dm->type) {
+	switch (dm->type) {
 	case 0:		/* BIOS Information */
 		dmi_save_ident(dm, DMI_BIOS_VENDOR, 4);
 		dmi_save_ident(dm, DMI_BIOS_VERSION, 5);
@@ -502,13 +493,7 @@ void __init dmi_scan_machine(void)
 			dmi_available = 1;
 			goto out;
 		}
-	}
-	else {
-		/*
-		 * no iounmap() for that ioremap(); it would be a no-op, but
-		 * it's so early in setup that sucker gets confused into doing
-		 * what it shouldn't if we actually call it.
-		 */
+	} else {
 		p = dmi_ioremap(0xF0000, 0x10000);
 		if (p == NULL)
 			goto error;
@@ -533,7 +518,7 @@ void __init dmi_scan_machine(void)
 		dmi_iounmap(p, 0x10000);
 	}
  error:
-	printk(KERN_INFO "DMI not present or invalid.\n");
+	pr_info("DMI not present or invalid.\n");
  out:
 	dmi_initialized = 1;
 }
@@ -669,7 +654,7 @@ int dmi_name_in_serial(const char *str)
 
 /**
  *	dmi_name_in_vendors - Check if string is in the DMI system or board vendor name
- *	@str: 	Case sensitive Name
+ *	@str: Case sensitive Name
  */
 int dmi_name_in_vendors(const char *str)
 {
@@ -696,13 +681,13 @@ EXPORT_SYMBOL(dmi_name_in_vendors);
  *	A new search is initiated by passing %NULL as the @from argument.
  *	If @from is not %NULL, searches continue from next device.
  */
-const struct dmi_device * dmi_find_device(int type, const char *name,
+const struct dmi_device *dmi_find_device(int type, const char *name,
 				    const struct dmi_device *from)
 {
 	const struct list_head *head = from ? &from->list : &dmi_devices;
 	struct list_head *d;
 
-	for(d = head->next; d != &dmi_devices; d = d->next) {
+	for (d = head->next; d != &dmi_devices; d = d->next) {
 		const struct dmi_device *dev =
 			list_entry(d, struct dmi_device, list);
 

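Among the dmi_scan cleanups above, dmi_find_device() keeps its calling convention while the surrounding code is constified and restyled: a consumer iterates matches by passing the previous result back in as @from. A small illustrative loop (not taken from the patch):

#include <linux/dmi.h>
#include <linux/printk.h>

static void example_list_ipmi_devices(void)
{
	const struct dmi_device *dev = NULL;

	/* NULL starts a new search; passing the previous hit continues it */
	while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev)))
		pr_info("DMI IPMI device: %s\n", dev->name);
}
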
+ 1 - 1
drivers/firmware/google/gsmi.c

@@ -525,7 +525,7 @@ static ssize_t gsmi_clear_eventlog_store(struct kobject *kobj,
 		u32 data_type;
 	} param;
 
-	rc = strict_strtoul(buf, 0, &val);
+	rc = kstrtoul(buf, 0, &val);
 	if (rc)
 		return rc;
 

+ 0 - 2
drivers/iommu/msm_iommu_dev.c

@@ -282,7 +282,6 @@ static int msm_iommu_remove(struct platform_device *pdev)
 		clk_put(drv->pclk);
 		memset(drv, 0, sizeof(*drv));
 		kfree(drv);
-		platform_set_drvdata(pdev, NULL);
 	}
 	return 0;
 }
@@ -366,7 +365,6 @@ static int msm_iommu_ctx_remove(struct platform_device *pdev)
 	if (drv) {
 		memset(drv, 0, sizeof(struct msm_iommu_ctx_drvdata));
 		kfree(drv);
-		platform_set_drvdata(pdev, NULL);
 	}
 	return 0;
 }

+ 0 - 2
drivers/iommu/omap-iommu.c

@@ -1008,8 +1008,6 @@ static int omap_iommu_remove(struct platform_device *pdev)
 	struct resource *res;
 	struct omap_iommu *obj = platform_get_drvdata(pdev);
 
-	platform_set_drvdata(pdev, NULL);
-
 	iopgtable_clear_entry_all(obj);
 
 	irq = platform_get_irq(pdev, 0);

+ 12 - 0
drivers/memstick/core/Kconfig

@@ -24,3 +24,15 @@ config MSPRO_BLOCK
 	  support. This provides a block device driver, which you can use
 	  to mount the filesystem. Almost everyone wishing MemoryStick
 	  support should say Y or M here.
+
+config MS_BLOCK
+	tristate "MemoryStick Standard device driver"
+	depends on BLOCK
+	help
+	  Say Y here to enable the MemoryStick Standard device driver
+	  support. This provides a block device driver, which you can use
+	  to mount the filesystem.
+	  This driver works with old (bulky) MemoryStick and MemoryStick Duo
+	  but not PRO. Say Y if you have such a card.
+	  The driver is new and not yet well tested, so it may damage your
+	  card (even permanently).

+ 1 - 1
drivers/memstick/core/Makefile

@@ -3,5 +3,5 @@
 #
 
 obj-$(CONFIG_MEMSTICK)		+= memstick.o
-
+obj-$(CONFIG_MS_BLOCK)		+= ms_block.o
 obj-$(CONFIG_MSPRO_BLOCK)	+= mspro_block.o

+ 2385 - 0
drivers/memstick/core/ms_block.c

@@ -0,0 +1,2385 @@
+/*
+ *  ms_block.c - Sony MemoryStick (legacy) storage support
+ *
+ *  Copyright (C) 2013 Maxim Levitsky <maximlevitsky@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Minor portions of the driver were copied from mspro_block.c which is
+ * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
+ *
+ */
+#define DRIVER_NAME "ms_block"
+#define pr_fmt(fmt) DRIVER_NAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/memstick.h>
+#include <linux/idr.h>
+#include <linux/hdreg.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/bitmap.h>
+#include <linux/scatterlist.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include "ms_block.h"
+
+static int debug;
+static int cache_flush_timeout = 1000;
+static bool verify_writes;
+
+/*
+ * Copies a section of 'sg_from', starting at offset 'offset' and with length
+ * 'len', to another scatterlist of 'to_nents' entries
+ */
+static size_t msb_sg_copy(struct scatterlist *sg_from,
+	struct scatterlist *sg_to, int to_nents, size_t offset, size_t len)
+{
+	size_t copied = 0;
+
+	while (offset > 0) {
+		if (offset >= sg_from->length) {
+			if (sg_is_last(sg_from))
+				return 0;
+
+			offset -= sg_from->length;
+			sg_from = sg_next(sg_from);
+			continue;
+		}
+
+		copied = min(len, sg_from->length - offset);
+		sg_set_page(sg_to, sg_page(sg_from),
+			copied, sg_from->offset + offset);
+
+		len -= copied;
+		offset = 0;
+
+		if (sg_is_last(sg_from) || !len)
+			goto out;
+
+		sg_to = sg_next(sg_to);
+		to_nents--;
+		sg_from = sg_next(sg_from);
+	}
+
+	while (len > sg_from->length && to_nents--) {
+		len -= sg_from->length;
+		copied += sg_from->length;
+
+		sg_set_page(sg_to, sg_page(sg_from),
+				sg_from->length, sg_from->offset);
+
+		if (sg_is_last(sg_from) || !len)
+			goto out;
+
+		sg_from = sg_next(sg_from);
+		sg_to = sg_next(sg_to);
+	}
+
+	if (len && to_nents) {
+		sg_set_page(sg_to, sg_page(sg_from), len, sg_from->offset);
+		copied += len;
+	}
+out:
+	sg_mark_end(sg_to);
+	return copied;
+}
+
+/*
+ * Compares a section of 'sg', starting at offset 'offset' and with length
+ * 'len', to a linear buffer of length 'len' at address 'buffer'.
+ * Returns 0 if equal and -1 otherwise
+ */
+static int msb_sg_compare_to_buffer(struct scatterlist *sg,
+					size_t offset, u8 *buffer, size_t len)
+{
+	int retval = 0, cmplen;
+	struct sg_mapping_iter miter;
+
+	sg_miter_start(&miter, sg, sg_nents(sg),
+					SG_MITER_ATOMIC | SG_MITER_FROM_SG);
+
+	while (sg_miter_next(&miter) && len > 0) {
+		if (offset >= miter.length) {
+			offset -= miter.length;
+			continue;
+		}
+
+		cmplen = min(miter.length - offset, len);
+		retval = memcmp(miter.addr + offset, buffer, cmplen) ? -1 : 0;
+		if (retval)
+			break;
+
+		buffer += cmplen;
+		len -= cmplen;
+		offset = 0;
+	}
+
+	if (!retval && len)
+		retval = -1;
+
+	sg_miter_stop(&miter);
+	return retval;
+}
+
+
+/* Get the zone in which the block with logical address 'lba' lives.
+ * Flash is broken into zones.
+ * Each zone consists of 512 eraseblocks; the first zone exposes 494
+ * logical blocks, and every following zone exposes 496.
+ * Therefore zone #0 hosts LBAs 0-493, zone #1 LBAs 494-989, etc...
+ */
+static int msb_get_zone_from_lba(int lba)
+{
+	if (lba < 494)
+		return 0;
+	return ((lba - 494) / 496) + 1;
+}
+
+/* Get zone of physical block. Trivial */
+static int msb_get_zone_from_pba(int pba)
+{
+	return pba / MS_BLOCKS_IN_ZONE;
+}
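
/*
 * Worked examples of the zone mapping implemented by the two helpers above
 * (illustration only, on the assumption that MS_BLOCKS_IN_ZONE == 512):
 *
 *   msb_get_zone_from_lba(493) == 0     last LBA of zone 0
 *   msb_get_zone_from_lba(494) == 1     (494 - 494) / 496 + 1
 *   msb_get_zone_from_lba(989) == 1     495 / 496 == 0
 *   msb_get_zone_from_lba(990) == 2
 *
 *   msb_get_zone_from_pba(511) == 0
 *   msb_get_zone_from_pba(512) == 1
 */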
+
+/* Debug test to validate free block counts */
+static int msb_validate_used_block_bitmap(struct msb_data *msb)
+{
+	int total_free_blocks = 0;
+	int i;
+
+	if (!debug)
+		return 0;
+
+	for (i = 0; i < msb->zone_count; i++)
+		total_free_blocks += msb->free_block_count[i];
+
+	if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap,
+					msb->block_count) == total_free_blocks)
+		return 0;
+
+	pr_err("BUG: free block counts don't match the bitmap");
+	msb->read_only = true;
+	return -EINVAL;
+}
+
+/* Mark physical block as used */
+static void msb_mark_block_used(struct msb_data *msb, int pba)
+{
+	int zone = msb_get_zone_from_pba(pba);
+
+	if (test_bit(pba, msb->used_blocks_bitmap)) {
+		pr_err(
+		"BUG: attempt to mark already used pba %d as used", pba);
+		msb->read_only = true;
+		return;
+	}
+
+	if (msb_validate_used_block_bitmap(msb))
+		return;
+
+	/* No races because all IO is single threaded */
+	__set_bit(pba, msb->used_blocks_bitmap);
+	msb->free_block_count[zone]--;
+}
+
+/* Mark physical block as free */
+static void msb_mark_block_unused(struct msb_data *msb, int pba)
+{
+	int zone = msb_get_zone_from_pba(pba);
+
+	if (!test_bit(pba, msb->used_blocks_bitmap)) {
+		pr_err("BUG: attempt to mark already unused pba %d as unused", pba);
+		msb->read_only = true;
+		return;
+	}
+
+	if (msb_validate_used_block_bitmap(msb))
+		return;
+
+	/* No races because all IO is single threaded */
+	__clear_bit(pba, msb->used_blocks_bitmap);
+	msb->free_block_count[zone]++;
+}
+
+/* Invalidate current register window */
+static void msb_invalidate_reg_window(struct msb_data *msb)
+{
+	msb->reg_addr.w_offset = offsetof(struct ms_register, id);
+	msb->reg_addr.w_length = sizeof(struct ms_id_register);
+	msb->reg_addr.r_offset = offsetof(struct ms_register, id);
+	msb->reg_addr.r_length = sizeof(struct ms_id_register);
+	msb->addr_valid = false;
+}
+
+/* Start a state machine */
+static int msb_run_state_machine(struct msb_data *msb, int (*state_func)
+		(struct memstick_dev *card, struct memstick_request **req))
+{
+	struct memstick_dev *card = msb->card;
+
+	WARN_ON(msb->state != -1);
+	msb->int_polling = false;
+	msb->state = 0;
+	msb->exit_error = 0;
+
+	memset(&card->current_mrq, 0, sizeof(card->current_mrq));
+
+	card->next_request = state_func;
+	memstick_new_req(card->host);
+	wait_for_completion(&card->mrq_complete);
+
+	WARN_ON(msb->state != -1);
+	return msb->exit_error;
+}
+
+/* State machines call this to exit */
+static int msb_exit_state_machine(struct msb_data *msb, int error)
+{
+	WARN_ON(msb->state == -1);
+
+	msb->state = -1;
+	msb->exit_error = error;
+	msb->card->next_request = h_msb_default_bad;
+
+	/* Invalidate reg window on errors */
+	if (error)
+		msb_invalidate_reg_window(msb);
+
+	complete(&msb->card->mrq_complete);
+	return -ENXIO;
+}
+
+/* read INT register */
+static int msb_read_int_reg(struct msb_data *msb, long timeout)
+{
+	struct memstick_request *mrq = &msb->card->current_mrq;
+
+	WARN_ON(msb->state == -1);
+
+	if (!msb->int_polling) {
+		msb->int_timeout = jiffies +
+			msecs_to_jiffies(timeout == -1 ? 500 : timeout);
+		msb->int_polling = true;
+	} else if (time_after(jiffies, msb->int_timeout)) {
+		mrq->data[0] = MEMSTICK_INT_CMDNAK;
+		return 0;
+	}
+
+	if ((msb->caps & MEMSTICK_CAP_AUTO_GET_INT) &&
+				mrq->need_card_int && !mrq->error) {
+		mrq->data[0] = mrq->int_reg;
+		mrq->need_card_int = false;
+		return 0;
+	} else {
+		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
+		return 1;
+	}
+}
+
+/* Read a register */
+static int msb_read_regs(struct msb_data *msb, int offset, int len)
+{
+	struct memstick_request *req = &msb->card->current_mrq;
+
+	if (msb->reg_addr.r_offset != offset ||
+	    msb->reg_addr.r_length != len || !msb->addr_valid) {
+
+		msb->reg_addr.r_offset = offset;
+		msb->reg_addr.r_length = len;
+		msb->addr_valid = true;
+
+		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
+			&msb->reg_addr, sizeof(msb->reg_addr));
+		return 0;
+	}
+
+	memstick_init_req(req, MS_TPC_READ_REG, NULL, len);
+	return 1;
+}
+
+/* Write a card register */
+static int msb_write_regs(struct msb_data *msb, int offset, int len, void *buf)
+{
+	struct memstick_request *req = &msb->card->current_mrq;
+
+	if (msb->reg_addr.w_offset != offset ||
+		msb->reg_addr.w_length != len  || !msb->addr_valid) {
+
+		msb->reg_addr.w_offset = offset;
+		msb->reg_addr.w_length = len;
+		msb->addr_valid = true;
+
+		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
+			&msb->reg_addr, sizeof(msb->reg_addr));
+		return 0;
+	}
+
+	memstick_init_req(req, MS_TPC_WRITE_REG, buf, len);
+	return 1;
+}
+
+/* Handler for absence of IO */
+static int h_msb_default_bad(struct memstick_dev *card,
+						struct memstick_request **mrq)
+{
+	return -ENXIO;
+}
+
+/*
+ * This function is a handler for reads of one page from the device.
+ * It writes its output to msb->current_sg and takes the sector address from
+ * msb->regs.param. It can also be used to read extra data only; set the
+ * params accordingly.
+ */
+static int h_msb_read_page(struct memstick_dev *card,
+					struct memstick_request **out_mrq)
+{
+	struct msb_data *msb = memstick_get_drvdata(card);
+	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
+	struct scatterlist sg[2];
+	u8 command, intreg;
+
+	if (mrq->error) {
+		dbg("read_page, unknown error");
+		return msb_exit_state_machine(msb, mrq->error);
+	}
+again:
+	switch (msb->state) {
+	case MSB_RP_SEND_BLOCK_ADDRESS:
+		/* msb_write_regs sometimes "fails" because it needs to update
+		 * the reg window first, in which case it issues a request for
+		 * that instead. Then we stay in this state and retry. */
+		if (!msb_write_regs(msb,
+			offsetof(struct ms_register, param),
+			sizeof(struct ms_param_register),
+			(unsigned char *)&msb->regs.param))
+			return 0;
+
+		msb->state = MSB_RP_SEND_READ_COMMAND;
+		return 0;
+
+	case MSB_RP_SEND_READ_COMMAND:
+		command = MS_CMD_BLOCK_READ;
+		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
+		msb->state = MSB_RP_SEND_INT_REQ;
+		return 0;
+
+	case MSB_RP_SEND_INT_REQ:
+		msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT;
+		/* If we don't actually need to send the INT read request
+		 * (only needed in serial mode), then just fall through */
+		if (msb_read_int_reg(msb, -1))
+			return 0;
+		/* fallthrough */
+
+	case MSB_RP_RECEIVE_INT_REQ_RESULT:
+		intreg = mrq->data[0];
+		msb->regs.status.interrupt = intreg;
+
+		if (intreg & MEMSTICK_INT_CMDNAK)
+			return msb_exit_state_machine(msb, -EIO);
+
+		if (!(intreg & MEMSTICK_INT_CED)) {
+			msb->state = MSB_RP_SEND_INT_REQ;
+			goto again;
+		}
+
+		msb->int_polling = false;
+		msb->state = (intreg & MEMSTICK_INT_ERR) ?
+			MSB_RP_SEND_READ_STATUS_REG : MSB_RP_SEND_OOB_READ;
+		goto again;
+
+	case MSB_RP_SEND_READ_STATUS_REG:
+		/* read the status register to understand the source of the INT_ERR */
+		if (!msb_read_regs(msb,
+			offsetof(struct ms_register, status),
+			sizeof(struct ms_status_register)))
+			return 0;
+
+		msb->state = MSB_RP_RECIVE_STATUS_REG;
+		return 0;
+
+	case MSB_RP_RECIVE_STATUS_REG:
+		msb->regs.status = *(struct ms_status_register *)mrq->data;
+		msb->state = MSB_RP_SEND_OOB_READ;
+		/* fallthrough */
+
+	case MSB_RP_SEND_OOB_READ:
+		if (!msb_read_regs(msb,
+			offsetof(struct ms_register, extra_data),
+			sizeof(struct ms_extra_data_register)))
+			return 0;
+
+		msb->state = MSB_RP_RECEIVE_OOB_READ;
+		return 0;
+
+	case MSB_RP_RECEIVE_OOB_READ:
+		msb->regs.extra_data =
+			*(struct ms_extra_data_register *) mrq->data;
+		msb->state = MSB_RP_SEND_READ_DATA;
+		/* fallthrough */
+
+	case MSB_RP_SEND_READ_DATA:
+		/* Skip that state if we only read the oob */
+		if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) {
+			msb->state = MSB_RP_RECEIVE_READ_DATA;
+			goto again;
+		}
+
+		sg_init_table(sg, ARRAY_SIZE(sg));
+		msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
+			msb->current_sg_offset,
+			msb->page_size);
+
+		memstick_init_req_sg(mrq, MS_TPC_READ_LONG_DATA, sg);
+		msb->state = MSB_RP_RECEIVE_READ_DATA;
+		return 0;
+
+	case MSB_RP_RECEIVE_READ_DATA:
+		if (!(msb->regs.status.interrupt & MEMSTICK_INT_ERR)) {
+			msb->current_sg_offset += msb->page_size;
+			return msb_exit_state_machine(msb, 0);
+		}
+
+		if (msb->regs.status.status1 & MEMSTICK_UNCORR_ERROR) {
+			dbg("read_page: uncorrectable error");
+			return msb_exit_state_machine(msb, -EBADMSG);
+		}
+
+		if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) {
+			dbg("read_page: correctable error");
+			msb->current_sg_offset += msb->page_size;
+			return msb_exit_state_machine(msb, -EUCLEAN);
+		} else {
+			dbg("read_page: INT error, but no status error bits");
+			return msb_exit_state_machine(msb, -EIO);
+		}
+	}
+
+	BUG();
+}
+
+/*
+ * Handler of writes of exactly one block.
+ * Takes address from msb->regs.param.
+ * Writes same extra data to blocks, also taken
+ * from msb->regs.extra
+ * Returns -EBADMSG if write fails due to uncorrectable error, or -EIO if
+ * device refuses to take the command or something else
+ */
+static int h_msb_write_block(struct memstick_dev *card,
+					struct memstick_request **out_mrq)
+{
+	struct msb_data *msb = memstick_get_drvdata(card);
+	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
+	struct scatterlist sg[2];
+	u8 intreg, command;
+
+	if (mrq->error)
+		return msb_exit_state_machine(msb, mrq->error);
+
+again:
+	switch (msb->state) {
+
+	/* HACK: the JMicron handling of TPCs between 8 and
+	 *	sizeof(memstick_request.data) is broken due to a hardware
+	 *	bug in the PIO mode that is used for these TPCs.
+	 *	Therefore split the write.
+	 */
+
+	case MSB_WB_SEND_WRITE_PARAMS:
+		if (!msb_write_regs(msb,
+			offsetof(struct ms_register, param),
+			sizeof(struct ms_param_register),
+			&msb->regs.param))
+			return 0;
+
+		msb->state = MSB_WB_SEND_WRITE_OOB;
+		return 0;
+
+	case MSB_WB_SEND_WRITE_OOB:
+		if (!msb_write_regs(msb,
+			offsetof(struct ms_register, extra_data),
+			sizeof(struct ms_extra_data_register),
+			&msb->regs.extra_data))
+			return 0;
+		msb->state = MSB_WB_SEND_WRITE_COMMAND;
+		return 0;
+
+
+	case MSB_WB_SEND_WRITE_COMMAND:
+		command = MS_CMD_BLOCK_WRITE;
+		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
+		msb->state = MSB_WB_SEND_INT_REQ;
+		return 0;
+
+	case MSB_WB_SEND_INT_REQ:
+		msb->state = MSB_WB_RECEIVE_INT_REQ;
+		if (msb_read_int_reg(msb, -1))
+			return 0;
+		/* fallthrough */
+
+	case MSB_WB_RECEIVE_INT_REQ:
+		intreg = mrq->data[0];
+		msb->regs.status.interrupt = intreg;
+
+		/* errors mean out of here, and fast... */
+		if (intreg & (MEMSTICK_INT_CMDNAK))
+			return msb_exit_state_machine(msb, -EIO);
+
+		if (intreg & MEMSTICK_INT_ERR)
+			return msb_exit_state_machine(msb, -EBADMSG);
+
+
+		/* for last page we need to poll CED */
+		if (msb->current_page == msb->pages_in_block) {
+			if (intreg & MEMSTICK_INT_CED)
+				return msb_exit_state_machine(msb, 0);
+			msb->state = MSB_WB_SEND_INT_REQ;
+			goto again;
+
+		}
+
+		/* for non-last page we need BREQ before writing next chunk */
+		if (!(intreg & MEMSTICK_INT_BREQ)) {
+			msb->state = MSB_WB_SEND_INT_REQ;
+			goto again;
+		}
+
+		msb->int_polling = false;
+		msb->state = MSB_WB_SEND_WRITE_DATA;
+		/* fallthrough */
+
+	case MSB_WB_SEND_WRITE_DATA:
+		sg_init_table(sg, ARRAY_SIZE(sg));
+
+		if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
+			msb->current_sg_offset,
+			msb->page_size) < msb->page_size)
+			return msb_exit_state_machine(msb, -EIO);
+
+		memstick_init_req_sg(mrq, MS_TPC_WRITE_LONG_DATA, sg);
+		mrq->need_card_int = 1;
+		msb->state = MSB_WB_RECEIVE_WRITE_CONFIRMATION;
+		return 0;
+
+	case MSB_WB_RECEIVE_WRITE_CONFIRMATION:
+		msb->current_page++;
+		msb->current_sg_offset += msb->page_size;
+		msb->state = MSB_WB_SEND_INT_REQ;
+		goto again;
+	default:
+		BUG();
+	}
+
+	return 0;
+}
+
+/*
+ * This function is used to send simple IO requests to device that consist
+ * of register write + command
+ */
+static int h_msb_send_command(struct memstick_dev *card,
+					struct memstick_request **out_mrq)
+{
+	struct msb_data *msb = memstick_get_drvdata(card);
+	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
+	u8 intreg;
+
+	if (mrq->error) {
+		dbg("send_command: unknown error");
+		return msb_exit_state_machine(msb, mrq->error);
+	}
+again:
+	switch (msb->state) {
+
+	/* HACK: see h_msb_write_block */
+	case MSB_SC_SEND_WRITE_PARAMS: /* write param register*/
+		if (!msb_write_regs(msb,
+			offsetof(struct ms_register, param),
+			sizeof(struct ms_param_register),
+			&msb->regs.param))
+			return 0;
+		msb->state = MSB_SC_SEND_WRITE_OOB;
+		return 0;
+
+	case MSB_SC_SEND_WRITE_OOB:
+		if (!msb->command_need_oob) {
+			msb->state = MSB_SC_SEND_COMMAND;
+			goto again;
+		}
+
+		if (!msb_write_regs(msb,
+			offsetof(struct ms_register, extra_data),
+			sizeof(struct ms_extra_data_register),
+			&msb->regs.extra_data))
+			return 0;
+
+		msb->state = MSB_SC_SEND_COMMAND;
+		return 0;
+
+	case MSB_SC_SEND_COMMAND:
+		memstick_init_req(mrq, MS_TPC_SET_CMD, &msb->command_value, 1);
+		msb->state = MSB_SC_SEND_INT_REQ;
+		return 0;
+
+	case MSB_SC_SEND_INT_REQ:
+		msb->state = MSB_SC_RECEIVE_INT_REQ;
+		if (msb_read_int_reg(msb, -1))
+			return 0;
+		/* fallthrough */
+
+	case MSB_SC_RECEIVE_INT_REQ:
+		intreg = mrq->data[0];
+
+		if (intreg & MEMSTICK_INT_CMDNAK)
+			return msb_exit_state_machine(msb, -EIO);
+		if (intreg & MEMSTICK_INT_ERR)
+			return msb_exit_state_machine(msb, -EBADMSG);
+
+		if (!(intreg & MEMSTICK_INT_CED)) {
+			msb->state = MSB_SC_SEND_INT_REQ;
+			goto again;
+		}
+
+		return msb_exit_state_machine(msb, 0);
+	}
+
+	BUG();
+}
+
+/* Small handler for card reset */
+static int h_msb_reset(struct memstick_dev *card,
+					struct memstick_request **out_mrq)
+{
+	u8 command = MS_CMD_RESET;
+	struct msb_data *msb = memstick_get_drvdata(card);
+	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
+
+	if (mrq->error)
+		return msb_exit_state_machine(msb, mrq->error);
+
+	switch (msb->state) {
+	case MSB_RS_SEND:
+		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
+		mrq->need_card_int = 0;
+		msb->state = MSB_RS_CONFIRM;
+		return 0;
+	case MSB_RS_CONFIRM:
+		return msb_exit_state_machine(msb, 0);
+	}
+	BUG();
+}
+
+/* This handler is used to do serial->parallel switch */
+static int h_msb_parallel_switch(struct memstick_dev *card,
+					struct memstick_request **out_mrq)
+{
+	struct msb_data *msb = memstick_get_drvdata(card);
+	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
+	struct memstick_host *host = card->host;
+
+	if (mrq->error) {
+		dbg("parallel_switch: error");
+		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
+		return msb_exit_state_machine(msb, mrq->error);
+	}
+
+	switch (msb->state) {
+	case MSB_PS_SEND_SWITCH_COMMAND:
+		/* Set the parallel interface on memstick side */
+		msb->regs.param.system |= MEMSTICK_SYS_PAM;
+
+		if (!msb_write_regs(msb,
+			offsetof(struct ms_register, param),
+			1,
+			(unsigned char *)&msb->regs.param))
+			return 0;
+
+		msb->state = MSB_PS_SWICH_HOST;
+		return 0;
+
+	case MSB_PS_SWICH_HOST:
+		/* Set the parallel interface on our side + send a dummy
+		 * request to see if the card responds */
+		host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
+		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
+		msb->state = MSB_PS_CONFIRM;
+		return 0;
+
+	case MSB_PS_CONFIRM:
+		return msb_exit_state_machine(msb, 0);
+	}
+
+	BUG();
+}
+
+static int msb_switch_to_parallel(struct msb_data *msb);
+
+/* Reset the card, to guard against hw errors being treated as bad blocks */
+static int msb_reset(struct msb_data *msb, bool full)
+{
+
+	bool was_parallel = msb->regs.param.system & MEMSTICK_SYS_PAM;
+	struct memstick_dev *card = msb->card;
+	struct memstick_host *host = card->host;
+	int error;
+
+	/* Reset the card */
+	msb->regs.param.system = MEMSTICK_SYS_BAMD;
+
+	if (full) {
+		error =  host->set_param(host,
+					MEMSTICK_POWER, MEMSTICK_POWER_OFF);
+		if (error)
+			goto out_error;
+
+		msb_invalidate_reg_window(msb);
+
+		error = host->set_param(host,
+					MEMSTICK_POWER, MEMSTICK_POWER_ON);
+		if (error)
+			goto out_error;
+
+		error = host->set_param(host,
+					MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
+		if (error) {
+out_error:
+			dbg("Failed to reset the host controller");
+			msb->read_only = true;
+			return -EFAULT;
+		}
+	}
+
+	error = msb_run_state_machine(msb, h_msb_reset);
+	if (error) {
+		dbg("Failed to reset the card");
+		msb->read_only = true;
+		return -ENODEV;
+	}
+
+	/* Set parallel mode */
+	if (was_parallel)
+		msb_switch_to_parallel(msb);
+	return 0;
+}
+
+/* Attempts to switch interface to parallel mode */
+static int msb_switch_to_parallel(struct msb_data *msb)
+{
+	int error;
+
+	error = msb_run_state_machine(msb, h_msb_parallel_switch);
+	if (error) {
+		pr_err("Switch to parallel failed");
+		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
+		msb_reset(msb, true);
+		return -EFAULT;
+	}
+
+	msb->caps |= MEMSTICK_CAP_AUTO_GET_INT;
+	return 0;
+}
+
+/* Changes overwrite flag on a page */
+static int msb_set_overwrite_flag(struct msb_data *msb,
+						u16 pba, u8 page, u8 flag)
+{
+	if (msb->read_only)
+		return -EROFS;
+
+	msb->regs.param.block_address = cpu_to_be16(pba);
+	msb->regs.param.page_address = page;
+	msb->regs.param.cp = MEMSTICK_CP_OVERWRITE;
+	msb->regs.extra_data.overwrite_flag = flag;
+	msb->command_value = MS_CMD_BLOCK_WRITE;
+	msb->command_need_oob = true;
+
+	dbg_verbose("changing overwrite flag to %02x for sector %d, page %d",
+							flag, pba, page);
+	return msb_run_state_machine(msb, h_msb_send_command);
+}
+
+static int msb_mark_bad(struct msb_data *msb, int pba)
+{
+	pr_notice("marking pba %d as bad", pba);
+	msb_reset(msb, true);
+	return msb_set_overwrite_flag(
+			msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST);
+}
+
+static int msb_mark_page_bad(struct msb_data *msb, int pba, int page)
+{
+	dbg("marking page %d of pba %d as bad", page, pba);
+	msb_reset(msb, true);
+	return msb_set_overwrite_flag(msb,
+		pba, page, ~MEMSTICK_OVERWRITE_PGST0);
+}
+
+/* Erases one physical block */
+static int msb_erase_block(struct msb_data *msb, u16 pba)
+{
+	int error, try;
+	if (msb->read_only)
+		return -EROFS;
+
+	dbg_verbose("erasing pba %d", pba);
+
+	for (try = 1; try < 3; try++) {
+		msb->regs.param.block_address = cpu_to_be16(pba);
+		msb->regs.param.page_address = 0;
+		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
+		msb->command_value = MS_CMD_BLOCK_ERASE;
+		msb->command_need_oob = false;
+
+
+		error = msb_run_state_machine(msb, h_msb_send_command);
+		if (!error || msb_reset(msb, true))
+			break;
+	}
+
+	if (error) {
+		pr_err("erase failed, marking pba %d as bad", pba);
+		msb_mark_bad(msb, pba);
+	}
+
+	dbg_verbose("erase success, marking pba %d as unused", pba);
+	msb_mark_block_unused(msb, pba);
+	__set_bit(pba, msb->erased_blocks_bitmap);
+	return error;
+}
+
+/* Reads one page from device */
+static int msb_read_page(struct msb_data *msb,
+	u16 pba, u8 page, struct ms_extra_data_register *extra,
+					struct scatterlist *sg,  int offset)
+{
+	int try, error;
+
+	if (pba == MS_BLOCK_INVALID) {
+		unsigned long flags;
+		struct sg_mapping_iter miter;
+		size_t len = msb->page_size;
+
+		dbg_verbose("read unmapped sector. returning 0xFF");
+
+		local_irq_save(flags);
+		sg_miter_start(&miter, sg, sg_nents(sg),
+				SG_MITER_ATOMIC | SG_MITER_TO_SG);
+
+		while (sg_miter_next(&miter) && len > 0) {
+
+			int chunklen;
+
+			if (offset && offset >= miter.length) {
+				offset -= miter.length;
+				continue;
+			}
+
+			chunklen = min(miter.length - offset, len);
+			memset(miter.addr + offset, 0xFF, chunklen);
+			len -= chunklen;
+			offset = 0;
+		}
+
+		sg_miter_stop(&miter);
+		local_irq_restore(flags);
+
+		if (offset)
+			return -EFAULT;
+
+		if (extra)
+			memset(extra, 0xFF, sizeof(*extra));
+		return 0;
+	}
+
+	if (pba >= msb->block_count) {
+		pr_err("BUG: attempt to read beyond the end of the card at pba %d", pba);
+		return -EINVAL;
+	}
+
+	for (try = 1; try < 3; try++) {
+		msb->regs.param.block_address = cpu_to_be16(pba);
+		msb->regs.param.page_address = page;
+		msb->regs.param.cp = MEMSTICK_CP_PAGE;
+
+		msb->current_sg = sg;
+		msb->current_sg_offset = offset;
+		error = msb_run_state_machine(msb, h_msb_read_page);
+
+
+		if (error == -EUCLEAN) {
+			pr_notice("correctable error on pba %d, page %d",
+				pba, page);
+			error = 0;
+		}
+
+		if (!error && extra)
+			*extra = msb->regs.extra_data;
+
+		if (!error || msb_reset(msb, true))
+			break;
+
+	}
+
+	/* Mark bad pages */
+	if (error == -EBADMSG) {
+		pr_err("uncorrectable error on read of pba %d, page %d",
+			pba, page);
+
+		if (msb->regs.extra_data.overwrite_flag &
+					MEMSTICK_OVERWRITE_PGST0)
+			msb_mark_page_bad(msb, pba, page);
+		return -EBADMSG;
+	}
+
+	if (error)
+		pr_err("read of pba %d, page %d failed with error %d",
+			pba, page, error);
+	return error;
+}
+
+/* Reads oob of page only */
+static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page,
+	struct ms_extra_data_register *extra)
+{
+	int error;
+
+	BUG_ON(!extra);
+	msb->regs.param.block_address = cpu_to_be16(pba);
+	msb->regs.param.page_address = page;
+	msb->regs.param.cp = MEMSTICK_CP_EXTRA;
+
+	if (pba > msb->block_count) {
+		pr_err("BUG: attempt to read beyond the end of card at pba %d", pba);
+		return -EINVAL;
+	}
+
+	error = msb_run_state_machine(msb, h_msb_read_page);
+	*extra = msb->regs.extra_data;
+
+	if (error == -EUCLEAN) {
+		pr_notice("correctable error on pba %d, page %d",
+			pba, page);
+		return 0;
+	}
+
+	return error;
+}
+
+/* Reads a block and compares it with data contained in scatterlist orig_sg */
+static int msb_verify_block(struct msb_data *msb, u16 pba,
+				struct scatterlist *orig_sg,  int offset)
+{
+	struct scatterlist sg;
+	int page = 0, error;
+
+	sg_init_one(&sg, msb->block_buffer, msb->block_size);
+
+	while (page < msb->pages_in_block) {
+
+		error = msb_read_page(msb, pba, page,
+				NULL, &sg, page * msb->page_size);
+		if (error)
+			return error;
+		page++;
+	}
+
+	if (msb_sg_compare_to_buffer(orig_sg, offset,
+				msb->block_buffer, msb->block_size))
+		return -EIO;
+	return 0;
+}
+
+/* Writes exactly one block + oob */
+static int msb_write_block(struct msb_data *msb,
+			u16 pba, u32 lba, struct scatterlist *sg, int offset)
+{
+	int error, current_try = 1;
+	BUG_ON(sg->length < msb->page_size);
+
+	if (msb->read_only)
+		return -EROFS;
+
+	if (pba == MS_BLOCK_INVALID) {
+		pr_err(
+			"BUG: write: attempt to write MS_BLOCK_INVALID block");
+		return -EINVAL;
+	}
+
+	if (pba >= msb->block_count || lba >= msb->logical_block_count) {
+		pr_err(
+		"BUG: write: attempt to write beyond the end of device");
+		return -EINVAL;
+	}
+
+	if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
+		pr_err("BUG: write: lba zone mismatch");
+		return -EINVAL;
+	}
+
+	if (pba == msb->boot_block_locations[0] ||
+		pba == msb->boot_block_locations[1]) {
+		pr_err("BUG: write: attempt to write to boot blocks!");
+		return -EINVAL;
+	}
+
+	while (1) {
+
+		if (msb->read_only)
+			return -EROFS;
+
+		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
+		msb->regs.param.page_address = 0;
+		msb->regs.param.block_address = cpu_to_be16(pba);
+
+		msb->regs.extra_data.management_flag = 0xFF;
+		msb->regs.extra_data.overwrite_flag = 0xF8;
+		msb->regs.extra_data.logical_address = cpu_to_be16(lba);
+
+		msb->current_sg = sg;
+		msb->current_sg_offset = offset;
+		msb->current_page = 0;
+
+		error = msb_run_state_machine(msb, h_msb_write_block);
+
+		/* The sector we just wrote to is assumed erased since its pba
+			was erased. If it wasn't actually erased, the write
+			will succeed but only clear the bits that were already
+			set in the block, so verify that what we have written
+			matches what we expect. Blocks we erased are trusted. */
+		if (!error && (verify_writes ||
+				!test_bit(pba, msb->erased_blocks_bitmap)))
+			error = msb_verify_block(msb, pba, sg, offset);
+
+		if (!error)
+			break;
+
+		if (current_try > 1 || msb_reset(msb, true))
+			break;
+
+		pr_err("write failed, trying to erase the pba %d", pba);
+		error = msb_erase_block(msb, pba);
+		if (error)
+			break;
+
+		current_try++;
+	}
+	return error;
+}
+
+/* Finds a free block for write replacement */
+static u16 msb_get_free_block(struct msb_data *msb, int zone)
+{
+	u16 pos;
+	int pba = zone * MS_BLOCKS_IN_ZONE;
+	int i;
+
+	get_random_bytes(&pos, sizeof(pos));
+
+	if (!msb->free_block_count[zone]) {
+		pr_err("no free blocks left in zone %d to use for a write (media is worn out), switching to RO mode", zone);
+		msb->read_only = true;
+		return MS_BLOCK_INVALID;
+	}
+
+	pos %= msb->free_block_count[zone];
+
+	dbg_verbose("have %d choices for a free block, selected randomly: %d",
+		msb->free_block_count[zone], pos);
+
+	pba = find_next_zero_bit(msb->used_blocks_bitmap,
+							msb->block_count, pba);
+	for (i = 0; i < pos; ++i)
+		pba = find_next_zero_bit(msb->used_blocks_bitmap,
+						msb->block_count, pba + 1);
+
+	dbg_verbose("result of the free blocks scan: pba %d", pba);
+
+	if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
+		pr_err("BUG: can't get a free block");
+		msb->read_only = true;
+		return MS_BLOCK_INVALID;
+	}
+
+	msb_mark_block_used(msb, pba);
+	return pba;
+}
+
+static int msb_update_block(struct msb_data *msb, u16 lba,
+	struct scatterlist *sg, int offset)
+{
+	u16 pba, new_pba;
+	int error, try;
+
+	pba = msb->lba_to_pba_table[lba];
+	dbg_verbose("start of a block update at lba  %d, pba %d", lba, pba);
+
+	if (pba != MS_BLOCK_INVALID) {
+		dbg_verbose("setting the update flag on the block");
+		msb_set_overwrite_flag(msb, pba, 0,
+				0xFF & ~MEMSTICK_OVERWRITE_UDST);
+	}
+
+	for (try = 0; try < 3; try++) {
+		new_pba = msb_get_free_block(msb,
+			msb_get_zone_from_lba(lba));
+
+		if (new_pba == MS_BLOCK_INVALID) {
+			error = -EIO;
+			goto out;
+		}
+
+		dbg_verbose("block update: writing updated block to the pba %d",
+								new_pba);
+		error = msb_write_block(msb, new_pba, lba, sg, offset);
+		if (error == -EBADMSG) {
+			msb_mark_bad(msb, new_pba);
+			continue;
+		}
+
+		if (error)
+			goto out;
+
+		dbg_verbose("block update: erasing the old block");
+		msb_erase_block(msb, pba);
+		msb->lba_to_pba_table[lba] = new_pba;
+		return 0;
+	}
+out:
+	if (error) {
+		pr_err("block update error after %d tries,  switching to r/o mode", try);
+		msb->read_only = true;
+	}
+	return error;
+}
+
+/* Converts endianness in the boot block for easy use */
+static void msb_fix_boot_page_endianness(struct ms_boot_page *p)
+{
+	p->header.block_id = be16_to_cpu(p->header.block_id);
+	p->header.format_reserved = be16_to_cpu(p->header.format_reserved);
+	p->entry.disabled_block.start_addr
+		= be32_to_cpu(p->entry.disabled_block.start_addr);
+	p->entry.disabled_block.data_size
+		= be32_to_cpu(p->entry.disabled_block.data_size);
+	p->entry.cis_idi.start_addr
+		= be32_to_cpu(p->entry.cis_idi.start_addr);
+	p->entry.cis_idi.data_size
+		= be32_to_cpu(p->entry.cis_idi.data_size);
+	p->attr.block_size = be16_to_cpu(p->attr.block_size);
+	p->attr.number_of_blocks = be16_to_cpu(p->attr.number_of_blocks);
+	p->attr.number_of_effective_blocks
+		= be16_to_cpu(p->attr.number_of_effective_blocks);
+	p->attr.page_size = be16_to_cpu(p->attr.page_size);
+	p->attr.memory_manufacturer_code
+		= be16_to_cpu(p->attr.memory_manufacturer_code);
+	p->attr.memory_device_code = be16_to_cpu(p->attr.memory_device_code);
+	p->attr.implemented_capacity
+		= be16_to_cpu(p->attr.implemented_capacity);
+	p->attr.controller_number = be16_to_cpu(p->attr.controller_number);
+	p->attr.controller_function = be16_to_cpu(p->attr.controller_function);
+}
+
+static int msb_read_boot_blocks(struct msb_data *msb)
+{
+	int pba = 0;
+	struct scatterlist sg;
+	struct ms_extra_data_register extra;
+	struct ms_boot_page *page;
+
+	msb->boot_block_locations[0] = MS_BLOCK_INVALID;
+	msb->boot_block_locations[1] = MS_BLOCK_INVALID;
+	msb->boot_block_count = 0;
+
+	dbg_verbose("Start of a scan for the boot blocks");
+
+	if (!msb->boot_page) {
+		page = kmalloc(sizeof(struct ms_boot_page)*2, GFP_KERNEL);
+		if (!page)
+			return -ENOMEM;
+
+		msb->boot_page = page;
+	} else
+		page = msb->boot_page;
+
+	msb->block_count = MS_BLOCK_MAX_BOOT_ADDR;
+
+	for (pba = 0; pba < MS_BLOCK_MAX_BOOT_ADDR; pba++) {
+
+		sg_init_one(&sg, page, sizeof(*page));
+		if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) {
+			dbg("boot scan: can't read pba %d", pba);
+			continue;
+		}
+
+		if (extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG) {
+			dbg("management flag doesn't indicate boot block %d",
+									pba);
+			continue;
+		}
+
+		if (be16_to_cpu(page->header.block_id) != MS_BLOCK_BOOT_ID) {
+			dbg("the pba at %d doesn't contain a boot block ID", pba);
+			continue;
+		}
+
+		msb_fix_boot_page_endianness(page);
+		msb->boot_block_locations[msb->boot_block_count] = pba;
+
+		page++;
+		msb->boot_block_count++;
+
+		if (msb->boot_block_count == 2)
+			break;
+	}
+
+	if (!msb->boot_block_count) {
+		pr_err("media doesn't contain master page, aborting");
+		return -EIO;
+	}
+
+	dbg_verbose("End of scan for boot blocks");
+	return 0;
+}
+
+static int msb_read_bad_block_table(struct msb_data *msb, int block_nr)
+{
+	struct ms_boot_page *boot_block;
+	struct scatterlist sg;
+	u16 *buffer = NULL;
+	int offset = 0;
+	int i, error = 0;
+	int data_size, data_offset, page, page_offset, size_to_read;
+	u16 pba;
+
+	BUG_ON(block_nr > 1);
+	boot_block = &msb->boot_page[block_nr];
+	pba = msb->boot_block_locations[block_nr];
+
+	if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID)
+		return -EINVAL;
+
+	data_size = boot_block->entry.disabled_block.data_size;
+	data_offset = sizeof(struct ms_boot_page) +
+			boot_block->entry.disabled_block.start_addr;
+	if (!data_size)
+		return 0;
+
+	page = data_offset / msb->page_size;
+	page_offset = data_offset % msb->page_size;
+	size_to_read =
+		DIV_ROUND_UP(data_size + page_offset, msb->page_size) *
+			msb->page_size;
+
+	dbg("reading bad block table of boot block at pba %d, offset %d len %d",
+		pba, data_offset, data_size);
+
+	buffer = kzalloc(size_to_read, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	/* Read the buffer */
+	sg_init_one(&sg, buffer, size_to_read);
+
+	while (offset < size_to_read) {
+		error = msb_read_page(msb, pba, page, NULL, &sg, offset);
+		if (error)
+			goto out;
+
+		page++;
+		offset += msb->page_size;
+
+		if (page == msb->pages_in_block) {
+			pr_err(
+			"bad block table extends beyond the boot block");
+			break;
+		}
+	}
+
+	/* Process the bad block table */
+	for (i = page_offset; i < data_size / sizeof(u16); i++) {
+
+		u16 bad_block = be16_to_cpu(buffer[i]);
+
+		if (bad_block >= msb->block_count) {
+			dbg("bad block table contains invalid block %d",
+								bad_block);
+			continue;
+		}
+
+		if (test_bit(bad_block, msb->used_blocks_bitmap))  {
+			dbg("duplicate bad block %d in the table",
+				bad_block);
+			continue;
+		}
+
+		dbg("block %d is marked as factory bad", bad_block);
+		msb_mark_block_used(msb, bad_block);
+	}
+out:
+	kfree(buffer);
+	return error;
+}
+
+static int msb_ftl_initialize(struct msb_data *msb)
+{
+	int i;
+
+	if (msb->ftl_initialized)
+		return 0;
+
+	msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
+	msb->logical_block_count = msb->zone_count * 496 - 2;
+
+	msb->used_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
+	msb->erased_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
+	msb->lba_to_pba_table =
+		kmalloc(msb->logical_block_count * sizeof(u16), GFP_KERNEL);
+
+	if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
+						!msb->erased_blocks_bitmap) {
+		kfree(msb->used_blocks_bitmap);
+		kfree(msb->lba_to_pba_table);
+		kfree(msb->erased_blocks_bitmap);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < msb->zone_count; i++)
+		msb->free_block_count[i] = MS_BLOCKS_IN_ZONE;
+
+	memset(msb->lba_to_pba_table, MS_BLOCK_INVALID,
+			msb->logical_block_count * sizeof(u16));
+
+	dbg("initial FTL tables created. Zone count = %d, Logical block count = %d",
+		msb->zone_count, msb->logical_block_count);
+
+	msb->ftl_initialized = true;
+	return 0;
+}
+
+static int msb_ftl_scan(struct msb_data *msb)
+{
+	u16 pba, lba, other_block;
+	u8 overwrite_flag, managment_flag, other_overwrite_flag;
+	int error;
+	struct ms_extra_data_register extra;
+	u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL);
+
+	if (!overwrite_flags)
+		return -ENOMEM;
+
+	dbg("Start of media scanning");
+	for (pba = 0; pba < msb->block_count; pba++) {
+
+		if (pba == msb->boot_block_locations[0] ||
+			pba == msb->boot_block_locations[1]) {
+			dbg_verbose("pba %05d -> [boot block]", pba);
+			msb_mark_block_used(msb, pba);
+			continue;
+		}
+
+		if (test_bit(pba, msb->used_blocks_bitmap)) {
+			dbg_verbose("pba %05d -> [factory bad]", pba);
+			continue;
+		}
+
+		memset(&extra, 0, sizeof(extra));
+		error = msb_read_oob(msb, pba, 0, &extra);
+
+		/* can't trust the page if we can't read the oob */
+		if (error == -EBADMSG) {
+			pr_notice(
+			"oob of pba %d damaged, will try to erase it", pba);
+			msb_mark_block_used(msb, pba);
+			msb_erase_block(msb, pba);
+			continue;
+		} else if (error) {
+			pr_err("unknown error %d on read of oob of pba %d - aborting",
+				error, pba);
+
+			kfree(overwrite_flags);
+			return error;
+		}
+
+		lba = be16_to_cpu(extra.logical_address);
+		managment_flag = extra.management_flag;
+		overwrite_flag = extra.overwrite_flag;
+		overwrite_flags[pba] = overwrite_flag;
+
+		/* Skip bad blocks */
+		if (!(overwrite_flag & MEMSTICK_OVERWRITE_BKST)) {
+			dbg("pba %05d -> [BAD]", pba);
+			msb_mark_block_used(msb, pba);
+			continue;
+		}
+
+		/* Skip system/drm blocks */
+		if ((managment_flag & MEMSTICK_MANAGMENT_FLAG_NORMAL) !=
+			MEMSTICK_MANAGMENT_FLAG_NORMAL) {
+			dbg("pba %05d -> [reserved management flag %02x]",
+							pba, managment_flag);
+			msb_mark_block_used(msb, pba);
+			continue;
+		}
+
+		/* Erase temporary tables */
+		if (!(managment_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
+			dbg("pba %05d -> [temp table] - will erase", pba);
+
+			msb_mark_block_used(msb, pba);
+			msb_erase_block(msb, pba);
+			continue;
+		}
+
+		if (lba == MS_BLOCK_INVALID) {
+			dbg_verbose("pba %05d -> [free]", pba);
+			continue;
+		}
+
+		msb_mark_block_used(msb, pba);
+
+		/* The block's LBA doesn't match its zone */
+		if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
+			pr_notice("pba %05d -> [bad lba %05d] - will erase",
+								pba, lba);
+			msb_erase_block(msb, pba);
+			continue;
+		}
+
+		/* No collisions - great */
+		if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) {
+			dbg_verbose("pba %05d -> [lba %05d]", pba, lba);
+			msb->lba_to_pba_table[lba] = pba;
+			continue;
+		}
+
+		other_block = msb->lba_to_pba_table[lba];
+		other_overwrite_flag = overwrite_flags[other_block];
+
+		pr_notice("Collision between pba %d and pba %d",
+			pba, other_block);
+
+		if (!(overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
+			pr_notice("pba %d is marked as stable, use it", pba);
+			msb_erase_block(msb, other_block);
+			msb->lba_to_pba_table[lba] = pba;
+			continue;
+		}
+
+		if (!(other_overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
+			pr_notice("pba %d is marked as stable, use it",
+								other_block);
+			msb_erase_block(msb, pba);
+			continue;
+		}
+
+		pr_notice("collision between blocks %d and %d, without stable flag set on both, erasing pba %d",
+				pba, other_block, other_block);
+
+		msb_erase_block(msb, other_block);
+		msb->lba_to_pba_table[lba] = pba;
+	}
+
+	dbg("End of media scanning");
+	kfree(overwrite_flags);
+	return 0;
+}
+
+static void msb_cache_flush_timer(unsigned long data)
+{
+	struct msb_data *msb = (struct msb_data *)data;
+	msb->need_flush_cache = true;
+	queue_work(msb->io_queue, &msb->io_work);
+}
+
+
+static void msb_cache_discard(struct msb_data *msb)
+{
+	if (msb->cache_block_lba == MS_BLOCK_INVALID)
+		return;
+
+	del_timer_sync(&msb->cache_flush_timer);
+
+	dbg_verbose("Discarding the write cache");
+	msb->cache_block_lba = MS_BLOCK_INVALID;
+	bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block);
+}
+
+static int msb_cache_init(struct msb_data *msb)
+{
+	setup_timer(&msb->cache_flush_timer, msb_cache_flush_timer,
+		(unsigned long)msb);
+
+	if (!msb->cache)
+		msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
+	if (!msb->cache)
+		return -ENOMEM;
+
+	msb_cache_discard(msb);
+	return 0;
+}
+
+static int msb_cache_flush(struct msb_data *msb)
+{
+	struct scatterlist sg;
+	struct ms_extra_data_register extra;
+	int page, offset, error;
+	u16 pba, lba;
+
+	if (msb->read_only)
+		return -EROFS;
+
+	if (msb->cache_block_lba == MS_BLOCK_INVALID)
+		return 0;
+
+	lba = msb->cache_block_lba;
+	pba = msb->lba_to_pba_table[lba];
+
+	dbg_verbose("Flushing the write cache of pba %d (LBA %d)",
+						pba, msb->cache_block_lba);
+
+	sg_init_one(&sg, msb->cache , msb->block_size);
+
+	/* Read all missing pages in cache */
+	for (page = 0; page < msb->pages_in_block; page++) {
+
+		if (test_bit(page, &msb->valid_cache_bitmap))
+			continue;
+
+		offset = page * msb->page_size;
+
+		dbg_verbose("reading non-present sector %d of cache block %d",
+			page, lba);
+		error = msb_read_page(msb, pba, page, &extra, &sg, offset);
+
+		/* Bad pages are copied with 00 page status */
+		if (error == -EBADMSG) {
+			pr_err("read error on sector %d, contents probably damaged", page);
+			continue;
+		}
+
+		if (error)
+			return error;
+
+		if ((extra.overwrite_flag & MEMSTICK_OV_PG_NORMAL) !=
+							MEMSTICK_OV_PG_NORMAL) {
+			dbg("page %d is marked as bad", page);
+			continue;
+		}
+
+		set_bit(page, &msb->valid_cache_bitmap);
+	}
+
+	/* Write the cache now */
+	error = msb_update_block(msb, msb->cache_block_lba, &sg, 0);
+	pba = msb->lba_to_pba_table[msb->cache_block_lba];
+
+	/* Mark invalid pages */
+	if (!error) {
+		for (page = 0; page < msb->pages_in_block; page++) {
+
+			if (test_bit(page, &msb->valid_cache_bitmap))
+				continue;
+
+			dbg("marking page %d as containing damaged data",
+				page);
+			msb_set_overwrite_flag(msb,
+				pba , page, 0xFF & ~MEMSTICK_OV_PG_NORMAL);
+		}
+	}
+
+	msb_cache_discard(msb);
+	return error;
+}
+
+static int msb_cache_write(struct msb_data *msb, int lba,
+	int page, bool add_to_cache_only, struct scatterlist *sg, int offset)
+{
+	int error;
+	struct scatterlist sg_tmp[10];
+
+	if (msb->read_only)
+		return -EROFS;
+
+	if (msb->cache_block_lba == MS_BLOCK_INVALID ||
+						lba != msb->cache_block_lba)
+		if (add_to_cache_only)
+			return 0;
+
+	/* If we need to write different block */
+	if (msb->cache_block_lba != MS_BLOCK_INVALID &&
+						lba != msb->cache_block_lba) {
+		dbg_verbose("first flush the cache");
+		error = msb_cache_flush(msb);
+		if (error)
+			return error;
+	}
+
+	if (msb->cache_block_lba  == MS_BLOCK_INVALID) {
+		msb->cache_block_lba  = lba;
+		mod_timer(&msb->cache_flush_timer,
+			jiffies + msecs_to_jiffies(cache_flush_timeout));
+	}
+
+	dbg_verbose("Write of LBA %d page %d to cache ", lba, page);
+
+	sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
+	msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size);
+
+	sg_copy_to_buffer(sg_tmp, sg_nents(sg_tmp),
+		msb->cache + page * msb->page_size, msb->page_size);
+
+	set_bit(page, &msb->valid_cache_bitmap);
+	return 0;
+}
+
+static int msb_cache_read(struct msb_data *msb, int lba,
+				int page, struct scatterlist *sg, int offset)
+{
+	int pba = msb->lba_to_pba_table[lba];
+	struct scatterlist sg_tmp[10];
+	int error = 0;
+
+	if (lba == msb->cache_block_lba &&
+			test_bit(page, &msb->valid_cache_bitmap)) {
+
+		dbg_verbose("Read of LBA %d (pba %d) sector %d from cache",
+							lba, pba, page);
+
+		sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
+		msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp),
+			offset, msb->page_size);
+		sg_copy_from_buffer(sg_tmp, sg_nents(sg_tmp),
+			msb->cache + msb->page_size * page,
+							msb->page_size);
+	} else {
+		dbg_verbose("Read of LBA %d (pba %d) sector %d from device",
+							lba, pba, page);
+
+		error = msb_read_page(msb, pba, page, NULL, sg, offset);
+		if (error)
+			return error;
+
+		msb_cache_write(msb, lba, page, true, sg, offset);
+	}
+	return error;
+}
+
+/* Emulated geometry table
+ * The exact content of this table isn't that important;
+ * one could put different values here, provided that they still
+ * cover the whole disk.
+ * The 64 MB entry is what Windows reports for my 64M memstick */
+
+static const struct chs_entry chs_table[] = {
+/*        size sectors cylinders  heads */
+	{ 4,    16,    247,       2  },
+	{ 8,    16,    495,       2  },
+	{ 16,   16,    495,       4  },
+	{ 32,   16,    991,       4  },
+	{ 64,   16,    991,       8  },
+	{128,   16,    991,       16 },
+	{ 0 }
+};
+
+/* Load information about the card */
+static int msb_init_card(struct memstick_dev *card)
+{
+	struct msb_data *msb = memstick_get_drvdata(card);
+	struct memstick_host *host = card->host;
+	struct ms_boot_page *boot_block;
+	int error = 0, i, raw_size_in_megs;
+
+	msb->caps = 0;
+
+	if (card->id.class >= MEMSTICK_CLASS_ROM &&
+				card->id.class <= MEMSTICK_CLASS_ROM)
+		msb->read_only = true;
+
+	msb->state = -1;
+	error = msb_reset(msb, false);
+	if (error)
+		return error;
+
+	/* Due to a bug in the JMicron driver written by Alex Dubov,
+	 * its serial mode barely works,
+	 * so we switch to parallel mode right away. */
+	if (host->caps & MEMSTICK_CAP_PAR4)
+		msb_switch_to_parallel(msb);
+
+	msb->page_size = sizeof(struct ms_boot_page);
+
+	/* Read the boot page */
+	error = msb_read_boot_blocks(msb);
+	if (error)
+		return -EIO;
+
+	boot_block = &msb->boot_page[0];
+
+	/* Save interesting attributes from the boot page */
+	msb->block_count = boot_block->attr.number_of_blocks;
+	msb->page_size = boot_block->attr.page_size;
+
+	msb->pages_in_block = boot_block->attr.block_size * 2;
+	msb->block_size = msb->page_size * msb->pages_in_block;
+
+	if (msb->page_size > PAGE_SIZE) {
+		/* this isn't supported by Linux at all anyway */
+		dbg("device page size %d isn't supported", msb->page_size);
+		return -EINVAL;
+	}
+
+	msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL);
+	if (!msb->block_buffer)
+		return -ENOMEM;
+
+	raw_size_in_megs = (msb->block_size * msb->block_count) >> 20;
+
+	for (i = 0; chs_table[i].size; i++) {
+
+		if (chs_table[i].size != raw_size_in_megs)
+			continue;
+
+		msb->geometry.cylinders = chs_table[i].cyl;
+		msb->geometry.heads = chs_table[i].head;
+		msb->geometry.sectors = chs_table[i].sec;
+		break;
+	}
+
+	if (boot_block->attr.transfer_supporting == 1)
+		msb->caps |= MEMSTICK_CAP_PAR4;
+
+	if (boot_block->attr.device_type & 0x03)
+		msb->read_only = true;
+
+	dbg("Total block count = %d", msb->block_count);
+	dbg("Each block consists of %d pages", msb->pages_in_block);
+	dbg("Page size = %d bytes", msb->page_size);
+	dbg("Parallel mode supported: %d", !!(msb->caps & MEMSTICK_CAP_PAR4));
+	dbg("Read only: %d", msb->read_only);
+
+#if 0
+	/* Now we can switch the interface */
+	if (host->caps & msb->caps & MEMSTICK_CAP_PAR4)
+		msb_switch_to_parallel(msb);
+#endif
+
+	error = msb_cache_init(msb);
+	if (error)
+		return error;
+
+	error = msb_ftl_initialize(msb);
+	if (error)
+		return error;
+
+
+	/* Read the bad block table */
+	error = msb_read_bad_block_table(msb, 0);
+
+	if (error && error != -ENOMEM) {
+		dbg("failed to read bad block table from primary boot block, trying from backup");
+		error = msb_read_bad_block_table(msb, 1);
+	}
+
+	if (error)
+		return error;
+
+	/* *drum roll* Scan the media */
+	error = msb_ftl_scan(msb);
+	if (error) {
+		pr_err("Scan of media failed");
+		return error;
+	}
+
+	return 0;
+
+}
+
+static int msb_do_write_request(struct msb_data *msb, int lba,
+	int page, struct scatterlist *sg, size_t len, int *successfully_written)
+{
+	int error = 0;
+	off_t offset = 0;
+	*successfully_written = 0;
+
+	while (offset < len) {
+		if (page == 0 && len - offset >= msb->block_size) {
+
+			if (msb->cache_block_lba == lba)
+				msb_cache_discard(msb);
+
+			dbg_verbose("Writing whole lba %d", lba);
+			error = msb_update_block(msb, lba, sg, offset);
+			if (error)
+				return error;
+
+			offset += msb->block_size;
+			*successfully_written += msb->block_size;
+			lba++;
+			continue;
+		}
+
+		error = msb_cache_write(msb, lba, page, false, sg, offset);
+		if (error)
+			return error;
+
+		offset += msb->page_size;
+		*successfully_written += msb->page_size;
+
+		page++;
+		if (page == msb->pages_in_block) {
+			page = 0;
+			lba++;
+		}
+	}
+	return 0;
+}
+
+static int msb_do_read_request(struct msb_data *msb, int lba,
+		int page, struct scatterlist *sg, int len, int *successfully_read)
+{
+	int error = 0;
+	int offset = 0;
+	*successfully_read = 0;
+
+	while (offset < len) {
+
+		error = msb_cache_read(msb, lba, page, sg, offset);
+		if (error)
+			return error;
+
+		offset += msb->page_size;
+		*successfully_read += msb->page_size;
+
+		page++;
+		if (page == msb->pages_in_block) {
+			page = 0;
+			lba++;
+		}
+	}
+	return 0;
+}
+
+static void msb_io_work(struct work_struct *work)
+{
+	struct msb_data *msb = container_of(work, struct msb_data, io_work);
+	int page, error, len;
+	sector_t lba;
+	unsigned long flags;
+	struct scatterlist *sg = msb->prealloc_sg;
+
+	dbg_verbose("IO: work started");
+
+	while (1) {
+		spin_lock_irqsave(&msb->q_lock, flags);
+
+		if (msb->need_flush_cache) {
+			msb->need_flush_cache = false;
+			spin_unlock_irqrestore(&msb->q_lock, flags);
+			msb_cache_flush(msb);
+			continue;
+		}
+
+		if (!msb->req) {
+			msb->req = blk_fetch_request(msb->queue);
+			if (!msb->req) {
+				dbg_verbose("IO: no more requests, exiting");
+				spin_unlock_irqrestore(&msb->q_lock, flags);
+				return;
+			}
+		}
+
+		spin_unlock_irqrestore(&msb->q_lock, flags);
+
+		/* If card was removed meanwhile */
+		if (!msb->req)
+			return;
+
+		/* process the request */
+		dbg_verbose("IO: processing new request");
+		blk_rq_map_sg(msb->queue, msb->req, sg);
+
+		lba = blk_rq_pos(msb->req);
+
+		sector_div(lba, msb->page_size / 512);
+		page = do_div(lba, msb->pages_in_block);
+
+		if (rq_data_dir(msb->req) == READ)
+			error = msb_do_read_request(msb, lba, page, sg,
+				blk_rq_bytes(msb->req), &len);
+		else
+			error = msb_do_write_request(msb, lba, page, sg,
+				blk_rq_bytes(msb->req), &len);
+
+		spin_lock_irqsave(&msb->q_lock, flags);
+
+		if (len)
+			if (!__blk_end_request(msb->req, 0, len))
+				msb->req = NULL;
+
+		if (error && msb->req) {
+			dbg_verbose("IO: ending one sector of the request with error");
+			if (!__blk_end_request(msb->req, error, msb->page_size))
+				msb->req = NULL;
+		}
+
+		if (msb->req)
+			dbg_verbose("IO: request still pending");
+
+		spin_unlock_irqrestore(&msb->q_lock, flags);
+	}
+}
+
+static DEFINE_IDR(msb_disk_idr); /* set of used disk numbers */
+static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */
+
+static int msb_bd_open(struct block_device *bdev, fmode_t mode)
+{
+	struct gendisk *disk = bdev->bd_disk;
+	struct msb_data *msb = disk->private_data;
+
+	dbg_verbose("block device open");
+
+	mutex_lock(&msb_disk_lock);
+
+	if (msb && msb->card)
+		msb->usage_count++;
+
+	mutex_unlock(&msb_disk_lock);
+	return 0;
+}
+
+static void msb_data_clear(struct msb_data *msb)
+{
+	kfree(msb->boot_page);
+	kfree(msb->used_blocks_bitmap);
+	kfree(msb->lba_to_pba_table);
+	kfree(msb->cache);
+	msb->card = NULL;
+}
+
+static int msb_disk_release(struct gendisk *disk)
+{
+	struct msb_data *msb = disk->private_data;
+
+	dbg_verbose("block device release");
+	mutex_lock(&msb_disk_lock);
+
+	if (msb) {
+		if (msb->usage_count)
+			msb->usage_count--;
+
+		if (!msb->usage_count) {
+			disk->private_data = NULL;
+			idr_remove(&msb_disk_idr, msb->disk_id);
+			put_disk(disk);
+			kfree(msb);
+		}
+	}
+	mutex_unlock(&msb_disk_lock);
+	return 0;
+}
+
+static void msb_bd_release(struct gendisk *disk, fmode_t mode)
+{
+	msb_disk_release(disk);
+}
+
+static int msb_bd_getgeo(struct block_device *bdev,
+				 struct hd_geometry *geo)
+{
+	struct msb_data *msb = bdev->bd_disk->private_data;
+	*geo = msb->geometry;
+	return 0;
+}
+
+static int msb_prepare_req(struct request_queue *q, struct request *req)
+{
+	if (req->cmd_type != REQ_TYPE_FS &&
+				req->cmd_type != REQ_TYPE_BLOCK_PC) {
+		blk_dump_rq_flags(req, "MS unsupported request");
+		return BLKPREP_KILL;
+	}
+	req->cmd_flags |= REQ_DONTPREP;
+	return BLKPREP_OK;
+}
+
+static void msb_submit_req(struct request_queue *q)
+{
+	struct memstick_dev *card = q->queuedata;
+	struct msb_data *msb = memstick_get_drvdata(card);
+	struct request *req = NULL;
+
+	dbg_verbose("Submit request");
+
+	if (msb->card_dead) {
+		dbg("Refusing requests on removed card");
+
+		WARN_ON(!msb->io_queue_stopped);
+
+		while ((req = blk_fetch_request(q)) != NULL)
+			__blk_end_request_all(req, -ENODEV);
+		return;
+	}
+
+	if (msb->req)
+		return;
+
+	if (!msb->io_queue_stopped)
+		queue_work(msb->io_queue, &msb->io_work);
+}
+
+static int msb_check_card(struct memstick_dev *card)
+{
+	struct msb_data *msb = memstick_get_drvdata(card);
+	return (msb->card_dead == 0);
+}
+
+static void msb_stop(struct memstick_dev *card)
+{
+	struct msb_data *msb = memstick_get_drvdata(card);
+	unsigned long flags;
+
+	dbg("Stopping all msblock IO");
+
+	spin_lock_irqsave(&msb->q_lock, flags);
+	blk_stop_queue(msb->queue);
+	msb->io_queue_stopped = true;
+	spin_unlock_irqrestore(&msb->q_lock, flags);
+
+	del_timer_sync(&msb->cache_flush_timer);
+	flush_workqueue(msb->io_queue);
+
+	if (msb->req) {
+		spin_lock_irqsave(&msb->q_lock, flags);
+		blk_requeue_request(msb->queue, msb->req);
+		msb->req = NULL;
+		spin_unlock_irqrestore(&msb->q_lock, flags);
+	}
+
+}
+
+static void msb_start(struct memstick_dev *card)
+{
+	struct msb_data *msb = memstick_get_drvdata(card);
+	unsigned long flags;
+
+	dbg("Resuming IO from msblock");
+
+	msb_invalidate_reg_window(msb);
+
+	spin_lock_irqsave(&msb->q_lock, flags);
+	if (!msb->io_queue_stopped || msb->card_dead) {
+		spin_unlock_irqrestore(&msb->q_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&msb->q_lock, flags);
+
+	/* Kick a cache flush anyway, it's harmless */
+	msb->need_flush_cache = true;
+	msb->io_queue_stopped = false;
+
+	spin_lock_irqsave(&msb->q_lock, flags);
+	blk_start_queue(msb->queue);
+	spin_unlock_irqrestore(&msb->q_lock, flags);
+
+	queue_work(msb->io_queue, &msb->io_work);
+
+}
+
+static const struct block_device_operations msb_bdops = {
+	.open    = msb_bd_open,
+	.release = msb_bd_release,
+	.getgeo  = msb_bd_getgeo,
+	.owner   = THIS_MODULE
+};
+
+/* Registers the block device */
+static int msb_init_disk(struct memstick_dev *card)
+{
+	struct msb_data *msb = memstick_get_drvdata(card);
+	struct memstick_host *host = card->host;
+	int rc;
+	u64 limit = BLK_BOUNCE_HIGH;
+	unsigned long capacity;
+
+	if (host->dev.dma_mask && *(host->dev.dma_mask))
+		limit = *(host->dev.dma_mask);
+
+	mutex_lock(&msb_disk_lock);
+	msb->disk_id = idr_alloc(&msb_disk_idr, card, 0, 256, GFP_KERNEL);
+	mutex_unlock(&msb_disk_lock);
+
+	if (msb->disk_id  < 0)
+		return msb->disk_id;
+
+	msb->disk = alloc_disk(0);
+	if (!msb->disk) {
+		rc = -ENOMEM;
+		goto out_release_id;
+	}
+
+	msb->queue = blk_init_queue(msb_submit_req, &msb->q_lock);
+	if (!msb->queue) {
+		rc = -ENOMEM;
+		goto out_put_disk;
+	}
+
+	msb->queue->queuedata = card;
+	blk_queue_prep_rq(msb->queue, msb_prepare_req);
+
+	blk_queue_bounce_limit(msb->queue, limit);
+	blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
+	blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS);
+	blk_queue_max_segment_size(msb->queue,
+				   MS_BLOCK_MAX_PAGES * msb->page_size);
+	blk_queue_logical_block_size(msb->queue, msb->page_size);
+
+	sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id);
+	msb->disk->fops = &msb_bdops;
+	msb->disk->private_data = msb;
+	msb->disk->queue = msb->queue;
+	msb->disk->driverfs_dev = &card->dev;
+	msb->disk->flags |= GENHD_FL_EXT_DEVT;
+
+	capacity = msb->pages_in_block * msb->logical_block_count;
+	capacity *= (msb->page_size / 512);
+	set_capacity(msb->disk, capacity);
+	dbg("Set total disk size to %lu sectors", capacity);
+
+	msb->usage_count = 1;
+	msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM);
+	INIT_WORK(&msb->io_work, msb_io_work);
+	sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
+
+	if (msb->read_only)
+		set_disk_ro(msb->disk, 1);
+
+	msb_start(card);
+	add_disk(msb->disk);
+	dbg("Disk added");
+	return 0;
+
+out_put_disk:
+	put_disk(msb->disk);
+out_release_id:
+	mutex_lock(&msb_disk_lock);
+	idr_remove(&msb_disk_idr, msb->disk_id);
+	mutex_unlock(&msb_disk_lock);
+	return rc;
+}
+
+static int msb_probe(struct memstick_dev *card)
+{
+	struct msb_data *msb;
+	int rc = 0;
+
+	msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
+	if (!msb)
+		return -ENOMEM;
+	memstick_set_drvdata(card, msb);
+	msb->card = card;
+	spin_lock_init(&msb->q_lock);
+
+	rc = msb_init_card(card);
+	if (rc)
+		goto out_free;
+
+	rc = msb_init_disk(card);
+	if (!rc) {
+		card->check = msb_check_card;
+		card->stop = msb_stop;
+		card->start = msb_start;
+		return 0;
+	}
+out_free:
+	memstick_set_drvdata(card, NULL);
+	msb_data_clear(msb);
+	kfree(msb);
+	return rc;
+}
+
+static void msb_remove(struct memstick_dev *card)
+{
+	struct msb_data *msb = memstick_get_drvdata(card);
+	unsigned long flags;
+
+	if (!msb->io_queue_stopped)
+		msb_stop(card);
+
+	dbg("Removing the disk device");
+
+	/* Take care of unhandled + new requests from now on */
+	spin_lock_irqsave(&msb->q_lock, flags);
+	msb->card_dead = true;
+	blk_start_queue(msb->queue);
+	spin_unlock_irqrestore(&msb->q_lock, flags);
+
+	/* Remove the disk */
+	del_gendisk(msb->disk);
+	blk_cleanup_queue(msb->queue);
+	msb->queue = NULL;
+
+	mutex_lock(&msb_disk_lock);
+	msb_data_clear(msb);
+	mutex_unlock(&msb_disk_lock);
+
+	msb_disk_release(msb->disk);
+	memstick_set_drvdata(card, NULL);
+}
+
+#ifdef CONFIG_PM
+
+static int msb_suspend(struct memstick_dev *card, pm_message_t state)
+{
+	msb_stop(card);
+	return 0;
+}
+
+static int msb_resume(struct memstick_dev *card)
+{
+	struct msb_data *msb = memstick_get_drvdata(card);
+	struct msb_data *new_msb = NULL;
+	bool card_dead = true;
+
+#ifndef CONFIG_MEMSTICK_UNSAFE_RESUME
+	msb->card_dead = true;
+	return 0;
+#endif
+	mutex_lock(&card->host->lock);
+
+	new_msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
+	if (!new_msb)
+		goto out;
+
+	new_msb->card = card;
+	memstick_set_drvdata(card, new_msb);
+	spin_lock_init(&new_msb->q_lock);
+	sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
+
+	if (msb_init_card(card))
+		goto out;
+
+	if (msb->block_size != new_msb->block_size)
+		goto out;
+
+	if (memcmp(msb->boot_page, new_msb->boot_page,
+					sizeof(struct ms_boot_page)))
+		goto out;
+
+	if (msb->logical_block_count != new_msb->logical_block_count ||
+		memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table,
+						msb->logical_block_count))
+		goto out;
+
+	if (msb->block_count != new_msb->block_count ||
+		memcmp(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
+							msb->block_count / 8))
+		goto out;
+
+	card_dead = false;
+out:
+	if (card_dead)
+		dbg("Card was removed/replaced during suspend");
+
+	msb->card_dead = card_dead;
+	memstick_set_drvdata(card, msb);
+
+	if (new_msb) {
+		msb_data_clear(new_msb);
+		kfree(new_msb);
+	}
+
+	msb_start(card);
+	mutex_unlock(&card->host->lock);
+	return 0;
+}
+#else
+
+#define msb_suspend NULL
+#define msb_resume NULL
+
+#endif /* CONFIG_PM */
+
+static struct memstick_device_id msb_id_tbl[] = {
+	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
+	 MEMSTICK_CLASS_FLASH},
+
+	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
+	 MEMSTICK_CLASS_ROM},
+
+	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
+	 MEMSTICK_CLASS_RO},
+
+	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
+	 MEMSTICK_CLASS_WP},
+
+	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_DUO, MEMSTICK_CATEGORY_STORAGE_DUO,
+	 MEMSTICK_CLASS_DUO},
+	{}
+};
+MODULE_DEVICE_TABLE(memstick, msb_id_tbl);
+
+
+static struct memstick_driver msb_driver = {
+	.driver = {
+		.name  = DRIVER_NAME,
+		.owner = THIS_MODULE
+	},
+	.id_table = msb_id_tbl,
+	.probe    = msb_probe,
+	.remove   = msb_remove,
+	.suspend  = msb_suspend,
+	.resume   = msb_resume
+};
+
+static int major;
+
+static int __init msb_init(void)
+{
+	int rc = register_blkdev(0, DRIVER_NAME);
+
+	if (rc < 0) {
+		pr_err("failed to register major (error %d)\n", rc);
+		return rc;
+	}
+
+	major = rc;
+	rc = memstick_register_driver(&msb_driver);
+	if (rc) {
+		unregister_blkdev(major, DRIVER_NAME);
+		pr_err("failed to register memstick driver (error %d)\n", rc);
+	}
+
+	return rc;
+}
+
+static void __exit msb_exit(void)
+{
+	memstick_unregister_driver(&msb_driver);
+	unregister_blkdev(major, DRIVER_NAME);
+	idr_destroy(&msb_disk_idr);
+}
+
+module_init(msb_init);
+module_exit(msb_exit);
+
+module_param(cache_flush_timeout, int, S_IRUGO);
+MODULE_PARM_DESC(cache_flush_timeout,
+				"Cache flush timeout in msec (1000 default)");
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
+
+module_param(verify_writes, bool, S_IRUGO);
+MODULE_PARM_DESC(verify_writes, "Read back and check all data that is written");
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky");
+MODULE_DESCRIPTION("Sony MemoryStick block device driver");
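The request mapping in msb_io_work() above splits a 512-byte sector position into a logical block number and a page within that block via sector_div() and do_div(). A minimal user-space sketch of that arithmetic, with assumed page_size and pages_in_block values (neither value comes from the patch):

/* sketch.c - illustrative only, not part of the patch */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t pos = 12345;			/* request position in 512-byte sectors */
	unsigned int page_size = 2048;		/* assumed: 2 KiB pages */
	unsigned int pages_in_block = 16;	/* assumed: 16 pages per block */

	uint64_t lba = pos / (page_size / 512);		/* the sector_div() step */
	unsigned int page = lba % pages_in_block;	/* do_div() remainder */

	lba /= pages_in_block;				/* do_div() quotient */

	printf("sector %llu -> logical block %llu, page %u\n",
	       (unsigned long long)pos, (unsigned long long)lba, page);
	return 0;
}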

+ 290 - 0
drivers/memstick/core/ms_block.h

@@ -0,0 +1,290 @@
+/*
+ *  ms_block.h - Sony MemoryStick (legacy) storage support
+ *
+ *  Copyright (C) 2013 Maxim Levitsky <maximlevitsky@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Minor portions of the driver are copied from mspro_block.c which is
+ * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
+ *
+ * The MS structures were also copied from an old, broken driver by the
+ * same author; these probably come from the MS spec.
+ *
+ */
+
+#ifndef MS_BLOCK_NEW_H
+#define MS_BLOCK_NEW_H
+
+#define MS_BLOCK_MAX_SEGS      32
+#define MS_BLOCK_MAX_PAGES     ((2 << 16) - 1)
+
+#define MS_BLOCK_MAX_BOOT_ADDR 0x000c
+#define MS_BLOCK_BOOT_ID       0x0001
+#define MS_BLOCK_INVALID       0xffff
+#define MS_MAX_ZONES           16
+#define MS_BLOCKS_IN_ZONE      512
+
+#define MS_BLOCK_MAP_LINE_SZ   16
+#define MS_BLOCK_PART_SHIFT    3
+
+
+#define MEMSTICK_UNCORR_ERROR (MEMSTICK_STATUS1_UCFG | \
+		MEMSTICK_STATUS1_UCEX | MEMSTICK_STATUS1_UCDT)
+
+#define MEMSTICK_CORR_ERROR (MEMSTICK_STATUS1_FGER | MEMSTICK_STATUS1_EXER | \
+	MEMSTICK_STATUS1_DTER)
+
+#define MEMSTICK_INT_ERROR (MEMSTICK_INT_CMDNAK | MEMSTICK_INT_ERR)
+
+#define MEMSTICK_OVERWRITE_FLAG_NORMAL \
+	(MEMSTICK_OVERWRITE_PGST1 | \
+	MEMSTICK_OVERWRITE_PGST0  | \
+	MEMSTICK_OVERWRITE_BKST)
+
+#define MEMSTICK_OV_PG_NORMAL \
+	(MEMSTICK_OVERWRITE_PGST1 | MEMSTICK_OVERWRITE_PGST0)
+
+#define MEMSTICK_MANAGMENT_FLAG_NORMAL \
+	(MEMSTICK_MANAGEMENT_SYSFLG |  \
+	MEMSTICK_MANAGEMENT_SCMS1   |  \
+	MEMSTICK_MANAGEMENT_SCMS0)
+
+struct ms_boot_header {
+	unsigned short block_id;
+	unsigned short format_reserved;
+	unsigned char  reserved0[184];
+	unsigned char  data_entry;
+	unsigned char  reserved1[179];
+} __packed;
+
+
+struct ms_system_item {
+	unsigned int  start_addr;
+	unsigned int  data_size;
+	unsigned char data_type_id;
+	unsigned char reserved[3];
+} __packed;
+
+struct ms_system_entry {
+	struct ms_system_item disabled_block;
+	struct ms_system_item cis_idi;
+	unsigned char         reserved[24];
+} __packed;
+
+struct ms_boot_attr_info {
+	unsigned char      memorystick_class;
+	unsigned char      format_unique_value1;
+	unsigned short     block_size;
+	unsigned short     number_of_blocks;
+	unsigned short     number_of_effective_blocks;
+	unsigned short     page_size;
+	unsigned char      extra_data_size;
+	unsigned char      format_unique_value2;
+	unsigned char      assembly_time[8];
+	unsigned char      format_unique_value3;
+	unsigned char      serial_number[3];
+	unsigned char      assembly_manufacturer_code;
+	unsigned char      assembly_model_code[3];
+	unsigned short     memory_manufacturer_code;
+	unsigned short     memory_device_code;
+	unsigned short     implemented_capacity;
+	unsigned char      format_unique_value4[2];
+	unsigned char      vcc;
+	unsigned char      vpp;
+	unsigned short     controller_number;
+	unsigned short     controller_function;
+	unsigned char      reserved0[9];
+	unsigned char      transfer_supporting;
+	unsigned short     format_unique_value5;
+	unsigned char      format_type;
+	unsigned char      memorystick_application;
+	unsigned char      device_type;
+	unsigned char      reserved1[22];
+	unsigned char      format_unique_value6[2];
+	unsigned char      reserved2[15];
+} __packed;
+
+struct ms_cis_idi {
+	unsigned short general_config;
+	unsigned short logical_cylinders;
+	unsigned short reserved0;
+	unsigned short logical_heads;
+	unsigned short track_size;
+	unsigned short page_size;
+	unsigned short pages_per_track;
+	unsigned short msw;
+	unsigned short lsw;
+	unsigned short reserved1;
+	unsigned char  serial_number[20];
+	unsigned short buffer_type;
+	unsigned short buffer_size_increments;
+	unsigned short long_command_ecc;
+	unsigned char  firmware_version[28];
+	unsigned char  model_name[18];
+	unsigned short reserved2[5];
+	unsigned short pio_mode_number;
+	unsigned short dma_mode_number;
+	unsigned short field_validity;
+	unsigned short current_logical_cylinders;
+	unsigned short current_logical_heads;
+	unsigned short current_pages_per_track;
+	unsigned int   current_page_capacity;
+	unsigned short multiple_page_setting;
+	unsigned int   addressable_pages;
+	unsigned short single_word_dma;
+	unsigned short multi_word_dma;
+	unsigned char  reserved3[128];
+} __packed;
+
+
+struct ms_boot_page {
+	struct ms_boot_header    header;
+	struct ms_system_entry   entry;
+	struct ms_boot_attr_info attr;
+} __packed;
+
+struct msb_data {
+	unsigned int			usage_count;
+	struct memstick_dev		*card;
+	struct gendisk			*disk;
+	struct request_queue		*queue;
+	spinlock_t			q_lock;
+	struct hd_geometry		geometry;
+	struct attribute_group		attr_group;
+	struct request			*req;
+	int				caps;
+	int				disk_id;
+
+	/* IO */
+	struct workqueue_struct		*io_queue;
+	bool				io_queue_stopped;
+	struct work_struct		io_work;
+	bool				card_dead;
+
+	/* Media properties */
+	struct ms_boot_page		*boot_page;
+	u16				boot_block_locations[2];
+	int				boot_block_count;
+
+	bool				read_only;
+	unsigned short			page_size;
+	int				block_size;
+	int				pages_in_block;
+	int				zone_count;
+	int				block_count;
+	int				logical_block_count;
+
+	/* FTL tables */
+	unsigned long			*used_blocks_bitmap;
+	unsigned long			*erased_blocks_bitmap;
+	u16				*lba_to_pba_table;
+	int				free_block_count[MS_MAX_ZONES];
+	bool				ftl_initialized;
+
+	/* Cache */
+	unsigned char			*cache;
+	unsigned long			valid_cache_bitmap;
+	int				cache_block_lba;
+	bool				need_flush_cache;
+	struct timer_list		cache_flush_timer;
+
+	/* Preallocated buffers */
+	unsigned char			*block_buffer;
+	struct scatterlist		prealloc_sg[MS_BLOCK_MAX_SEGS+1];
+
+
+	/* handler's local data */
+	struct ms_register_addr		reg_addr;
+	bool				addr_valid;
+
+	u8				command_value;
+	bool				command_need_oob;
+	struct scatterlist		*current_sg;
+	int				current_sg_offset;
+
+	struct ms_register		regs;
+	int				current_page;
+
+	int				state;
+	int				exit_error;
+	bool				int_polling;
+	unsigned long			int_timeout;
+
+};
+
+enum msb_readpage_states {
+	MSB_RP_SEND_BLOCK_ADDRESS = 0,
+	MSB_RP_SEND_READ_COMMAND,
+
+	MSB_RP_SEND_INT_REQ,
+	MSB_RP_RECEIVE_INT_REQ_RESULT,
+
+	MSB_RP_SEND_READ_STATUS_REG,
+	MSB_RP_RECIVE_STATUS_REG,
+
+	MSB_RP_SEND_OOB_READ,
+	MSB_RP_RECEIVE_OOB_READ,
+
+	MSB_RP_SEND_READ_DATA,
+	MSB_RP_RECEIVE_READ_DATA,
+};
+
+enum msb_write_block_states {
+	MSB_WB_SEND_WRITE_PARAMS = 0,
+	MSB_WB_SEND_WRITE_OOB,
+	MSB_WB_SEND_WRITE_COMMAND,
+
+	MSB_WB_SEND_INT_REQ,
+	MSB_WB_RECEIVE_INT_REQ,
+
+	MSB_WB_SEND_WRITE_DATA,
+	MSB_WB_RECEIVE_WRITE_CONFIRMATION,
+};
+
+enum msb_send_command_states {
+	MSB_SC_SEND_WRITE_PARAMS,
+	MSB_SC_SEND_WRITE_OOB,
+	MSB_SC_SEND_COMMAND,
+
+	MSB_SC_SEND_INT_REQ,
+	MSB_SC_RECEIVE_INT_REQ,
+
+};
+
+enum msb_reset_states {
+	MSB_RS_SEND,
+	MSB_RS_CONFIRM,
+};
+
+enum msb_par_switch_states {
+	MSB_PS_SEND_SWITCH_COMMAND,
+	MSB_PS_SWICH_HOST,
+	MSB_PS_CONFIRM,
+};
+
+struct chs_entry {
+	unsigned long size;
+	unsigned char sec;
+	unsigned short cyl;
+	unsigned char head;
+};
+
+static int msb_reset(struct msb_data *msb, bool full);
+
+static int h_msb_default_bad(struct memstick_dev *card,
+						struct memstick_request **mrq);
+
+#define __dbg(level, format, ...) \
+	do { \
+		if (debug >= level) \
+			pr_err(format "\n", ## __VA_ARGS__); \
+	} while (0)
+
+
+#define dbg(format, ...)		__dbg(1, format, ## __VA_ARGS__)
+#define dbg_verbose(format, ...)	__dbg(2, format, ## __VA_ARGS__)
+
+#endif
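The dbg()/dbg_verbose() helpers at the bottom of this header gate their output on the debug module parameter declared in ms_block.c. A user-space sketch of how the macro expands, using fprintf() in place of pr_err(); the debug value here is only an example:

/* dbg_sketch.c - user-space analogue of __dbg()/dbg()/dbg_verbose();
 * the real macros print through pr_err() and read the "debug" module
 * parameter (0-2). */
#include <stdio.h>

static int debug = 2;	/* stands in for the module parameter */

#define __dbg(level, format, ...) \
	do { \
		if (debug >= level) \
			fprintf(stderr, format "\n", ## __VA_ARGS__); \
	} while (0)

#define dbg(format, ...)		__dbg(1, format, ## __VA_ARGS__)
#define dbg_verbose(format, ...)	__dbg(2, format, ## __VA_ARGS__)

int main(void)
{
	dbg("printed when debug >= 1 (page size %d)", 2048);
	dbg_verbose("printed only when debug >= 2");
	return 0;
}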

+ 0 - 2
drivers/memstick/host/rtsx_pci_ms.c

@@ -612,8 +612,6 @@ static int rtsx_pci_ms_drv_remove(struct platform_device *pdev)
 	memstick_remove_host(msh);
 	memstick_free_host(msh);
 
-	platform_set_drvdata(pdev, NULL);
-
 	dev_dbg(&(pdev->dev),
 		": Realtek PCI-E Memstick controller has been removed\n");
 

+ 14 - 4
drivers/platform/x86/apple-gmux.c

@@ -393,17 +393,21 @@ static void gmux_notify_handler(acpi_handle device, u32 value, void *context)
 		complete(&gmux_data->powerchange_done);
 }
 
-static int gmux_suspend(struct pnp_dev *pnp, pm_message_t state)
+static int gmux_suspend(struct device *dev)
 {
+	struct pnp_dev *pnp = to_pnp_dev(dev);
 	struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp);
+
 	gmux_data->resume_client_id = gmux_active_client(gmux_data);
 	gmux_disable_interrupts(gmux_data);
 	return 0;
 }
 
-static int gmux_resume(struct pnp_dev *pnp)
+static int gmux_resume(struct device *dev)
 {
+	struct pnp_dev *pnp = to_pnp_dev(dev);
 	struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp);
+
 	gmux_enable_interrupts(gmux_data);
 	gmux_switchto(gmux_data->resume_client_id);
 	if (gmux_data->power_state == VGA_SWITCHEROO_OFF)
@@ -605,13 +609,19 @@ static const struct pnp_device_id gmux_device_ids[] = {
 	{"", 0}
 };
 
+static const struct dev_pm_ops gmux_dev_pm_ops = {
+	.suspend = gmux_suspend,
+	.resume = gmux_resume,
+};
+
 static struct pnp_driver gmux_pnp_driver = {
 	.name		= "apple-gmux",
 	.probe		= gmux_probe,
 	.remove		= gmux_remove,
 	.id_table	= gmux_device_ids,
-	.suspend	= gmux_suspend,
-	.resume		= gmux_resume
+	.driver		= {
+			.pm = &gmux_dev_pm_ops,
+	},
 };
 
 static int __init apple_gmux_init(void)
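The apple-gmux hunk above (like the rtc-cmos one further down) moves suspend/resume from the legacy pnp_driver hooks to dev_pm_ops. A minimal sketch of the same pattern; the foo names and data structure are placeholders rather than anything from the patch, and registration via pnp_register_driver() is omitted:

/* pnp dev_pm_ops sketch - "foo" names are placeholders, not a real driver */
#include <linux/pnp.h>
#include <linux/pm.h>

struct foo_data {
	int powered;
};

static int foo_suspend(struct device *dev)
{
	struct pnp_dev *pnp = to_pnp_dev(dev);
	struct foo_data *data = pnp_get_drvdata(pnp);

	data->powered = 0;	/* quiesce the hardware here */
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct pnp_dev *pnp = to_pnp_dev(dev);
	struct foo_data *data = pnp_get_drvdata(pnp);

	data->powered = 1;	/* restore hardware state here */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend = foo_suspend,
	.resume  = foo_resume,
};

static struct pnp_driver foo_pnp_driver = {
	.name		= "foo",
	/* .probe, .remove and .id_table omitted for brevity */
	.driver		= {
		.pm = &foo_pm_ops,
	},
};

The pnp/driver.c hunk below is what makes this work: the PNP bus now dispatches to driver.pm before falling back to the legacy .suspend/.resume members.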

+ 13 - 0
drivers/pnp/driver.c

@@ -163,6 +163,13 @@ static int __pnp_bus_suspend(struct device *dev, pm_message_t state)
 	if (!pnp_drv)
 		return 0;
 
+	if (pnp_drv->driver.pm && pnp_drv->driver.pm->suspend) {
+		error = pnp_drv->driver.pm->suspend(dev);
+		suspend_report_result(pnp_drv->driver.pm->suspend, error);
+		if (error)
+			return error;
+	}
+
 	if (pnp_drv->suspend) {
 		error = pnp_drv->suspend(pnp_dev, state);
 		if (error)
@@ -211,6 +218,12 @@ static int pnp_bus_resume(struct device *dev)
 			return error;
 	}
 
+	if (pnp_drv->driver.pm && pnp_drv->driver.pm->resume) {
+		error = pnp_drv->driver.pm->resume(dev);
+		if (error)
+			return error;
+	}
+
 	if (pnp_drv->resume) {
 		error = pnp_drv->resume(pnp_dev);
 		if (error)

+ 0 - 1
drivers/pps/clients/pps-gpio.c

@@ -184,7 +184,6 @@ static int pps_gpio_remove(struct platform_device *pdev)
 {
 	struct pps_gpio_device_data *data = platform_get_drvdata(pdev);
 
-	platform_set_drvdata(pdev, NULL);
 	pps_unregister_source(data->pps);
 	dev_info(&pdev->dev, "removed IRQ %d as PPS source\n", data->irq);
 	return 0;

+ 9 - 0
drivers/rtc/Kconfig

@@ -1249,6 +1249,15 @@ config RTC_DRV_SIRFSOC
 	  Say "yes" here to support the real time clock on SiRF SOC chips.
 	  This driver can also be built as a module called rtc-sirfsoc.
 
+config RTC_DRV_MOXART
+	tristate "MOXA ART RTC"
+	help
+	   If you say yes here you get support for the MOXA ART
+	   RTC module.
+
+	   This driver can also be built as a module. If so, the module
+	   will be called rtc-moxart.
+
 comment "HID Sensor RTC drivers"
 
 config RTC_DRV_HID_SENSOR_TIME

+ 1 - 0
drivers/rtc/Makefile

@@ -130,3 +130,4 @@ obj-$(CONFIG_RTC_DRV_WM831X)	+= rtc-wm831x.o
 obj-$(CONFIG_RTC_DRV_WM8350)	+= rtc-wm8350.o
 obj-$(CONFIG_RTC_DRV_X1205)	+= rtc-x1205.o
 obj-$(CONFIG_RTC_DRV_SIRFSOC)	+= rtc-sirfsoc.o
+obj-$(CONFIG_RTC_DRV_MOXART)	+= rtc-moxart.o

+ 5 - 19
drivers/rtc/rtc-cmos.c

@@ -1018,23 +1018,6 @@ static void __exit cmos_pnp_remove(struct pnp_dev *pnp)
 	cmos_do_remove(&pnp->dev);
 }
 
-#ifdef	CONFIG_PM
-
-static int cmos_pnp_suspend(struct pnp_dev *pnp, pm_message_t mesg)
-{
-	return cmos_suspend(&pnp->dev);
-}
-
-static int cmos_pnp_resume(struct pnp_dev *pnp)
-{
-	return cmos_resume(&pnp->dev);
-}
-
-#else
-#define	cmos_pnp_suspend	NULL
-#define	cmos_pnp_resume		NULL
-#endif
-
 static void cmos_pnp_shutdown(struct pnp_dev *pnp)
 {
 	if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(&pnp->dev))
@@ -1060,8 +1043,11 @@ static struct pnp_driver cmos_pnp_driver = {
 
 	/* flag ensures resume() gets called, and stops syslog spam */
 	.flags		= PNP_DRIVER_RES_DO_NOT_CHANGE,
-	.suspend	= cmos_pnp_suspend,
-	.resume		= cmos_pnp_resume,
+#ifdef CONFIG_PM_SLEEP
+	.driver		= {
+			.pm = &cmos_pm_ops,
+	},
+#endif
 };
 
 #endif	/* CONFIG_PNP */

+ 5 - 12
drivers/rtc/rtc-ds1511.c

@@ -89,7 +89,6 @@ enum ds1511reg {
 struct rtc_plat_data {
 	struct rtc_device *rtc;
 	void __iomem *ioaddr;		/* virtual base address */
-	int size;				/* amount of memory mapped */
 	int irq;
 	unsigned int irqen;
 	int alrm_sec;
@@ -479,20 +478,14 @@ static int ds1511_rtc_probe(struct platform_device *pdev)
 	struct rtc_plat_data *pdata;
 	int ret = 0;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -ENODEV;
-
 	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
 		return -ENOMEM;
-	pdata->size = resource_size(res);
-	if (!devm_request_mem_region(&pdev->dev, res->start, pdata->size,
-			pdev->name))
-		return -EBUSY;
-	ds1511_base = devm_ioremap(&pdev->dev, res->start, pdata->size);
-	if (!ds1511_base)
-		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ds1511_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ds1511_base))
+		return PTR_ERR(ds1511_base);
 	pdata->ioaddr = ds1511_base;
 	pdata->irq = platform_get_irq(pdev, 0);
 

+ 4 - 9
drivers/rtc/rtc-ds1553.c

@@ -285,19 +285,14 @@ static int ds1553_rtc_probe(struct platform_device *pdev)
 	void __iomem *ioaddr;
 	int ret = 0;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -ENODEV;
 	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
 		return -ENOMEM;
-	if (!devm_request_mem_region(&pdev->dev, res->start, RTC_REG_SIZE,
-			pdev->name))
-		return -EBUSY;
 
-	ioaddr = devm_ioremap(&pdev->dev, res->start, RTC_REG_SIZE);
-	if (!ioaddr)
-		return -ENOMEM;
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ioaddr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ioaddr))
+		return PTR_ERR(ioaddr);
 	pdata->ioaddr = ioaddr;
 	pdata->irq = platform_get_irq(pdev, 0);
 

+ 7 - 19
drivers/rtc/rtc-ds1742.c

@@ -52,11 +52,9 @@
 #define RTC_BATT_FLAG		0x80
 
 struct rtc_plat_data {
-	struct rtc_device *rtc;
 	void __iomem *ioaddr_nvram;
 	void __iomem *ioaddr_rtc;
 	size_t size_nvram;
-	size_t size;
 	unsigned long last_jiffies;
 	struct bin_attribute nvram_attr;
 };
@@ -117,11 +115,7 @@ static int ds1742_rtc_read_time(struct device *dev, struct rtc_time *tm)
 	/* year is 1900 + tm->tm_year */
 	tm->tm_year = bcd2bin(year) + bcd2bin(century) * 100 - 1900;
 
-	if (rtc_valid_tm(tm) < 0) {
-		dev_err(dev, "retrieved date/time is not valid.\n");
-		rtc_time_to_tm(0, tm);
-	}
-	return 0;
+	return rtc_valid_tm(tm);
 }
 
 static const struct rtc_class_ops ds1742_rtc_ops = {
@@ -168,22 +162,17 @@ static int ds1742_rtc_probe(struct platform_device *pdev)
 	void __iomem *ioaddr;
 	int ret = 0;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -ENODEV;
 	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
 		return -ENOMEM;
-	pdata->size = resource_size(res);
-	if (!devm_request_mem_region(&pdev->dev, res->start, pdata->size,
-		pdev->name))
-		return -EBUSY;
-	ioaddr = devm_ioremap(&pdev->dev, res->start, pdata->size);
-	if (!ioaddr)
-		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ioaddr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ioaddr))
+		return PTR_ERR(ioaddr);
 
 	pdata->ioaddr_nvram = ioaddr;
-	pdata->size_nvram = pdata->size - RTC_SIZE;
+	pdata->size_nvram = resource_size(res) - RTC_SIZE;
 	pdata->ioaddr_rtc = ioaddr + pdata->size_nvram;
 
 	sysfs_bin_attr_init(&pdata->nvram_attr);
@@ -212,7 +201,6 @@ static int ds1742_rtc_probe(struct platform_device *pdev)
 				  &ds1742_rtc_ops, THIS_MODULE);
 	if (IS_ERR(rtc))
 		return PTR_ERR(rtc);
-	pdata->rtc = rtc;
 
 	ret = sysfs_create_bin_file(&pdev->dev.kobj, &pdata->nvram_attr);
 

+ 3 - 11
drivers/rtc/rtc-ep93xx.c

@@ -138,17 +138,9 @@ static int ep93xx_rtc_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -ENXIO;
-
-	if (!devm_request_mem_region(&pdev->dev, res->start,
-				     resource_size(res), pdev->name))
-		return -EBUSY;
-
-	ep93xx_rtc->mmio_base = devm_ioremap(&pdev->dev, res->start,
-					     resource_size(res));
-	if (!ep93xx_rtc->mmio_base)
-		return -ENXIO;
+	ep93xx_rtc->mmio_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ep93xx_rtc->mmio_base))
+		return PTR_ERR(ep93xx_rtc->mmio_base);
 
 	pdev->dev.platform_data = ep93xx_rtc;
 	platform_set_drvdata(pdev, ep93xx_rtc);
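The ds1511, ds1553, ds1742 and ep93xx probe functions above (and several more below) are all converted to devm_ioremap_resource(), which folds the missing-resource check, devm_request_mem_region() and devm_ioremap() into one call that reports failure as an ERR_PTR value. A sketch of the resulting canonical probe shape, with placeholder foo names:

/* devm_ioremap_resource() probe sketch - "foo" names are placeholders */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/* checks res for NULL, requests the region and ioremaps it;
	 * any failure comes back as an ERR_PTR value */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* the mapping is released automatically on driver detach,
	 * so no explicit error or remove-path cleanup is needed */
	return 0;
}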

+ 15 - 7
drivers/rtc/rtc-hid-sensor-time.c

@@ -23,10 +23,6 @@
 #include <linux/iio/iio.h>
 #include <linux/rtc.h>
 
-/* Format: HID-SENSOR-usage_id_in_hex */
-/* Usage ID from spec for Time: 0x2000A0 */
-#define DRIVER_NAME "HID-SENSOR-2000a0" /* must be lowercase */
-
 enum hid_time_channel {
 	CHANNEL_SCAN_INDEX_YEAR,
 	CHANNEL_SCAN_INDEX_MONTH,
@@ -283,9 +279,11 @@ static int hid_time_probe(struct platform_device *pdev)
 					"hid-sensor-time", &hid_time_rtc_ops,
 					THIS_MODULE);
 
-	if (IS_ERR(time_state->rtc)) {
+	if (IS_ERR_OR_NULL(time_state->rtc)) {
+		ret = time_state->rtc ? PTR_ERR(time_state->rtc) : -ENODEV;
+		time_state->rtc = NULL;
+		sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_TIME);
 		dev_err(&pdev->dev, "rtc device register failed!\n");
-		return PTR_ERR(time_state->rtc);
 	}
 
 	return ret;
@@ -300,9 +298,19 @@ static int hid_time_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static struct platform_device_id hid_time_ids[] = {
+	{
+		/* Format: HID-SENSOR-usage_id_in_hex_lowercase */
+		.name = "HID-SENSOR-2000a0",
+	},
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, hid_time_ids);
+
 static struct platform_driver hid_time_platform_driver = {
+	.id_table = hid_time_ids,
 	.driver = {
-		.name	= DRIVER_NAME,
+		.name	= KBUILD_MODNAME,
 		.owner	= THIS_MODULE,
 	},
 	.probe		= hid_time_probe,
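The rtc-hid-sensor-time change above replaces the hard-coded driver name with a platform_device_id table plus MODULE_DEVICE_TABLE(), so the module can be autoloaded from the platform modalias. A minimal sketch of that pattern; the id strings are placeholders, not the HID sensor usage IDs:

/* platform id_table sketch - "foo" names are placeholders */
#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	/* pdev->id_entry points at the matching entry below */
	dev_info(&pdev->dev, "bound via id %s\n", pdev->id_entry->name);
	return 0;
}

static const struct platform_device_id foo_ids[] = {
	{ .name = "foo-variant-a" },
	{ .name = "foo-variant-b" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, foo_ids);

static struct platform_driver foo_driver = {
	.id_table	= foo_ids,
	.probe		= foo_probe,
	.driver		= {
		.name	= KBUILD_MODNAME,
		.owner	= THIS_MODULE,
	},
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");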

+ 4 - 12
drivers/rtc/rtc-imxdi.c

@@ -375,24 +375,16 @@ static int __init dryice_rtc_probe(struct platform_device *pdev)
 	struct imxdi_dev *imxdi;
 	int rc;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -ENODEV;
-
 	imxdi = devm_kzalloc(&pdev->dev, sizeof(*imxdi), GFP_KERNEL);
 	if (!imxdi)
 		return -ENOMEM;
 
 	imxdi->pdev = pdev;
 
-	if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
-				pdev->name))
-		return -EBUSY;
-
-	imxdi->ioaddr = devm_ioremap(&pdev->dev, res->start,
-			resource_size(res));
-	if (imxdi->ioaddr == NULL)
-		return -ENOMEM;
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	imxdi->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(imxdi->ioaddr))
+		return PTR_ERR(imxdi->ioaddr);
 
 	spin_lock_init(&imxdi->irq_lock);
 

+ 4 - 20
drivers/rtc/rtc-lpc32xx.c

@@ -201,16 +201,9 @@ static int lpc32xx_rtc_probe(struct platform_device *pdev)
 {
 	struct resource *res;
 	struct lpc32xx_rtc *rtc;
-	resource_size_t size;
 	int rtcirq;
 	u32 tmp;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "Can't get memory resource\n");
-		return -ENOENT;
-	}
-
 	rtcirq = platform_get_irq(pdev, 0);
 	if (rtcirq < 0 || rtcirq >= NR_IRQS) {
 		dev_warn(&pdev->dev, "Can't get interrupt resource\n");
@@ -224,19 +217,10 @@ static int lpc32xx_rtc_probe(struct platform_device *pdev)
 	}
 	rtc->irq = rtcirq;
 
-	size = resource_size(res);
-
-	if (!devm_request_mem_region(&pdev->dev, res->start, size,
-				     pdev->name)) {
-		dev_err(&pdev->dev, "RTC registers are not free\n");
-		return -EBUSY;
-	}
-
-	rtc->rtc_base = devm_ioremap(&pdev->dev, res->start, size);
-	if (!rtc->rtc_base) {
-		dev_err(&pdev->dev, "Can't map memory\n");
-		return -ENOMEM;
-	}
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	rtc->rtc_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(rtc->rtc_base))
+		return PTR_ERR(rtc->rtc_base);
 
 	spin_lock_init(&rtc->lock);
 

+ 2 - 2
drivers/rtc/rtc-max77686.c

@@ -240,9 +240,9 @@ static int max77686_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 	}
 
 	alrm->pending = 0;
-	ret = regmap_read(info->max77686->regmap, MAX77686_REG_STATUS1, &val);
+	ret = regmap_read(info->max77686->regmap, MAX77686_REG_STATUS2, &val);
 	if (ret < 0) {
-		dev_err(info->dev, "%s:%d fail to read status1 reg(%d)\n",
+		dev_err(info->dev, "%s:%d fail to read status2 reg(%d)\n",
 				__func__, __LINE__, ret);
 		goto out;
 	}

+ 330 - 0
drivers/rtc/rtc-moxart.c

@@ -0,0 +1,330 @@
+/*
+ * MOXA ART RTC driver.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * Based on code from
+ * Moxa Technology Co., Ltd. <www.moxa.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/rtc.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+
+#define GPIO_RTC_RESERVED			0x0C
+#define GPIO_RTC_DATA_SET			0x10
+#define GPIO_RTC_DATA_CLEAR			0x14
+#define GPIO_RTC_PIN_PULL_ENABLE		0x18
+#define GPIO_RTC_PIN_PULL_TYPE			0x1C
+#define GPIO_RTC_INT_ENABLE			0x20
+#define GPIO_RTC_INT_RAW_STATE			0x24
+#define GPIO_RTC_INT_MASKED_STATE		0x28
+#define GPIO_RTC_INT_MASK			0x2C
+#define GPIO_RTC_INT_CLEAR			0x30
+#define GPIO_RTC_INT_TRIGGER			0x34
+#define GPIO_RTC_INT_BOTH			0x38
+#define GPIO_RTC_INT_RISE_NEG			0x3C
+#define GPIO_RTC_BOUNCE_ENABLE			0x40
+#define GPIO_RTC_BOUNCE_PRE_SCALE		0x44
+#define GPIO_RTC_PROTECT_W			0x8E
+#define GPIO_RTC_PROTECT_R			0x8F
+#define GPIO_RTC_YEAR_W				0x8C
+#define GPIO_RTC_YEAR_R				0x8D
+#define GPIO_RTC_DAY_W				0x8A
+#define GPIO_RTC_DAY_R				0x8B
+#define GPIO_RTC_MONTH_W			0x88
+#define GPIO_RTC_MONTH_R			0x89
+#define GPIO_RTC_DATE_W				0x86
+#define GPIO_RTC_DATE_R				0x87
+#define GPIO_RTC_HOURS_W			0x84
+#define GPIO_RTC_HOURS_R			0x85
+#define GPIO_RTC_MINUTES_W			0x82
+#define GPIO_RTC_MINUTES_R			0x83
+#define GPIO_RTC_SECONDS_W			0x80
+#define GPIO_RTC_SECONDS_R			0x81
+#define GPIO_RTC_DELAY_TIME			8
+
+struct moxart_rtc {
+	struct rtc_device *rtc;
+	spinlock_t rtc_lock;
+	int gpio_data, gpio_sclk, gpio_reset;
+};
+
+static int day_of_year[12] =	{ 0, 31, 59, 90, 120, 151, 181,
+				  212, 243, 273, 304, 334 };
+
+static void moxart_rtc_write_byte(struct device *dev, u8 data)
+{
+	struct moxart_rtc *moxart_rtc = dev_get_drvdata(dev);
+	int i;
+
+	for (i = 0; i < 8; i++, data >>= 1) {
+		gpio_set_value(moxart_rtc->gpio_sclk, 0);
+		gpio_set_value(moxart_rtc->gpio_data, ((data & 1) == 1));
+		udelay(GPIO_RTC_DELAY_TIME);
+		gpio_set_value(moxart_rtc->gpio_sclk, 1);
+		udelay(GPIO_RTC_DELAY_TIME);
+	}
+}
+
+static u8 moxart_rtc_read_byte(struct device *dev)
+{
+	struct moxart_rtc *moxart_rtc = dev_get_drvdata(dev);
+	int i;
+	u8 data = 0;
+
+	for (i = 0; i < 8; i++) {
+		gpio_set_value(moxart_rtc->gpio_sclk, 0);
+		udelay(GPIO_RTC_DELAY_TIME);
+		gpio_set_value(moxart_rtc->gpio_sclk, 1);
+		udelay(GPIO_RTC_DELAY_TIME);
+		if (gpio_get_value(moxart_rtc->gpio_data))
+			data |= (1 << i);
+		udelay(GPIO_RTC_DELAY_TIME);
+	}
+	return data;
+}
+
+static u8 moxart_rtc_read_register(struct device *dev, u8 cmd)
+{
+	struct moxart_rtc *moxart_rtc = dev_get_drvdata(dev);
+	u8 data;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	gpio_direction_output(moxart_rtc->gpio_data, 0);
+	gpio_set_value(moxart_rtc->gpio_reset, 1);
+	udelay(GPIO_RTC_DELAY_TIME);
+	moxart_rtc_write_byte(dev, cmd);
+	gpio_direction_input(moxart_rtc->gpio_data);
+	udelay(GPIO_RTC_DELAY_TIME);
+	data = moxart_rtc_read_byte(dev);
+	gpio_set_value(moxart_rtc->gpio_sclk, 0);
+	gpio_set_value(moxart_rtc->gpio_reset, 0);
+	udelay(GPIO_RTC_DELAY_TIME);
+
+	local_irq_restore(flags);
+
+	return data;
+}
+
+static void moxart_rtc_write_register(struct device *dev, u8 cmd, u8 data)
+{
+	struct moxart_rtc *moxart_rtc = dev_get_drvdata(dev);
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	gpio_direction_output(moxart_rtc->gpio_data, 0);
+	gpio_set_value(moxart_rtc->gpio_reset, 1);
+	udelay(GPIO_RTC_DELAY_TIME);
+	moxart_rtc_write_byte(dev, cmd);
+	moxart_rtc_write_byte(dev, data);
+	gpio_set_value(moxart_rtc->gpio_sclk, 0);
+	gpio_set_value(moxart_rtc->gpio_reset, 0);
+	udelay(GPIO_RTC_DELAY_TIME);
+
+	local_irq_restore(flags);
+}
+
+static int moxart_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+	struct moxart_rtc *moxart_rtc = dev_get_drvdata(dev);
+
+	spin_lock_irq(&moxart_rtc->rtc_lock);
+
+	moxart_rtc_write_register(dev, GPIO_RTC_PROTECT_W, 0);
+	moxart_rtc_write_register(dev, GPIO_RTC_YEAR_W,
+				  (((tm->tm_year - 100) / 10) << 4) |
+				  ((tm->tm_year - 100) % 10));
+
+	moxart_rtc_write_register(dev, GPIO_RTC_MONTH_W,
+				  (((tm->tm_mon + 1) / 10) << 4) |
+				  ((tm->tm_mon + 1) % 10));
+
+	moxart_rtc_write_register(dev, GPIO_RTC_DATE_W,
+				  ((tm->tm_mday / 10) << 4) |
+				  (tm->tm_mday % 10));
+
+	moxart_rtc_write_register(dev, GPIO_RTC_HOURS_W,
+				  ((tm->tm_hour / 10) << 4) |
+				  (tm->tm_hour % 10));
+
+	moxart_rtc_write_register(dev, GPIO_RTC_MINUTES_W,
+				  ((tm->tm_min / 10) << 4) |
+				  (tm->tm_min % 10));
+
+	moxart_rtc_write_register(dev, GPIO_RTC_SECONDS_W,
+				  ((tm->tm_sec / 10) << 4) |
+				  (tm->tm_sec % 10));
+
+	moxart_rtc_write_register(dev, GPIO_RTC_PROTECT_W, 0x80);
+
+	spin_unlock_irq(&moxart_rtc->rtc_lock);
+
+	dev_dbg(dev, "%s: success tm_year=%d tm_mon=%d\n"
+		"tm_mday=%d tm_hour=%d tm_min=%d tm_sec=%d\n",
+		__func__, tm->tm_year, tm->tm_mon, tm->tm_mday,
+		tm->tm_hour, tm->tm_min, tm->tm_sec);
+
+	return 0;
+}
+
+static int moxart_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+	struct moxart_rtc *moxart_rtc = dev_get_drvdata(dev);
+	unsigned char v;
+
+	spin_lock_irq(&moxart_rtc->rtc_lock);
+
+	v = moxart_rtc_read_register(dev, GPIO_RTC_SECONDS_R);
+	tm->tm_sec = (((v & 0x70) >> 4) * 10) + (v & 0x0F);
+
+	v = moxart_rtc_read_register(dev, GPIO_RTC_MINUTES_R);
+	tm->tm_min = (((v & 0x70) >> 4) * 10) + (v & 0x0F);
+
+	v = moxart_rtc_read_register(dev, GPIO_RTC_HOURS_R);
+	if (v & 0x80) { /* 12-hour mode */
+		tm->tm_hour = (((v & 0x10) >> 4) * 10) + (v & 0x0F);
+		if (v & 0x20) { /* PM mode */
+			tm->tm_hour += 12;
+			if (tm->tm_hour >= 24)
+				tm->tm_hour = 0;
+		}
+	} else { /* 24-hour mode */
+		tm->tm_hour = (((v & 0x30) >> 4) * 10) + (v & 0x0F);
+	}
+
+	v = moxart_rtc_read_register(dev, GPIO_RTC_DATE_R);
+	tm->tm_mday = (((v & 0x30) >> 4) * 10) + (v & 0x0F);
+
+	v = moxart_rtc_read_register(dev, GPIO_RTC_MONTH_R);
+	tm->tm_mon = (((v & 0x10) >> 4) * 10) + (v & 0x0F);
+	tm->tm_mon--;
+
+	v = moxart_rtc_read_register(dev, GPIO_RTC_YEAR_R);
+	tm->tm_year = (((v & 0xF0) >> 4) * 10) + (v & 0x0F);
+	tm->tm_year += 100;
+	if (tm->tm_year <= 69)
+		tm->tm_year += 100;
+
+	v = moxart_rtc_read_register(dev, GPIO_RTC_DAY_R);
+	tm->tm_wday = (v & 0x0f) - 1;
+	tm->tm_yday = day_of_year[tm->tm_mon];
+	tm->tm_yday += (tm->tm_mday - 1);
+	if (tm->tm_mon >= 2) {
+		int year = tm->tm_year + 1900;
+
+		/* add the leap day; handle the 100/400-year rules too */
+		if ((year % 4 == 0 && year % 100 != 0) || year % 400 == 0)
+			tm->tm_yday++;
+	}
+
+	tm->tm_isdst = 0;
+
+	spin_unlock_irq(&moxart_rtc->rtc_lock);
+
+	return 0;
+}
+
+static const struct rtc_class_ops moxart_rtc_ops = {
+	.read_time	= moxart_rtc_read_time,
+	.set_time	= moxart_rtc_set_time,
+};
+
+static int moxart_rtc_probe(struct platform_device *pdev)
+{
+	struct moxart_rtc *moxart_rtc;
+	int ret = 0;
+
+	moxart_rtc = devm_kzalloc(&pdev->dev, sizeof(*moxart_rtc), GFP_KERNEL);
+	if (!moxart_rtc) {
+		dev_err(&pdev->dev, "devm_kzalloc failed\n");
+		return -ENOMEM;
+	}
+
+	moxart_rtc->gpio_data = of_get_named_gpio(pdev->dev.of_node,
+						  "gpio-rtc-data", 0);
+	if (!gpio_is_valid(moxart_rtc->gpio_data)) {
+		dev_err(&pdev->dev, "invalid gpio (data): %d\n",
+			moxart_rtc->gpio_data);
+		return moxart_rtc->gpio_data;
+	}
+
+	moxart_rtc->gpio_sclk = of_get_named_gpio(pdev->dev.of_node,
+						  "gpio-rtc-sclk", 0);
+	if (!gpio_is_valid(moxart_rtc->gpio_sclk)) {
+		dev_err(&pdev->dev, "invalid gpio (sclk): %d\n",
+			moxart_rtc->gpio_sclk);
+		return moxart_rtc->gpio_sclk;
+	}
+
+	moxart_rtc->gpio_reset = of_get_named_gpio(pdev->dev.of_node,
+						   "gpio-rtc-reset", 0);
+	if (!gpio_is_valid(moxart_rtc->gpio_reset)) {
+		dev_err(&pdev->dev, "invalid gpio (reset): %d\n",
+			moxart_rtc->gpio_reset);
+		return moxart_rtc->gpio_reset;
+	}
+
+	spin_lock_init(&moxart_rtc->rtc_lock);
+	platform_set_drvdata(pdev, moxart_rtc);
+
+	ret = devm_gpio_request(&pdev->dev, moxart_rtc->gpio_data, "rtc_data");
+	if (ret) {
+		dev_err(&pdev->dev, "can't get rtc_data gpio\n");
+		return ret;
+	}
+
+	ret = devm_gpio_request_one(&pdev->dev, moxart_rtc->gpio_sclk,
+				    GPIOF_DIR_OUT, "rtc_sclk");
+	if (ret) {
+		dev_err(&pdev->dev, "can't get rtc_sclk gpio\n");
+		return ret;
+	}
+
+	ret = devm_gpio_request_one(&pdev->dev, moxart_rtc->gpio_reset,
+				    GPIOF_DIR_OUT, "rtc_reset");
+	if (ret) {
+		dev_err(&pdev->dev, "can't get rtc_reset gpio\n");
+		return ret;
+	}
+
+	moxart_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+						   &moxart_rtc_ops,
+						   THIS_MODULE);
+	if (IS_ERR(moxart_rtc->rtc)) {
+		dev_err(&pdev->dev, "devm_rtc_device_register failed\n");
+		return PTR_ERR(moxart_rtc->rtc);
+	}
+
+	return 0;
+}
+
+static const struct of_device_id moxart_rtc_match[] = {
+	{ .compatible = "moxa,moxart-rtc" },
+	{ },
+};
+
+static struct platform_driver moxart_rtc_driver = {
+	.probe	= moxart_rtc_probe,
+	.driver	= {
+		.name		= "moxart-rtc",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_rtc_match,
+	},
+};
+module_platform_driver(moxart_rtc_driver);
+
+MODULE_DESCRIPTION("MOXA ART RTC driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
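moxart_rtc_set_time() and moxart_rtc_read_time() above pack and unpack the time fields as two-digit packed BCD, i.e. ((v / 10) << 4) | (v % 10) and the reverse; the kernel's bin2bcd()/bcd2bin() helpers in <linux/bcd.h> implement the same conversion. A small user-space sketch of the round trip:

/* bcd_sketch.c - user-space illustration of the packed-BCD encoding used
 * by the MOXA ART set_time/read_time paths; illustrative only. */
#include <stdio.h>

static unsigned char to_bcd(unsigned int v)	/* like bin2bcd() */
{
	return ((v / 10) << 4) | (v % 10);
}

static unsigned int from_bcd(unsigned char v)	/* like bcd2bin() */
{
	return ((v >> 4) * 10) + (v & 0x0F);
}

int main(void)
{
	unsigned int sec = 59;

	printf("%u encodes to 0x%02X and decodes back to %u\n",
	       sec, to_bcd(sec), from_bcd(to_bcd(sec)));
	return 0;
}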

+ 4 - 13
drivers/rtc/rtc-mv.c

@@ -221,26 +221,17 @@ static int __init mv_rtc_probe(struct platform_device *pdev)
 {
 	struct resource *res;
 	struct rtc_plat_data *pdata;
-	resource_size_t size;
 	u32 rtc_time;
 	int ret = 0;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -ENODEV;
-
 	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
 		return -ENOMEM;
 
-	size = resource_size(res);
-	if (!devm_request_mem_region(&pdev->dev, res->start, size,
-				     pdev->name))
-		return -EBUSY;
-
-	pdata->ioaddr = devm_ioremap(&pdev->dev, res->start, size);
-	if (!pdata->ioaddr)
-		return -ENOMEM;
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(pdata->ioaddr))
+		return PTR_ERR(pdata->ioaddr);
 
 	pdata->clk = devm_clk_get(&pdev->dev, NULL);
 	/* Not all SoCs require a clock.*/

+ 4 - 10
drivers/rtc/rtc-mxc.c

@@ -377,22 +377,16 @@ static int mxc_rtc_probe(struct platform_device *pdev)
 	unsigned long rate;
 	int ret;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -ENODEV;
-
 	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
 		return -ENOMEM;
 
 	pdata->devtype = pdev->id_entry->driver_data;
 
-	if (!devm_request_mem_region(&pdev->dev, res->start,
-				     resource_size(res), pdev->name))
-		return -EBUSY;
-
-	pdata->ioaddr = devm_ioremap(&pdev->dev, res->start,
-				     resource_size(res));
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(pdata->ioaddr))
+		return PTR_ERR(pdata->ioaddr);
 
 	pdata->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(pdata->clk)) {

+ 1 - 1
drivers/rtc/rtc-nuc900.c

@@ -99,7 +99,7 @@ static int *check_rtc_access_enable(struct nuc900_rtc *nuc900_rtc)
 	if (!timeout)
 		return ERR_PTR(-EPERM);
 
-	return 0;
+	return NULL;
 }
 
 static int nuc900_rtc_bcd2bin(unsigned int timereg,

+ 52 - 8
drivers/rtc/rtc-omap.c

@@ -70,6 +70,8 @@
 #define OMAP_RTC_KICK0_REG		0x6c
 #define OMAP_RTC_KICK1_REG		0x70
 
+#define OMAP_RTC_IRQWAKEEN		0x7c
+
 /* OMAP_RTC_CTRL_REG bit fields: */
 #define OMAP_RTC_CTRL_SPLIT		(1<<7)
 #define OMAP_RTC_CTRL_DISABLE		(1<<6)
@@ -94,12 +96,21 @@
 #define OMAP_RTC_INTERRUPTS_IT_ALARM    (1<<3)
 #define OMAP_RTC_INTERRUPTS_IT_TIMER    (1<<2)
 
+/* OMAP_RTC_IRQWAKEEN bit fields: */
+#define OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN    (1<<1)
+
 /* OMAP_RTC_KICKER values */
 #define	KICK0_VALUE			0x83e70b13
 #define	KICK1_VALUE			0x95a4f1e0
 
 #define	OMAP_RTC_HAS_KICKER		0x1
 
+/*
+ * A few RTC IP revisions have a special WAKE-EN register to enable
+ * wakeup generation for the alarm event.
+ */
+#define	OMAP_RTC_HAS_IRQWAKEEN		0x2
+
 static void __iomem	*rtc_base;
 
 #define rtc_read(addr)		readb(rtc_base + (addr))
@@ -299,12 +310,18 @@ static struct rtc_class_ops omap_rtc_ops = {
 static int omap_rtc_alarm;
 static int omap_rtc_timer;
 
-#define	OMAP_RTC_DATA_DA830_IDX	1
+#define	OMAP_RTC_DATA_AM3352_IDX	1
+#define	OMAP_RTC_DATA_DA830_IDX		2
 
 static struct platform_device_id omap_rtc_devtype[] = {
 	{
 		.name	= DRIVER_NAME,
-	}, {
+	},
+	[OMAP_RTC_DATA_AM3352_IDX] = {
+		.name	= "am3352-rtc",
+		.driver_data = OMAP_RTC_HAS_KICKER | OMAP_RTC_HAS_IRQWAKEEN,
+	},
+	[OMAP_RTC_DATA_DA830_IDX] = {
 		.name	= "da830-rtc",
 		.driver_data = OMAP_RTC_HAS_KICKER,
 	},
@@ -316,6 +333,9 @@ static const struct of_device_id omap_rtc_of_match[] = {
 	{	.compatible	= "ti,da830-rtc",
 		.data		= &omap_rtc_devtype[OMAP_RTC_DATA_DA830_IDX],
 	},
+	{	.compatible	= "ti,am3352-rtc",
+		.data		= &omap_rtc_devtype[OMAP_RTC_DATA_AM3352_IDX],
+	},
 	{},
 };
 MODULE_DEVICE_TABLE(of, omap_rtc_of_match);
@@ -464,16 +484,28 @@ static u8 irqstat;
 
 static int omap_rtc_suspend(struct device *dev)
 {
+	u8 irqwake_stat;
+	struct platform_device *pdev = to_platform_device(dev);
+	const struct platform_device_id *id_entry =
+					platform_get_device_id(pdev);
+
 	irqstat = rtc_read(OMAP_RTC_INTERRUPTS_REG);
 
 	/* FIXME the RTC alarm is not currently acting as a wakeup event
-	 * source, and in fact this enable() call is just saving a flag
-	 * that's never used...
+	 * source on some platforms, and in fact this enable() call is just
+	 * saving a flag that's never used...
 	 */
-	if (device_may_wakeup(dev))
+	if (device_may_wakeup(dev)) {
 		enable_irq_wake(omap_rtc_alarm);
-	else
+
+		if (id_entry->driver_data & OMAP_RTC_HAS_IRQWAKEEN) {
+			irqwake_stat = rtc_read(OMAP_RTC_IRQWAKEEN);
+			irqwake_stat |= OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN;
+			rtc_write(irqwake_stat, OMAP_RTC_IRQWAKEEN);
+		}
+	} else {
 		rtc_write(0, OMAP_RTC_INTERRUPTS_REG);
+	}
 
 	/* Disable the clock/module */
 	pm_runtime_put_sync(dev);
@@ -483,13 +515,25 @@ static int omap_rtc_suspend(struct device *dev)
 
 static int omap_rtc_resume(struct device *dev)
 {
+	u8 irqwake_stat;
+	struct platform_device *pdev = to_platform_device(dev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(pdev);
+
 	/* Enable the clock/module so that we can access the registers */
 	pm_runtime_get_sync(dev);
 
-	if (device_may_wakeup(dev))
+	if (device_may_wakeup(dev)) {
 		disable_irq_wake(omap_rtc_alarm);
-	else
+
+		if (id_entry->driver_data & OMAP_RTC_HAS_IRQWAKEEN) {
+			irqwake_stat = rtc_read(OMAP_RTC_IRQWAKEEN);
+			irqwake_stat &= ~OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN;
+			rtc_write(irqwake_stat, OMAP_RTC_IRQWAKEEN);
+		}
+	} else {
 		rtc_write(irqstat, OMAP_RTC_INTERRUPTS_REG);
+	}
 	return 0;
 }
 #endif

+ 35 - 0
drivers/rtc/rtc-palmas.c

@@ -238,6 +238,15 @@ static int palmas_rtc_probe(struct platform_device *pdev)
 	struct palmas *palmas = dev_get_drvdata(pdev->dev.parent);
 	struct palmas_rtc *palmas_rtc = NULL;
 	int ret;
+	bool enable_bb_charging = false;
+	bool high_bb_charging;
+
+	if (pdev->dev.of_node) {
+		enable_bb_charging = of_property_read_bool(pdev->dev.of_node,
+					"ti,backup-battery-chargeable");
+		high_bb_charging = of_property_read_bool(pdev->dev.of_node,
+					"ti,backup-battery-charge-high-current");
+	}
 
 	palmas_rtc = devm_kzalloc(&pdev->dev, sizeof(struct palmas_rtc),
 			GFP_KERNEL);
@@ -254,6 +263,32 @@ static int palmas_rtc_probe(struct platform_device *pdev)
 	palmas_rtc->dev = &pdev->dev;
 	platform_set_drvdata(pdev, palmas_rtc);
 
+	if (enable_bb_charging) {
+		unsigned reg = PALMAS_BACKUP_BATTERY_CTRL_BBS_BBC_LOW_ICHRG;
+
+		if (high_bb_charging)
+			reg = 0;
+
+		ret = palmas_update_bits(palmas, PALMAS_PMU_CONTROL_BASE,
+			PALMAS_BACKUP_BATTERY_CTRL,
+			PALMAS_BACKUP_BATTERY_CTRL_BBS_BBC_LOW_ICHRG, reg);
+		if (ret < 0) {
+			dev_err(&pdev->dev,
+				"BACKUP_BATTERY_CTRL update failed, %d\n", ret);
+			return ret;
+		}
+
+		ret = palmas_update_bits(palmas, PALMAS_PMU_CONTROL_BASE,
+			PALMAS_BACKUP_BATTERY_CTRL,
+			PALMAS_BACKUP_BATTERY_CTRL_BB_CHG_EN,
+			PALMAS_BACKUP_BATTERY_CTRL_BB_CHG_EN);
+		if (ret < 0) {
+			dev_err(&pdev->dev,
+				"BACKUP_BATTERY_CTRL update failed, %d\n", ret);
+			return ret;
+		}
+	}
+
 	/* Start RTC */
 	ret = palmas_update_bits(palmas, PALMAS_RTC_BASE, PALMAS_RTC_CTRL_REG,
 			PALMAS_RTC_CTRL_REG_STOP_RTC,

+ 0 - 6
drivers/rtc/rtc-pcf2127.c

@@ -203,11 +203,6 @@ static int pcf2127_probe(struct i2c_client *client,
 	return 0;
 }
 
-static int pcf2127_remove(struct i2c_client *client)
-{
-	return 0;
-}
-
 static const struct i2c_device_id pcf2127_id[] = {
 	{ "pcf2127", 0 },
 	{ }
@@ -229,7 +224,6 @@ static struct i2c_driver pcf2127_driver = {
 		.of_match_table = of_match_ptr(pcf2127_of_match),
 	},
 	.probe		= pcf2127_probe,
-	.remove		= pcf2127_remove,
 	.id_table	= pcf2127_id,
 };
 

+ 9 - 4
drivers/rtc/rtc-sirfsoc.c

@@ -44,6 +44,7 @@ struct sirfsoc_rtc_drv {
 	struct rtc_device	*rtc;
 	u32			rtc_base;
 	u32			irq;
+	unsigned		irq_wake;
 	/* Overflow for every 8 years extra time */
 	u32			overflow_rtc;
 #ifdef CONFIG_PM
@@ -355,8 +356,8 @@ static int sirfsoc_rtc_suspend(struct device *dev)
 	rtcdrv->saved_counter =
 		sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_CN);
 	rtcdrv->saved_overflow_rtc = rtcdrv->overflow_rtc;
-	if (device_may_wakeup(&pdev->dev))
-		enable_irq_wake(rtcdrv->irq);
+	if (device_may_wakeup(&pdev->dev) && !enable_irq_wake(rtcdrv->irq))
+		rtcdrv->irq_wake = 1;
 
 	return 0;
 }
@@ -423,8 +424,10 @@ static int sirfsoc_rtc_resume(struct device *dev)
 	struct platform_device *pdev = to_platform_device(dev);
 	struct sirfsoc_rtc_drv *rtcdrv = platform_get_drvdata(pdev);
 	sirfsoc_rtc_thaw(dev);
-	if (device_may_wakeup(&pdev->dev))
+	if (device_may_wakeup(&pdev->dev) && rtcdrv->irq_wake) {
 		disable_irq_wake(rtcdrv->irq);
+		rtcdrv->irq_wake = 0;
+	}
 
 	return 0;
 }
@@ -434,8 +437,10 @@ static int sirfsoc_rtc_restore(struct device *dev)
 	struct platform_device *pdev = to_platform_device(dev);
 	struct sirfsoc_rtc_drv *rtcdrv = platform_get_drvdata(pdev);
 
-	if (device_may_wakeup(&pdev->dev))
+	if (device_may_wakeup(&pdev->dev) && rtcdrv->irq_wake) {
 		disable_irq_wake(rtcdrv->irq);
+		rtcdrv->irq_wake = 0;
+	}
 	return 0;
 }
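The rtc-sirfsoc change above records whether enable_irq_wake() actually succeeded and only calls disable_irq_wake() when it did, keeping the IRQ wake depth balanced. A minimal sketch of the pattern with placeholder foo names (not the sirfsoc code itself):

/* balanced enable/disable_irq_wake() sketch - "foo" names are placeholders */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

struct foo_drv {
	unsigned int irq;
	unsigned int irq_wake;	/* set only if enable_irq_wake() succeeded */
};

static int foo_suspend(struct device *dev)
{
	struct foo_drv *drv = dev_get_drvdata(dev);

	/* remember the wake enable only when it actually took effect */
	if (device_may_wakeup(dev) && !enable_irq_wake(drv->irq))
		drv->irq_wake = 1;
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct foo_drv *drv = dev_get_drvdata(dev);

	/* never unbalance the wake depth on resume */
	if (device_may_wakeup(dev) && drv->irq_wake) {
		disable_irq_wake(drv->irq);
		drv->irq_wake = 0;
	}
	return 0;
}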
 

+ 5 - 10
drivers/rtc/rtc-stk17ta8.c

@@ -294,19 +294,14 @@ static int stk17ta8_rtc_probe(struct platform_device *pdev)
 	void __iomem *ioaddr;
 	int ret = 0;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -ENODEV;
-
 	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
 		return -ENOMEM;
-	if (!devm_request_mem_region(&pdev->dev, res->start, RTC_REG_SIZE,
-			pdev->name))
-		return -EBUSY;
-	ioaddr = devm_ioremap(&pdev->dev, res->start, RTC_REG_SIZE);
-	if (!ioaddr)
-		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ioaddr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ioaddr))
+		return PTR_ERR(ioaddr);
 	pdata->ioaddr = ioaddr;
 	pdata->irq = platform_get_irq(pdev, 0);
 

+ 4 - 10
drivers/rtc/rtc-tx4939.c

@@ -244,9 +244,6 @@ static int __init tx4939_rtc_probe(struct platform_device *pdev)
 	struct resource *res;
 	int irq, ret;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -ENODEV;
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0)
 		return -ENODEV;
@@ -255,13 +252,10 @@ static int __init tx4939_rtc_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	platform_set_drvdata(pdev, pdata);
 
-	if (!devm_request_mem_region(&pdev->dev, res->start,
-				     resource_size(res), pdev->name))
-		return -EBUSY;
-	pdata->rtcreg = devm_ioremap(&pdev->dev, res->start,
-				     resource_size(res));
-	if (!pdata->rtcreg)
-		return -EBUSY;
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pdata->rtcreg = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(pdata->rtcreg))
+		return PTR_ERR(pdata->rtcreg);
 
 	spin_lock_init(&pdata->lock);
 	tx4939_rtc_cmd(pdata->rtcreg, TX4939_RTCCTL_COMMAND_NOP);

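Note: same conversion as rtc-stk17ta8 above. devm_ioremap_resource() also rejects a NULL resource itself (with -EINVAL, as far as I recall), which is why the explicit !res check can go away and the probe simply forwards whatever error the helper encoded. The ERR_PTR()/IS_ERR()/PTR_ERR() convention it relies on, in miniature (illustrative names):

#include <linux/err.h>
#include <linux/errno.h>

/* An errno packed into a pointer-sized return value. */
static void *get_thing(int fail_with)
{
	if (fail_with)
		return ERR_PTR(fail_with);	/* e.g. -EBUSY, -ENOMEM, -EINVAL */
	return (void *)0x1000;			/* stand-in for a real object */
}

static int use_thing(void)
{
	void *p = get_thing(-EBUSY);

	if (IS_ERR(p))
		return PTR_ERR(p);		/* propagate the encoded errno */
	return 0;
}
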
+ 3 - 3
drivers/s390/char/zcore.c

@@ -30,8 +30,8 @@
 
 #define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
 
-#define TO_USER		0
-#define TO_KERNEL	1
+#define TO_USER		1
+#define TO_KERNEL	0
 #define CHUNK_INFO_SIZE	34 /* 2 16-byte char, each followed by blank */
 
 enum arch_id {
@@ -73,7 +73,7 @@ static struct ipl_parameter_block *ipl_block;
  * @count: Size of buffer, which should be copied
  * @mode:  Either TO_KERNEL or TO_USER
  */
-static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode)
+int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode)
 {
 	int offs, blk_num;
 	static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

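Note: memcpy_hsa() copies a chunk of the s390 hardware save area either into a kernel buffer or out to user space, selected by the TO_USER/TO_KERNEL mode argument; dropping "static" makes it callable from outside zcore.c, presumably by other dump code in this series. A hedged sketch of just the mode dispatch, not the real HSA access (the real function also fetches the data from firmware in page-sized chunks, as the static PAGE_SIZE buffer above suggests):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/uaccess.h>

#define EX_TO_USER	1
#define EX_TO_KERNEL	0

static int copy_chunk(void *dest, const void *src, size_t count, int mode)
{
	if (mode == EX_TO_USER) {
		if (copy_to_user((void __user *)dest, src, count))
			return -EFAULT;
		return 0;
	}
	/* EX_TO_KERNEL: plain in-kernel copy */
	memcpy(dest, src, count);
	return 0;
}
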
+ 1 - 265
drivers/video/acornfb.c

@@ -37,14 +37,6 @@
 
 #include "acornfb.h"
 
-/*
- * VIDC machines can't do 16 or 32BPP modes.
- */
-#ifdef HAS_VIDC
-#undef FBCON_HAS_CFB16
-#undef FBCON_HAS_CFB32
-#endif
-
 /*
  * Default resolution.
  * NOTE that it has to be supported in the table towards
@@ -106,238 +98,6 @@ static struct vidc_timing current_vidc;
 
 extern unsigned int vram_size;	/* set by setup.c */
 
-#ifdef HAS_VIDC
-
-#define MAX_SIZE	480*1024
-
-/* CTL     VIDC	Actual
- * 24.000  0	 8.000
- * 25.175  0	 8.392
- * 36.000  0	12.000
- * 24.000  1	12.000
- * 25.175  1	12.588
- * 24.000  2	16.000
- * 25.175  2	16.783
- * 36.000  1	18.000
- * 24.000  3	24.000
- * 36.000  2	24.000
- * 25.175  3	25.175
- * 36.000  3	36.000
- */
-struct pixclock {
-	u_long	min_clock;
-	u_long	max_clock;
-	u_int	vidc_ctl;
-	u_int	vid_ctl;
-};
-
-static struct pixclock arc_clocks[] = {
-	/* we allow +/-1% on these */
-	{ 123750, 126250, VIDC_CTRL_DIV3,   VID_CTL_24MHz },	/*  8.000MHz */
-	{  82500,  84167, VIDC_CTRL_DIV2,   VID_CTL_24MHz },	/* 12.000MHz */
-	{  61875,  63125, VIDC_CTRL_DIV1_5, VID_CTL_24MHz },	/* 16.000MHz */
-	{  41250,  42083, VIDC_CTRL_DIV1,   VID_CTL_24MHz },	/* 24.000MHz */
-};
-
-static struct pixclock *
-acornfb_valid_pixrate(struct fb_var_screeninfo *var)
-{
-	u_long pixclock = var->pixclock;
-	u_int i;
-
-	if (!var->pixclock)
-		return NULL;
-
-	for (i = 0; i < ARRAY_SIZE(arc_clocks); i++)
-		if (pixclock > arc_clocks[i].min_clock &&
-		    pixclock < arc_clocks[i].max_clock)
-			return arc_clocks + i;
-
-	return NULL;
-}
-
-/* VIDC Rules:
- * hcr  : must be even (interlace, hcr/2 must be even)
- * hswr : must be even
- * hdsr : must be odd
- * hder : must be odd
- *
- * vcr  : must be odd
- * vswr : >= 1
- * vdsr : >= 1
- * vder : >= vdsr
- * if interlaced, then hcr/2 must be even
- */
-static void
-acornfb_set_timing(struct fb_var_screeninfo *var)
-{
-	struct pixclock *pclk;
-	struct vidc_timing vidc;
-	u_int horiz_correction;
-	u_int sync_len, display_start, display_end, cycle;
-	u_int is_interlaced;
-	u_int vid_ctl, vidc_ctl;
-	u_int bandwidth;
-
-	memset(&vidc, 0, sizeof(vidc));
-
-	pclk = acornfb_valid_pixrate(var);
-	vidc_ctl = pclk->vidc_ctl;
-	vid_ctl  = pclk->vid_ctl;
-
-	bandwidth = var->pixclock * 8 / var->bits_per_pixel;
-	/* 25.175, 4bpp = 79.444ns per byte, 317.776ns per word: fifo = 2,6 */
-	if (bandwidth > 143500)
-		vidc_ctl |= VIDC_CTRL_FIFO_3_7;
-	else if (bandwidth > 71750)
-		vidc_ctl |= VIDC_CTRL_FIFO_2_6;
-	else if (bandwidth > 35875)
-		vidc_ctl |= VIDC_CTRL_FIFO_1_5;
-	else
-		vidc_ctl |= VIDC_CTRL_FIFO_0_4;
-
-	switch (var->bits_per_pixel) {
-	case 1:
-		horiz_correction = 19;
-		vidc_ctl |= VIDC_CTRL_1BPP;
-		break;
-
-	case 2:
-		horiz_correction = 11;
-		vidc_ctl |= VIDC_CTRL_2BPP;
-		break;
-
-	case 4:
-		horiz_correction = 7;
-		vidc_ctl |= VIDC_CTRL_4BPP;
-		break;
-
-	default:
-	case 8:
-		horiz_correction = 5;
-		vidc_ctl |= VIDC_CTRL_8BPP;
-		break;
-	}
-
-	if (var->sync & FB_SYNC_COMP_HIGH_ACT) /* should be FB_SYNC_COMP */
-		vidc_ctl |= VIDC_CTRL_CSYNC;
-	else {
-		if (!(var->sync & FB_SYNC_HOR_HIGH_ACT))
-			vid_ctl |= VID_CTL_HS_NHSYNC;
-
-		if (!(var->sync & FB_SYNC_VERT_HIGH_ACT))
-			vid_ctl |= VID_CTL_VS_NVSYNC;
-	}
-
-	sync_len	= var->hsync_len;
-	display_start	= sync_len + var->left_margin;
-	display_end	= display_start + var->xres;
-	cycle		= display_end + var->right_margin;
-
-	/* if interlaced, then hcr/2 must be even */
-	is_interlaced = (var->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED;
-
-	if (is_interlaced) {
-		vidc_ctl |= VIDC_CTRL_INTERLACE;
-		if (cycle & 2) {
-			cycle += 2;
-			var->right_margin += 2;
-		}
-	}
-
-	vidc.h_cycle		= (cycle - 2) / 2;
-	vidc.h_sync_width	= (sync_len - 2) / 2;
-	vidc.h_border_start	= (display_start - 1) / 2;
-	vidc.h_display_start	= (display_start - horiz_correction) / 2;
-	vidc.h_display_end	= (display_end - horiz_correction) / 2;
-	vidc.h_border_end	= (display_end - 1) / 2;
-	vidc.h_interlace	= (vidc.h_cycle + 1) / 2;
-
-	sync_len	= var->vsync_len;
-	display_start	= sync_len + var->upper_margin;
-	display_end	= display_start + var->yres;
-	cycle		= display_end + var->lower_margin;
-
-	if (is_interlaced)
-		cycle = (cycle - 3) / 2;
-	else
-		cycle = cycle - 1;
-
-	vidc.v_cycle		= cycle;
-	vidc.v_sync_width	= sync_len - 1;
-	vidc.v_border_start	= display_start - 1;
-	vidc.v_display_start	= vidc.v_border_start;
-	vidc.v_display_end	= display_end - 1;
-	vidc.v_border_end	= vidc.v_display_end;
-
-	if (machine_is_a5k())
-		__raw_writeb(vid_ctl, IOEB_VID_CTL);
-
-	if (memcmp(&current_vidc, &vidc, sizeof(vidc))) {
-		current_vidc = vidc;
-
-		vidc_writel(0xe0000000 | vidc_ctl);
-		vidc_writel(0x80000000 | (vidc.h_cycle << 14));
-		vidc_writel(0x84000000 | (vidc.h_sync_width << 14));
-		vidc_writel(0x88000000 | (vidc.h_border_start << 14));
-		vidc_writel(0x8c000000 | (vidc.h_display_start << 14));
-		vidc_writel(0x90000000 | (vidc.h_display_end << 14));
-		vidc_writel(0x94000000 | (vidc.h_border_end << 14));
-		vidc_writel(0x98000000);
-		vidc_writel(0x9c000000 | (vidc.h_interlace << 14));
-		vidc_writel(0xa0000000 | (vidc.v_cycle << 14));
-		vidc_writel(0xa4000000 | (vidc.v_sync_width << 14));
-		vidc_writel(0xa8000000 | (vidc.v_border_start << 14));
-		vidc_writel(0xac000000 | (vidc.v_display_start << 14));
-		vidc_writel(0xb0000000 | (vidc.v_display_end << 14));
-		vidc_writel(0xb4000000 | (vidc.v_border_end << 14));
-		vidc_writel(0xb8000000);
-		vidc_writel(0xbc000000);
-	}
-#ifdef DEBUG_MODE_SELECTION
-	printk(KERN_DEBUG "VIDC registers for %dx%dx%d:\n", var->xres,
-	       var->yres, var->bits_per_pixel);
-	printk(KERN_DEBUG " H-cycle          : %d\n", vidc.h_cycle);
-	printk(KERN_DEBUG " H-sync-width     : %d\n", vidc.h_sync_width);
-	printk(KERN_DEBUG " H-border-start   : %d\n", vidc.h_border_start);
-	printk(KERN_DEBUG " H-display-start  : %d\n", vidc.h_display_start);
-	printk(KERN_DEBUG " H-display-end    : %d\n", vidc.h_display_end);
-	printk(KERN_DEBUG " H-border-end     : %d\n", vidc.h_border_end);
-	printk(KERN_DEBUG " H-interlace      : %d\n", vidc.h_interlace);
-	printk(KERN_DEBUG " V-cycle          : %d\n", vidc.v_cycle);
-	printk(KERN_DEBUG " V-sync-width     : %d\n", vidc.v_sync_width);
-	printk(KERN_DEBUG " V-border-start   : %d\n", vidc.v_border_start);
-	printk(KERN_DEBUG " V-display-start  : %d\n", vidc.v_display_start);
-	printk(KERN_DEBUG " V-display-end    : %d\n", vidc.v_display_end);
-	printk(KERN_DEBUG " V-border-end     : %d\n", vidc.v_border_end);
-	printk(KERN_DEBUG " VIDC Ctrl (E)    : 0x%08X\n", vidc_ctl);
-	printk(KERN_DEBUG " IOEB Ctrl        : 0x%08X\n", vid_ctl);
-#endif
-}
-
-static int
-acornfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
-		  u_int trans, struct fb_info *info)
-{
-	union palette pal;
-
-	if (regno >= current_par.palette_size)
-		return 1;
-
-	pal.p = 0;
-	pal.vidc.reg   = regno;
-	pal.vidc.red   = red >> 12;
-	pal.vidc.green = green >> 12;
-	pal.vidc.blue  = blue >> 12;
-
-	current_par.palette[regno] = pal;
-
-	vidc_writel(pal.p);
-
-	return 0;
-}
-#endif
-
 #ifdef HAS_VIDC20
 #include <mach/acornfb.h>
 
@@ -634,16 +394,7 @@ acornfb_adjust_timing(struct fb_info *info, struct fb_var_screeninfo *var, u_int
 	/* hsync_len must be even */
 	var->hsync_len = (var->hsync_len + 1) & ~1;
 
-#ifdef HAS_VIDC
-	/* left_margin must be odd */
-	if ((var->left_margin & 1) == 0) {
-		var->left_margin -= 1;
-		var->right_margin += 1;
-	}
-
-	/* right_margin must be odd */
-	var->right_margin |= 1;
-#elif defined(HAS_VIDC20)
+#if defined(HAS_VIDC20)
 	/* left_margin must be even */
 	if (var->left_margin & 1) {
 		var->left_margin += 1;
@@ -787,11 +538,7 @@ static int acornfb_set_par(struct fb_info *info)
 		break;
 	case 8:
 		current_par.palette_size = VIDC_PALETTE_SIZE;
-#ifdef HAS_VIDC
-		info->fix.visual = FB_VISUAL_STATIC_PSEUDOCOLOR;
-#else
 		info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
-#endif
 		break;
 #ifdef HAS_VIDC20
 	case 16:
@@ -971,9 +718,6 @@ static void acornfb_init_fbinfo(void)
 #if defined(HAS_VIDC20)
 	fb_info.var.red.length	   = 8;
 	fb_info.var.transp.length  = 4;
-#elif defined(HAS_VIDC)
-	fb_info.var.red.length	   = 4;
-	fb_info.var.transp.length  = 1;
 #endif
 	fb_info.var.green	   = fb_info.var.red;
 	fb_info.var.blue	   = fb_info.var.red;
@@ -1310,14 +1054,6 @@ static int acornfb_probe(struct platform_device *dev)
 		fb_info.fix.smem_start = handle;
 	}
 #endif
-#if defined(HAS_VIDC)
-	/*
-	 * Archimedes/A5000 machines use a fixed address for their
-	 * framebuffers.  Free unused pages
-	 */
-	free_unused_pages(PAGE_OFFSET + size, PAGE_OFFSET + MAX_SIZE);
-#endif
-
 	fb_info.fix.smem_len = size;
 	current_par.palette_size   = VIDC_PALETTE_SIZE;
 

+ 0 - 29
drivers/video/acornfb.h

@@ -13,10 +13,6 @@
 #include <asm/hardware/iomd.h>
 #define VIDC_PALETTE_SIZE	256
 #define VIDC_NAME		"VIDC20"
-#elif defined(HAS_VIDC)
-#include <asm/hardware/memc.h>
-#define VIDC_PALETTE_SIZE	16
-#define VIDC_NAME		"VIDC"
 #endif
 
 #define EXTEND8(x) ((x)|(x)<<8)
@@ -101,31 +97,6 @@ struct modex_params {
 	const struct modey_params *modey;
 };
 
-#ifdef HAS_VIDC
-
-#define VID_CTL_VS_NVSYNC	(1 << 3)
-#define VID_CTL_HS_NHSYNC	(1 << 2)
-#define VID_CTL_24MHz		(0)
-#define VID_CTL_25MHz		(1)
-#define VID_CTL_36MHz		(2)
-
-#define VIDC_CTRL_CSYNC		(1 << 7)
-#define VIDC_CTRL_INTERLACE	(1 << 6)
-#define VIDC_CTRL_FIFO_0_4	(0 << 4)
-#define VIDC_CTRL_FIFO_1_5	(1 << 4)
-#define VIDC_CTRL_FIFO_2_6	(2 << 4)
-#define VIDC_CTRL_FIFO_3_7	(3 << 4)
-#define VIDC_CTRL_1BPP		(0 << 2)
-#define VIDC_CTRL_2BPP		(1 << 2)
-#define VIDC_CTRL_4BPP		(2 << 2)
-#define VIDC_CTRL_8BPP		(3 << 2)
-#define VIDC_CTRL_DIV3		(0 << 0)
-#define VIDC_CTRL_DIV2		(1 << 0)
-#define VIDC_CTRL_DIV1_5	(2 << 0)
-#define VIDC_CTRL_DIV1		(3 << 0)
-
-#endif
-
 #ifdef HAS_VIDC20
 /*
  * VIDC20 registers

+ 0 - 2
drivers/w1/masters/mxc_w1.c

@@ -152,8 +152,6 @@ static int mxc_w1_remove(struct platform_device *pdev)
 
 	clk_disable_unprepare(mdev->clk);
 
-	platform_set_drvdata(pdev, NULL);
-
 	return 0;
 }
 

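Note: clearing drvdata on remove became unnecessary once the driver core started resetting it itself when a device is unbound (a change that, as I recall, landed around v3.9), so the dropped line was dead weight. The trimmed remove path, sketched with an illustrative device struct:

#include <linux/clk.h>
#include <linux/platform_device.h>

struct example_dev {
	struct clk *clk;
};

static int example_remove(struct platform_device *pdev)
{
	struct example_dev *mdev = platform_get_drvdata(pdev);

	clk_disable_unprepare(mdev->clk);
	/* no platform_set_drvdata(pdev, NULL): the driver core clears
	 * drvdata itself once the device is unbound */
	return 0;
}
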
+ 8 - 4
drivers/w1/w1.c

@@ -234,9 +234,11 @@ static ssize_t w1_master_attribute_store_search(struct device * dev,
 {
 	long tmp;
 	struct w1_master *md = dev_to_w1_master(dev);
+	int ret;
 
-	if (strict_strtol(buf, 0, &tmp) == -EINVAL)
-		return -EINVAL;
+	ret = kstrtol(buf, 0, &tmp);
+	if (ret)
+		return ret;
 
 	mutex_lock(&md->mutex);
 	md->search_count = tmp;
@@ -266,9 +268,11 @@ static ssize_t w1_master_attribute_store_pullup(struct device *dev,
 {
 	long tmp;
 	struct w1_master *md = dev_to_w1_master(dev);
+	int ret;
 
-	if (strict_strtol(buf, 0, &tmp) == -EINVAL)
-		return -EINVAL;
+	ret = kstrtol(buf, 0, &tmp);
+	if (ret)
+		return ret;
 
 	mutex_lock(&md->mutex);
 	md->enable_pullup = tmp;

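Note: strict_strtol() was deprecated in favour of the kstrto*() family; besides the rename, the new code forwards kstrtol()'s actual return value (-EINVAL for bad syntax, -ERANGE for overflow) instead of collapsing every failure to -EINVAL. The pattern in isolation, as a hypothetical sysfs store (not a real w1 attribute):

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t example_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	long val;
	int ret;

	ret = kstrtol(buf, 0, &val);	/* base 0: decimal, hex or octal */
	if (ret)
		return ret;		/* -EINVAL or -ERANGE, as-is */

	/* ... apply val under the appropriate lock ... */
	return count;
}
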
+ 4 - 2
drivers/watchdog/hpwdt.c

@@ -162,7 +162,8 @@ extern asmlinkage void asminline_call(struct cmn_registers *pi86Regs,
 #define HPWDT_ARCH	32
 
 asm(".text                          \n\t"
-    ".align 4                       \n"
+    ".align 4                       \n\t"
+    ".globl asminline_call	    \n"
     "asminline_call:                \n\t"
     "pushl       %ebp               \n\t"
     "movl        %esp, %ebp         \n\t"
@@ -352,7 +353,8 @@ static int detect_cru_service(void)
 #define HPWDT_ARCH	64
 
 asm(".text                      \n\t"
-    ".align 4                   \n"
+    ".align 4                   \n\t"
+    ".globl asminline_call	\n"
     "asminline_call:            \n\t"
     "pushq      %rbp            \n\t"
     "movq       %rsp, %rbp      \n\t"

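Note: the added .globl directive turns asminline_call from a file-local assembler label into a proper global symbol, so it appears in the symbol table like any C function and matches the extern asmlinkage declaration used by the C side. A stand-alone illustration of the same idea, not the hpwdt code (x86-64 AT&T syntax, return value in %eax, hypothetical function name):

int asm_answer(void);

asm(".text			\n\t"
    ".align 4			\n\t"
    ".globl asm_answer		\n"
    "asm_answer:		\n\t"
    "movl	$42, %eax	\n\t"
    "ret			\n\t");
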
+ 1 - 1
fs/affs/file.c

@@ -836,7 +836,7 @@ affs_truncate(struct inode *inode)
 		struct address_space *mapping = inode->i_mapping;
 		struct page *page;
 		void *fsdata;
-		u32 size = inode->i_size;
+		loff_t size = inode->i_size;
 		int res;
 
 		res = mapping->a_ops->write_begin(NULL, mapping, size, 0, 0, &page, &fsdata);

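Note: i_size is a loff_t (64-bit), and ->write_begin() takes its position argument as loff_t as well, so storing the size in a u32 silently truncates anything at or above 4 GiB. Whether AFFS can produce files that large in practice or not, the wider type is the correct one to pass down. A tiny illustration of the truncation:

#include <linux/types.h>

/* Illustration only: assigning a 64-bit size to a u32 drops the high bits. */
static void size_truncation_demo(void)
{
	loff_t size = 5ULL << 30;	/* 5 GiB */
	u32 truncated = size;		/* becomes 1 GiB: bits 32+ are lost */
	loff_t kept = size;		/* full value preserved */

	(void)truncated;
	(void)kept;
}
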
Some files were not shown because too many files changed in this diff