
Merge branch 'sh/kfr2r09'

Paul Mundt, 16 years ago
parent commit a3beddd0aa
100 changed files with 5122 additions and 359 deletions
  1. CREDITS (+1, -1)
  2. Documentation/RCU/rculist_nulls.txt (+6, -1)
  3. Documentation/connector/cn_test.c (+2, -2)
  4. Documentation/connector/ucon.c (+1, -1)
  5. Documentation/networking/6pack.txt (+1, -1)
  6. MAINTAINERS (+9, -2)
  7. Makefile (+1, -1)
  8. arch/sh/Makefile (+1, -0)
  9. arch/sh/boards/Kconfig (+7, -0)
  10. arch/sh/boards/mach-kfr2r09/Makefile (+1, -0)
  11. arch/sh/boards/mach-kfr2r09/setup.c (+153, -0)
  12. arch/sh/configs/kfr2r09_defconfig (+877, -0)
  13. arch/sh/tools/mach-types (+1, -0)
  14. arch/x86/kernel/cpu/perf_counter.c (+233, -20)
  15. block/blk-sysfs.c (+6, -5)
  16. block/elevator.c (+9, -4)
  17. drivers/block/ataflop.c (+1, -1)
  18. drivers/block/virtio_blk.c (+6, -1)
  19. drivers/block/z2ram.c (+1, -1)
  20. drivers/connector/cn_queue.c (+1, -1)
  21. drivers/connector/connector.c (+2, -2)
  22. drivers/hid/hid-core.c (+4, -2)
  23. drivers/hid/usbhid/hiddev.c (+3, -1)
  24. drivers/ide/ide-disk.c (+1, -0)
  25. drivers/ide/ide-tape.c (+1, -0)
  26. drivers/input/evdev.c (+1, -2)
  27. drivers/input/joydev.c (+1, -1)
  28. drivers/input/joystick/xpad.c (+13, -13)
  29. drivers/input/keyboard/atkbd.c (+32, -0)
  30. drivers/input/misc/pcspkr.c (+6, -2)
  31. drivers/input/misc/wistron_btns.c (+9, -0)
  32. drivers/mtd/ubi/gluebi.c (+1, -0)
  33. drivers/mtd/ubi/scan.c (+1, -1)
  34. drivers/net/Kconfig (+6, -0)
  35. drivers/net/Makefile (+1, -0)
  36. drivers/net/arm/Kconfig (+8, -0)
  37. drivers/net/arm/Makefile (+1, -0)
  38. drivers/net/arm/w90p910_ether.c (+1105, -0)
  39. drivers/net/atl1c/atl1c.h (+4, -4)
  40. drivers/net/atl1c/atl1c_main.c (+1, -1)
  41. drivers/net/bnx2x_link.c (+2, -1)
  42. drivers/net/bonding/bond_main.c (+10, -2)
  43. drivers/net/can/dev.c (+6, -2)
  44. drivers/net/can/sja1000/sja1000.c (+0, -1)
  45. drivers/net/e100.c (+3, -0)
  46. drivers/net/hamradio/6pack.c (+1, -1)
  47. drivers/net/ibm_newemac/rgmii.c (+4, -3)
  48. drivers/net/ixgbe/ixgbe_dcb_nl.c (+2, -4)
  49. drivers/net/jazzsonic.c (+1, -0)
  50. drivers/net/ks8851.c (+1322, -0)
  51. drivers/net/ks8851.h (+296, -0)
  52. drivers/net/macsonic.c (+8, -7)
  53. drivers/net/mlx4/en_ethtool.c (+1, -1)
  54. drivers/net/netxen/netxen_nic.h (+3, -0)
  55. drivers/net/netxen/netxen_nic_ctx.c (+7, -6)
  56. drivers/net/netxen/netxen_nic_hw.c (+5, -4)
  57. drivers/net/netxen/netxen_nic_init.c (+3, -2)
  58. drivers/net/netxen/netxen_nic_main.c (+22, -14)
  59. drivers/net/pcmcia/3c589_cs.c (+14, -7)
  60. drivers/net/sc92031.c (+1, -0)
  61. drivers/net/sky2.c (+9, -16)
  62. drivers/net/usb/Kconfig (+8, -0)
  63. drivers/net/usb/Makefile (+1, -0)
  64. drivers/net/usb/cdc-phonet.c (+461, -0)
  65. drivers/net/usb/cdc_eem.c (+1, -1)
  66. drivers/video/fbmon.c (+2, -2)
  67. fs/Kconfig (+1, -26)
  68. fs/nfs/client.c (+3, -15)
  69. fs/nfs/dir.c (+1, -1)
  70. fs/nfs/nfs4_fs.h (+6, -0)
  71. fs/nfs/nfs4proc.c (+29, -11)
  72. fs/nfs/nfs4state.c (+1, -1)
  73. fs/nilfs2/Kconfig (+25, -0)
  74. fs/pipe.c (+2, -2)
  75. include/linux/interrupt.h (+28, -0)
  76. include/linux/perf_counter.h (+4, -11)
  77. include/linux/sched.h (+2, -1)
  78. include/net/sock.h (+19, -13)
  79. include/net/tcp.h (+5, -0)
  80. init/Kconfig (+1, -1)
  81. kernel/fork.c (+3, -6)
  82. kernel/freezer.c (+7, -0)
  83. kernel/irq/internals.h (+1, -2)
  84. kernel/irq/manage.c (+49, -6)
  85. kernel/irq/migration.c (+1, -1)
  86. kernel/perf_counter.c (+92, -94)
  87. kernel/sched.c (+2, -2)
  88. kernel/sched_fair.c (+8, -2)
  89. kernel/softirq.c (+63, -1)
  90. kernel/time/clocksource.c (+1, -1)
  91. kernel/timer.c (+1, -1)
  92. net/can/bcm.c (+4, -0)
  93. net/can/raw.c (+4, -0)
  94. net/core/sock.c (+19, -3)
  95. net/ipv4/tcp_ipv4.c (+2, -1)
  96. net/ipv4/tcp_output.c (+1, -1)
  97. net/ipv6/tcp_ipv6.c (+2, -1)
  98. net/netfilter/nf_conntrack_core.c (+18, -3)
  99. net/netfilter/xt_osf.c (+3, -2)
  100. sound/core/seq/Makefile (+2, -5)

+ 1 - 1
CREDITS

@@ -1856,7 +1856,7 @@ E: rfkoenig@immd4.informatik.uni-erlangen.de
 D: The Linux Support Team Erlangen
 
 N: Andreas Koensgen
-E: ajk@iehk.rwth-aachen.de
+E: ajk@comnets.uni-bremen.de
 D: 6pack driver for AX.25
 
 N: Harald Koerfgen

+ 6 - 1
Documentation/RCU/rculist_nulls.txt

@@ -83,11 +83,12 @@ not detect it missed following items in original chain.
 obj = kmem_cache_alloc(...);
 lock_chain(); // typically a spin_lock()
 obj->key = key;
-atomic_inc(&obj->refcnt);
 /*
  * we need to make sure obj->key is updated before obj->next
+ * or obj->refcnt
  */
 smp_wmb();
+atomic_set(&obj->refcnt, 1);
 hlist_add_head_rcu(&obj->obj_node, list);
 unlock_chain(); // typically a spin_unlock()
 
@@ -159,6 +160,10 @@ out:
 obj = kmem_cache_alloc(cachep);
 lock_chain(); // typically a spin_lock()
 obj->key = key;
+/*
+ * changes to obj->key must be visible before refcnt one
+ */
+smp_wmb();
 atomic_set(&obj->refcnt, 1);
 /*
  * insert obj in RCU way (readers might be traversing chain)
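(Aside: pulling the corrected ordering from the two hunks above into one place. This is a minimal illustrative sketch, not kernel source; "struct obj", obj_node and lock_chain()/unlock_chain() follow the documentation's own example and stand in for the caller's object type and spinlock.)

	/*
	 * Illustrative only: initialize the object, make its fields
	 * visible, and only then publish it on the RCU-protected list.
	 */
	static struct obj *insert_obj(struct kmem_cache *cachep,
				      struct hlist_head *list, u32 key)
	{
		struct obj *obj = kmem_cache_alloc(cachep, GFP_KERNEL);

		if (!obj)
			return NULL;

		lock_chain();		/* typically a spin_lock() */
		obj->key = key;
		/* key and refcnt must be visible before the list link */
		smp_wmb();
		atomic_set(&obj->refcnt, 1);
		hlist_add_head_rcu(&obj->obj_node, list);
		unlock_chain();		/* typically a spin_unlock() */
		return obj;
	}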

+ 2 - 2
Documentation/connector/cn_test.c

@@ -1,7 +1,7 @@
 /*
  * 	cn_test.c
  * 
- * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
  * All rights reserved.
  * 
  * This program is free software; you can redistribute it and/or modify
@@ -194,5 +194,5 @@ module_init(cn_test_init);
 module_exit(cn_test_fini);
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>");
+MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
 MODULE_DESCRIPTION("Connector's test module");

+ 1 - 1
Documentation/connector/ucon.c

@@ -1,7 +1,7 @@
 /*
  * 	ucon.c
  *
- * Copyright (c) 2004+ Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * Copyright (c) 2004+ Evgeniy Polyakov <zbr@ioremap.net>
  *
  *
  * This program is free software; you can redistribute it and/or modify

+ 1 - 1
Documentation/networking/6pack.txt

@@ -1,7 +1,7 @@
 This is the 6pack-mini-HOWTO, written by
 
 Andreas Könsgen DG3KQ
-Internet: ajk@iehk.rwth-aachen.de
+Internet: ajk@comnets.uni-bremen.de
 AMPR-net: dg3kq@db0pra.ampr.org
 AX.25:    dg3kq@db0ach.#nrw.deu.eu
 

+ 9 - 2
MAINTAINERS

@@ -150,7 +150,7 @@ F:	drivers/scsi/53c700*
 
 6PACK NETWORK DRIVER FOR AX.25
 P:	Andreas Koensgen
-M:	ajk@iehk.rwth-aachen.de
+M:	ajk@comnets.uni-bremen.de
 L:	linux-hams@vger.kernel.org
 S:	Maintained
 F:	drivers/net/hamradio/6pack.c
@@ -1612,6 +1612,13 @@ S:	Supported
 F:	fs/configfs/
 F:	include/linux/configfs.h
 
+CONNECTOR
+P:	Evgeniy Polyakov
+M:	zbr@ioremap.net
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/connector/
+
 CONTROL GROUPS (CGROUPS)
 P:	Paul Menage
 M:	menage@google.com
@@ -4089,6 +4096,7 @@ L:	netfilter@vger.kernel.org
 L:	coreteam@netfilter.org
 W:	http://www.netfilter.org/
 W:	http://www.iptables.org/
+T:	git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-2.6.git
 S:	Supported
 F:	include/linux/netfilter*
 F:	include/linux/netfilter/
@@ -5586,7 +5594,6 @@ S:	Odd Fixes
 F:	drivers/net/starfire*
 
 STARMODE RADIO IP (STRIP) PROTOCOL DRIVER
-W:	http://mosquitonet.Stanford.EDU/strip.html
 S:	Orphan
 F:	drivers/net/wireless/strip.c
 F:	include/linux/if_strip.h

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 31
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Man-Eating Seals of Antiquity
 
 # *DOCUMENTATION*

+ 1 - 0
arch/sh/Makefile

@@ -136,6 +136,7 @@ machdir-$(CONFIG_SH_7751_SYSTEMH)		+= mach-systemh
 machdir-$(CONFIG_SH_EDOSK7705)			+= mach-edosk7705
 machdir-$(CONFIG_SH_HIGHLANDER)			+= mach-highlander
 machdir-$(CONFIG_SH_MIGOR)			+= mach-migor
+machdir-$(CONFIG_SH_KFR2R09)			+= mach-kfr2r09
 machdir-$(CONFIG_SH_SDK7780)			+= mach-sdk7780
 machdir-$(CONFIG_SH_X3PROTO)			+= mach-x3proto
 machdir-$(CONFIG_SH_SH7763RDP)			+= mach-sh7763rdp

+ 7 - 0
arch/sh/boards/Kconfig

@@ -193,6 +193,13 @@ config SH_AP325RXA
 	  Renesas "AP-325RXA" support.
 	  Compatible with ALGO SYSTEM CO.,LTD. "AP-320A"
 
+config SH_KFR2R09
+	bool "KFR2R09"
+	depends on CPU_SUBTYPE_SH7724
+	select ARCH_REQUIRE_GPIOLIB
+	help
+	  "Kit For R2R for 2009" support.
+
 config SH_SH7763RDP
 	bool "SH7763RDP"
 	depends on CPU_SUBTYPE_SH7763

+ 1 - 0
arch/sh/boards/mach-kfr2r09/Makefile

@@ -0,0 +1 @@
+obj-y	 := setup.o

+ 153 - 0
arch/sh/boards/mach-kfr2r09/setup.c

@@ -0,0 +1,153 @@
+/*
+ * KFR2R09 board support code
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/physmap.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/input.h>
+#include <asm/clock.h>
+#include <asm/machvec.h>
+#include <asm/io.h>
+#include <asm/sh_keysc.h>
+#include <cpu/sh7724.h>
+
+static struct mtd_partition kfr2r09_nor_flash_partitions[] =
+{
+	{
+		.name = "boot",
+		.offset = 0,
+		.size = (4 * 1024 * 1024),
+		.mask_flags = MTD_WRITEABLE,	/* Read-only */
+	},
+	{
+		.name = "other",
+		.offset = MTDPART_OFS_APPEND,
+		.size = MTDPART_SIZ_FULL,
+	},
+};
+
+static struct physmap_flash_data kfr2r09_nor_flash_data = {
+	.width		= 2,
+	.parts		= kfr2r09_nor_flash_partitions,
+	.nr_parts	= ARRAY_SIZE(kfr2r09_nor_flash_partitions),
+};
+
+static struct resource kfr2r09_nor_flash_resources[] = {
+	[0] = {
+		.name		= "NOR Flash",
+		.start		= 0x00000000,
+		.end		= 0x03ffffff,
+		.flags		= IORESOURCE_MEM,
+	}
+};
+
+static struct platform_device kfr2r09_nor_flash_device = {
+	.name		= "physmap-flash",
+	.resource	= kfr2r09_nor_flash_resources,
+	.num_resources	= ARRAY_SIZE(kfr2r09_nor_flash_resources),
+	.dev		= {
+		.platform_data = &kfr2r09_nor_flash_data,
+	},
+};
+
+static struct sh_keysc_info kfr2r09_sh_keysc_info = {
+	.mode = SH_KEYSC_MODE_1, /* KEYOUT0->4, KEYIN0->4 */
+	.scan_timing = 3,
+	.delay = 10,
+	.keycodes = {
+		KEY_PHONE, KEY_CLEAR, KEY_MAIL, KEY_WWW, KEY_ENTER,
+		KEY_1, KEY_2, KEY_3, 0, KEY_UP,
+		KEY_4, KEY_5, KEY_6, 0, KEY_LEFT,
+		KEY_7, KEY_8, KEY_9, KEY_PROG1, KEY_RIGHT,
+		KEY_S, KEY_0, KEY_P, KEY_PROG2, KEY_DOWN,
+		0, 0, 0, 0, 0
+	},
+};
+
+static struct resource kfr2r09_sh_keysc_resources[] = {
+	[0] = {
+		.name	= "KEYSC",
+		.start  = 0x044b0000,
+		.end    = 0x044b000f,
+		.flags  = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start  = 79,
+		.flags  = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device kfr2r09_sh_keysc_device = {
+	.name           = "sh_keysc",
+	.id             = 0, /* "keysc0" clock */
+	.num_resources  = ARRAY_SIZE(kfr2r09_sh_keysc_resources),
+	.resource       = kfr2r09_sh_keysc_resources,
+	.dev	= {
+		.platform_data	= &kfr2r09_sh_keysc_info,
+	},
+};
+
+static struct platform_device *kfr2r09_devices[] __initdata = {
+	&kfr2r09_nor_flash_device,
+	&kfr2r09_sh_keysc_device,
+};
+
+#define BSC_CS0BCR 0xfec10004
+#define BSC_CS0WCR 0xfec10024
+
+static int __init kfr2r09_devices_setup(void)
+{
+	/* enable SCIF1 serial port for YC401 console support */
+	gpio_request(GPIO_FN_SCIF1_RXD, NULL);
+	gpio_request(GPIO_FN_SCIF1_TXD, NULL);
+
+	/* setup NOR flash at CS0 */
+	ctrl_outl(0x36db0400, BSC_CS0BCR);
+	ctrl_outl(0x00000500, BSC_CS0WCR);
+
+	/* setup KEYSC pins */
+	gpio_request(GPIO_FN_KEYOUT0, NULL);
+	gpio_request(GPIO_FN_KEYOUT1, NULL);
+	gpio_request(GPIO_FN_KEYOUT2, NULL);
+	gpio_request(GPIO_FN_KEYOUT3, NULL);
+	gpio_request(GPIO_FN_KEYOUT4_IN6, NULL);
+	gpio_request(GPIO_FN_KEYIN0, NULL);
+	gpio_request(GPIO_FN_KEYIN1, NULL);
+	gpio_request(GPIO_FN_KEYIN2, NULL);
+	gpio_request(GPIO_FN_KEYIN3, NULL);
+	gpio_request(GPIO_FN_KEYIN4, NULL);
+	gpio_request(GPIO_FN_KEYOUT5_IN5, NULL);
+
+	return platform_add_devices(kfr2r09_devices,
+				    ARRAY_SIZE(kfr2r09_devices));
+}
+device_initcall(kfr2r09_devices_setup);
+
+/* Return the board specific boot mode pin configuration */
+static int kfr2r09_mode_pins(void)
+{
+	/* MD0=1, MD1=1, MD2=0: Clock Mode 3
+	 * MD3=0: 16-bit Area0 Bus Width
+	 * MD5=1: Little Endian
+	 * MD8=1: Test Mode Disabled
+	 */
+	return MODE_PIN0 | MODE_PIN1 | MODE_PIN5 | MODE_PIN8;
+}
+
+/*
+ * The Machine Vector
+ */
+static struct sh_machine_vector mv_kfr2r09 __initmv = {
+	.mv_name		= "kfr2r09",
+	.mv_mode_pins		= kfr2r09_mode_pins,
+};

+ 877 - 0
arch/sh/configs/kfr2r09_defconfig

@@ -0,0 +1,877 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.31-rc3
+# Thu Jul 23 17:45:09 2009
+#
+CONFIG_SUPERH=y
+CONFIG_SUPERH32=y
+# CONFIG_SUPERH64 is not set
+CONFIG_ARCH_DEFCONFIG="arch/sh/configs/shx3_defconfig"
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_SYS_SUPPORTS_CMT=y
+CONFIG_SYS_SUPPORTS_TMU=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_ARCH_NO_VIRT_TO_BUS=y
+CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_CLASSIC_RCU=y
+# CONFIG_TREE_RCU is not set
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_GROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_USER_SCHED=y
+# CONFIG_CGROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EMBEDDED=y
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+# CONFIG_KALLSYMS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_HAVE_PERF_COUNTERS=y
+
+#
+# Performance Counters
+#
+# CONFIG_PERF_COUNTERS is not set
+CONFIG_VM_EVENT_COUNTERS=y
+# CONFIG_STRIP_ASM_SYMS is not set
+CONFIG_COMPAT_BRK=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+CONFIG_HAVE_IOREMAP_PROT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_HAVE_CLK=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+# CONFIG_MODULES is not set
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_AS is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+CONFIG_DEFAULT_NOOP=y
+CONFIG_DEFAULT_IOSCHED="noop"
+# CONFIG_FREEZER is not set
+
+#
+# System type
+#
+CONFIG_CPU_SH4=y
+CONFIG_CPU_SH4A=y
+CONFIG_CPU_SHX2=y
+CONFIG_ARCH_SHMOBILE=y
+# CONFIG_CPU_SUBTYPE_SH7619 is not set
+# CONFIG_CPU_SUBTYPE_SH7201 is not set
+# CONFIG_CPU_SUBTYPE_SH7203 is not set
+# CONFIG_CPU_SUBTYPE_SH7206 is not set
+# CONFIG_CPU_SUBTYPE_SH7263 is not set
+# CONFIG_CPU_SUBTYPE_MXG is not set
+# CONFIG_CPU_SUBTYPE_SH7705 is not set
+# CONFIG_CPU_SUBTYPE_SH7706 is not set
+# CONFIG_CPU_SUBTYPE_SH7707 is not set
+# CONFIG_CPU_SUBTYPE_SH7708 is not set
+# CONFIG_CPU_SUBTYPE_SH7709 is not set
+# CONFIG_CPU_SUBTYPE_SH7710 is not set
+# CONFIG_CPU_SUBTYPE_SH7712 is not set
+# CONFIG_CPU_SUBTYPE_SH7720 is not set
+# CONFIG_CPU_SUBTYPE_SH7721 is not set
+# CONFIG_CPU_SUBTYPE_SH7750 is not set
+# CONFIG_CPU_SUBTYPE_SH7091 is not set
+# CONFIG_CPU_SUBTYPE_SH7750R is not set
+# CONFIG_CPU_SUBTYPE_SH7750S is not set
+# CONFIG_CPU_SUBTYPE_SH7751 is not set
+# CONFIG_CPU_SUBTYPE_SH7751R is not set
+# CONFIG_CPU_SUBTYPE_SH7760 is not set
+# CONFIG_CPU_SUBTYPE_SH4_202 is not set
+# CONFIG_CPU_SUBTYPE_SH7723 is not set
+CONFIG_CPU_SUBTYPE_SH7724=y
+# CONFIG_CPU_SUBTYPE_SH7763 is not set
+# CONFIG_CPU_SUBTYPE_SH7770 is not set
+# CONFIG_CPU_SUBTYPE_SH7780 is not set
+# CONFIG_CPU_SUBTYPE_SH7785 is not set
+# CONFIG_CPU_SUBTYPE_SH7786 is not set
+# CONFIG_CPU_SUBTYPE_SHX3 is not set
+# CONFIG_CPU_SUBTYPE_SH7343 is not set
+# CONFIG_CPU_SUBTYPE_SH7722 is not set
+# CONFIG_CPU_SUBTYPE_SH7366 is not set
+
+#
+# Memory management options
+#
+CONFIG_QUICKLIST=y
+CONFIG_MMU=y
+CONFIG_PAGE_OFFSET=0x80000000
+CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_MEMORY_START=0x08000000
+CONFIG_MEMORY_SIZE=0x08000000
+CONFIG_29BIT=y
+# CONFIG_X2TLB is not set
+CONFIG_VSYSCALL=y
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_DEFAULT=y
+CONFIG_MAX_ACTIVE_REGIONS=1
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_PAGE_SIZE_4KB=y
+# CONFIG_PAGE_SIZE_8KB is not set
+# CONFIG_PAGE_SIZE_16KB is not set
+# CONFIG_PAGE_SIZE_64KB is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_SPARSEMEM_STATIC=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_NR_QUICK=2
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+
+#
+# Cache configuration
+#
+CONFIG_CACHE_WRITEBACK=y
+# CONFIG_CACHE_WRITETHROUGH is not set
+# CONFIG_CACHE_OFF is not set
+
+#
+# Processor features
+#
+CONFIG_CPU_LITTLE_ENDIAN=y
+# CONFIG_CPU_BIG_ENDIAN is not set
+CONFIG_SH_FPU=y
+# CONFIG_SH_STORE_QUEUES is not set
+CONFIG_CPU_HAS_INTEVT=y
+CONFIG_CPU_HAS_SR_RB=y
+CONFIG_CPU_HAS_FPU=y
+
+#
+# Board support
+#
+# CONFIG_SH_7724_SOLUTION_ENGINE is not set
+CONFIG_SH_KFR2R09=y
+
+#
+# Timer and clock configuration
+#
+# CONFIG_SH_TIMER_TMU is not set
+CONFIG_SH_TIMER_CMT=y
+CONFIG_SH_PCLK_FREQ=33333333
+CONFIG_SH_CLK_CPG=y
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+
+#
+# CPU Frequency scaling
+#
+# CONFIG_CPU_FREQ is not set
+
+#
+# DMA support
+#
+# CONFIG_SH_DMA is not set
+
+#
+# Companion Chips
+#
+
+#
+# Additional SuperH Device Drivers
+#
+# CONFIG_HEARTBEAT is not set
+# CONFIG_PUSH_SWITCH is not set
+
+#
+# Kernel features
+#
+# CONFIG_HZ_100 is not set
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_300 is not set
+CONFIG_HZ_1000=y
+CONFIG_HZ=1000
+# CONFIG_SCHED_HRTICK is not set
+CONFIG_KEXEC=y
+# CONFIG_CRASH_DUMP is not set
+# CONFIG_SECCOMP is not set
+# CONFIG_PREEMPT_NONE is not set
+CONFIG_PREEMPT_VOLUNTARY=y
+# CONFIG_PREEMPT is not set
+CONFIG_GUSA=y
+# CONFIG_SPARSE_IRQ is not set
+
+#
+# Boot options
+#
+CONFIG_ZERO_PAGE_OFFSET=0x00001000
+CONFIG_BOOT_LINK_OFFSET=0x00800000
+CONFIG_ENTRY_OFFSET=0x00001000
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=ttySC1,115200"
+
+#
+# Bus options
+#
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Power management options (EXPERIMENTAL)
+#
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+# CONFIG_SUSPEND is not set
+# CONFIG_HIBERNATION is not set
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=y
+# CONFIG_MTD_CFI_AMDSTD is not set
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_RESERVE=1
+# CONFIG_MTD_UBI_GLUEBI is not set
+
+#
+# UBI debugging options
+#
+# CONFIG_MTD_UBI_DEBUG is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_RAM is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
+# CONFIG_MISC_DEVICES is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_NETDEVICES is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+CONFIG_KEYBOARD_SH_KEYSC=y
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=6
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+# CONFIG_I2C_CHARDEV is not set
+CONFIG_I2C_HELPER_AUTO=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_DESIGNWARE is not set
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_SH_MOBILE=y
+# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_PCF8575 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO expanders:
+#
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_SOUND is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+CONFIG_UIO=y
+# CONFIG_UIO_PDRV is not set
+CONFIG_UIO_PDRV_GENIRQ=y
+# CONFIG_UIO_SMX is not set
+# CONFIG_UIO_SERCOS3 is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+# CONFIG_EXT2_FS is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+CONFIG_FILE_LOCKING=y
+# CONFIG_FSNOTIFY is not set
+# CONFIG_INOTIFY is not set
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+# CONFIG_MISC_FILESYSTEMS is not set
+# CONFIG_NETWORK_FILESYSTEMS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_NLS is not set
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_KERNEL is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_LATENCYTOP is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_FTRACE_SYSCALLS=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_SH_STANDARD_BIOS is not set
+# CONFIG_EARLY_SCIF_CONSOLE is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_CRYPTO is not set
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+CONFIG_CRC_T10DIF=y
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC32=y
+CONFIG_CRC7=y
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
+CONFIG_NLATTR=y
+CONFIG_GENERIC_ATOMIC64=y

+ 1 - 0
arch/sh/tools/mach-types

@@ -56,3 +56,4 @@ SH7785LCR		SH_SH7785LCR
 URQUELL			SH_URQUELL
 ESPT			SH_ESPT
 POLARIS			SH_POLARIS
+KFR2R09			SH_KFR2R09

+ 233 - 20
arch/x86/kernel/cpu/perf_counter.c

@@ -65,6 +65,52 @@ static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
 	.enabled = 1,
 };
 
+/*
+ * Not sure about some of these
+ */
+static const u64 p6_perfmon_event_map[] =
+{
+  [PERF_COUNT_HW_CPU_CYCLES]		= 0x0079,
+  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
+  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0000,
+  [PERF_COUNT_HW_CACHE_MISSES]		= 0x0000,
+  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
+  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
+  [PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
+};
+
+static u64 p6_pmu_event_map(int event)
+{
+	return p6_perfmon_event_map[event];
+}
+
+/*
+ * Counter setting that is specified not to count anything.
+ * We use this to effectively disable a counter.
+ *
+ * L2_RQSTS with 0 MESI unit mask.
+ */
+#define P6_NOP_COUNTER			0x0000002EULL
+
+static u64 p6_pmu_raw_event(u64 event)
+{
+#define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
+#define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
+#define P6_EVNTSEL_EDGE_MASK		0x00040000ULL
+#define P6_EVNTSEL_INV_MASK		0x00800000ULL
+#define P6_EVNTSEL_COUNTER_MASK		0xFF000000ULL
+
+#define P6_EVNTSEL_MASK			\
+	(P6_EVNTSEL_EVENT_MASK |	\
+	 P6_EVNTSEL_UNIT_MASK  |	\
+	 P6_EVNTSEL_EDGE_MASK  |	\
+	 P6_EVNTSEL_INV_MASK   |	\
+	 P6_EVNTSEL_COUNTER_MASK)
+
+	return event & P6_EVNTSEL_MASK;
+}
+
+
 /*
  * Intel PerfMon v3. Used on Core2 and later.
  */
@@ -666,6 +712,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 {
 	struct perf_counter_attr *attr = &counter->attr;
 	struct hw_perf_counter *hwc = &counter->hw;
+	u64 config;
 	int err;
 
 	if (!x86_pmu_initialized())
@@ -718,14 +765,40 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 
 	if (attr->config >= x86_pmu.max_events)
 		return -EINVAL;
+
 	/*
 	 * The generic map:
 	 */
-	hwc->config |= x86_pmu.event_map(attr->config);
+	config = x86_pmu.event_map(attr->config);
+
+	if (config == 0)
+		return -ENOENT;
+
+	if (config == -1LL)
+		return -EINVAL;
+
+	hwc->config |= config;
 
 	return 0;
 }
 
+static void p6_pmu_disable_all(void)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	u64 val;
+
+	if (!cpuc->enabled)
+		return;
+
+	cpuc->enabled = 0;
+	barrier();
+
+	/* p6 only has one enable register */
+	rdmsrl(MSR_P6_EVNTSEL0, val);
+	val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+	wrmsrl(MSR_P6_EVNTSEL0, val);
+}
+
 static void intel_pmu_disable_all(void)
 {
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
@@ -767,6 +840,23 @@ void hw_perf_disable(void)
 	return x86_pmu.disable_all();
 }
 
+static void p6_pmu_enable_all(void)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	unsigned long val;
+
+	if (cpuc->enabled)
+		return;
+
+	cpuc->enabled = 1;
+	barrier();
+
+	/* p6 only has one enable register */
+	rdmsrl(MSR_P6_EVNTSEL0, val);
+	val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+	wrmsrl(MSR_P6_EVNTSEL0, val);
+}
+
 static void intel_pmu_enable_all(void)
 {
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
@@ -784,13 +874,13 @@ static void amd_pmu_enable_all(void)
 	barrier();
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		struct perf_counter *counter = cpuc->counters[idx];
 		u64 val;
 
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
-		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
-			continue;
+
+		val = counter->hw.config;
 		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
 		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
 	}
@@ -819,16 +909,13 @@ static inline void intel_pmu_ack_status(u64 ack)
 
 static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 {
-	int err;
-	err = checking_wrmsrl(hwc->config_base + idx,
+	(void)checking_wrmsrl(hwc->config_base + idx,
 			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
 }
 
 static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 {
-	int err;
-	err = checking_wrmsrl(hwc->config_base + idx,
-			      hwc->config);
+	(void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
 }
 
 static inline void
@@ -836,13 +923,24 @@ intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
 {
 	int idx = __idx - X86_PMC_IDX_FIXED;
 	u64 ctrl_val, mask;
-	int err;
 
 	mask = 0xfULL << (idx * 4);
 
 	rdmsrl(hwc->config_base, ctrl_val);
 	ctrl_val &= ~mask;
-	err = checking_wrmsrl(hwc->config_base, ctrl_val);
+	(void)checking_wrmsrl(hwc->config_base, ctrl_val);
+}
+
+static inline void
+p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	u64 val = P6_NOP_COUNTER;
+
+	if (cpuc->enabled)
+		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+
+	(void)checking_wrmsrl(hwc->config_base + idx, val);
 }
 
 static inline void
@@ -943,6 +1041,19 @@ intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
 	err = checking_wrmsrl(hwc->config_base, ctrl_val);
 }
 
+static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	u64 val;
+
+	val = hwc->config;
+	if (cpuc->enabled)
+		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+
+	(void)checking_wrmsrl(hwc->config_base + idx, val);
+}
+
+
 static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 {
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
@@ -959,8 +1070,6 @@ static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 
 	if (cpuc->enabled)
 		x86_pmu_enable_counter(hwc, idx);
-	else
-		x86_pmu_disable_counter(hwc, idx);
 }
 
 static int
@@ -1176,6 +1285,49 @@ static void intel_pmu_reset(void)
 	local_irq_restore(flags);
 }
 
+static int p6_pmu_handle_irq(struct pt_regs *regs)
+{
+	struct perf_sample_data data;
+	struct cpu_hw_counters *cpuc;
+	struct perf_counter *counter;
+	struct hw_perf_counter *hwc;
+	int idx, handled = 0;
+	u64 val;
+
+	data.regs = regs;
+	data.addr = 0;
+
+	cpuc = &__get_cpu_var(cpu_hw_counters);
+
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
+
+		counter = cpuc->counters[idx];
+		hwc = &counter->hw;
+
+		val = x86_perf_counter_update(counter, hwc, idx);
+		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
+			continue;
+
+		/*
+		 * counter overflow
+		 */
+		handled		= 1;
+		data.period	= counter->hw.last_period;
+
+		if (!x86_perf_counter_set_period(counter, hwc, idx))
+			continue;
+
+		if (perf_counter_overflow(counter, 1, &data))
+			p6_pmu_disable_counter(hwc, idx);
+	}
+
+	if (handled)
+		inc_irq_stat(apic_perf_irqs);
+
+	return handled;
+}
 
 /*
  * This handler is triggered by the local APIC, so the APIC IRQ handling
@@ -1185,14 +1337,13 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 {
 	struct perf_sample_data data;
 	struct cpu_hw_counters *cpuc;
-	int bit, cpu, loops;
+	int bit, loops;
 	u64 ack, status;
 
 	data.regs = regs;
 	data.addr = 0;
 
-	cpu = smp_processor_id();
-	cpuc = &per_cpu(cpu_hw_counters, cpu);
+	cpuc = &__get_cpu_var(cpu_hw_counters);
 
 	perf_disable();
 	status = intel_pmu_get_status();
@@ -1249,14 +1400,13 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
 	struct cpu_hw_counters *cpuc;
 	struct perf_counter *counter;
 	struct hw_perf_counter *hwc;
-	int cpu, idx, handled = 0;
+	int idx, handled = 0;
 	u64 val;
 
 	data.regs = regs;
 	data.addr = 0;
 
-	cpu = smp_processor_id();
-	cpuc = &per_cpu(cpu_hw_counters, cpu);
+	cpuc = &__get_cpu_var(cpu_hw_counters);
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		if (!test_bit(idx, cpuc->active_mask))
@@ -1353,6 +1503,32 @@ static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
 	.priority		= 1
 };
 
+static struct x86_pmu p6_pmu = {
+	.name			= "p6",
+	.handle_irq		= p6_pmu_handle_irq,
+	.disable_all		= p6_pmu_disable_all,
+	.enable_all		= p6_pmu_enable_all,
+	.enable			= p6_pmu_enable_counter,
+	.disable		= p6_pmu_disable_counter,
+	.eventsel		= MSR_P6_EVNTSEL0,
+	.perfctr		= MSR_P6_PERFCTR0,
+	.event_map		= p6_pmu_event_map,
+	.raw_event		= p6_pmu_raw_event,
+	.max_events		= ARRAY_SIZE(p6_perfmon_event_map),
+	.max_period		= (1ULL << 31) - 1,
+	.version		= 0,
+	.num_counters		= 2,
+	/*
+	 * Counters have 40 bits implemented. However they are designed such
+	 * that bits [32-39] are sign extensions of bit 31. As such the
+	 * effective width of a counter for P6-like PMU is 32 bits only.
+	 *
+	 * See IA-32 Intel Architecture Software developer manual Vol 3B
+	 */
+	.counter_bits		= 32,
+	.counter_mask		= (1ULL << 32) - 1,
+};
+
 static struct x86_pmu intel_pmu = {
 	.name			= "Intel",
 	.handle_irq		= intel_pmu_handle_irq,
@@ -1392,6 +1568,37 @@ static struct x86_pmu amd_pmu = {
 	.max_period		= (1ULL << 47) - 1,
 };
 
+static int p6_pmu_init(void)
+{
+	switch (boot_cpu_data.x86_model) {
+	case 1:
+	case 3:  /* Pentium Pro */
+	case 5:
+	case 6:  /* Pentium II */
+	case 7:
+	case 8:
+	case 11: /* Pentium III */
+		break;
+	case 9:
+	case 13:
+		/* Pentium M */
+		break;
+	default:
+		pr_cont("unsupported p6 CPU model %d ",
+			boot_cpu_data.x86_model);
+		return -ENODEV;
+	}
+
+	if (!cpu_has_apic) {
+		pr_info("no Local APIC, try rebooting with lapic");
+		return -ENODEV;
+	}
+
+	x86_pmu				= p6_pmu;
+
+	return 0;
+}
+
 static int intel_pmu_init(void)
 {
 	union cpuid10_edx edx;
@@ -1400,8 +1607,14 @@ static int intel_pmu_init(void)
 	unsigned int ebx;
 	int version;
 
-	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
+	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
+		/* check for P6 processor family */
+	   if (boot_cpu_data.x86 == 6) {
+		return p6_pmu_init();
+	   } else {
 		return -ENODEV;
+	   }
+	}
 
 	/*
 	 * Check whether the Architectural PerfMon supports

+ 6 - 5
block/blk-sysfs.c

@@ -16,9 +16,9 @@ struct queue_sysfs_entry {
 };
 
 static ssize_t
-queue_var_show(unsigned int var, char *page)
+queue_var_show(unsigned long var, char *page)
 {
-	return sprintf(page, "%d\n", var);
+	return sprintf(page, "%lu\n", var);
 }
 
 static ssize_t
@@ -77,7 +77,8 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 
 static ssize_t queue_ra_show(struct request_queue *q, char *page)
 {
-	int ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
+	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
+					(PAGE_CACHE_SHIFT - 10);
 
 	return queue_var_show(ra_kb, (page));
 }
@@ -189,9 +190,9 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
 
 static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
 {
-	unsigned int set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
+	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
 
-	return queue_var_show(set != 0, page);
+	return queue_var_show(set, page);
 }
 
 static ssize_t

+ 9 - 4
block/elevator.c

@@ -101,11 +101,16 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 		return 0;
 
 	/*
-	 * Don't merge if failfast settings don't match
+	 * Don't merge if failfast settings don't match.
+	 *
+	 * FIXME: The negation in front of each condition is necessary
+	 * because bio and request flags use different bit positions
+	 * and the accessors return those bits directly.  This
+	 * ugliness will soon go away.
 	 */
-	if (bio_failfast_dev(bio)	!= blk_failfast_dev(rq)		||
-	    bio_failfast_transport(bio)	!= blk_failfast_transport(rq)	||
-	    bio_failfast_driver(bio)	!= blk_failfast_driver(rq))
+	if (!bio_failfast_dev(bio)	 != !blk_failfast_dev(rq)	||
+	    !bio_failfast_transport(bio) != !blk_failfast_transport(rq)	||
+	    !bio_failfast_driver(bio)	 != !blk_failfast_driver(rq))
 		return 0;
 
 	if (!elv_iosched_allow_merge(rq, bio))
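(Aside: the double negation added above is plain boolean normalization. As the FIXME notes, the bio and request accessors return flag bits stored at different positions, so the raw values can never compare equal even when both flags are set. A minimal sketch of the idea, with a hypothetical helper name:)

	static inline int failfast_flags_differ(unsigned int bio_bit,
						unsigned int rq_bit)
	{
		/* collapse each raw bit to 0 or 1 before comparing */
		return !bio_bit != !rq_bit;
	}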

+ 1 - 1
drivers/block/ataflop.c

@@ -1627,7 +1627,7 @@ static int fd_ioctl(struct block_device *bdev, fmode_t mode,
 				drive, dtp->blocks, dtp->spt, dtp->stretch);
 
 		/* sanity check */
-		if (!dtp || setprm.track != dtp->blocks/dtp->spt/2 ||
+		if (setprm.track != dtp->blocks/dtp->spt/2 ||
 		    setprm.head != 2) {
 			redo_fd_request();
 			return -EINVAL;

+ 6 - 1
drivers/block/virtio_blk.c

@@ -427,7 +427,12 @@ static unsigned int features[] = {
 	VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_IDENTIFY
 };
 
-static struct virtio_driver virtio_blk = {
+/*
+ * virtio_blk causes spurious section mismatch warning by
+ * simultaneously referring to a __devinit and a __devexit function.
+ * Use __refdata to avoid this warning.
+ */
+static struct virtio_driver __refdata virtio_blk = {
 	.feature_table = features,
 	.feature_table_size = ARRAY_SIZE(features),
 	.driver.name =	KBUILD_MODNAME,

+ 1 - 1
drivers/block/z2ram.c

@@ -374,7 +374,7 @@ err:
 static void __exit z2_exit(void)
 {
     int i, j;
-    blk_unregister_region(MKDEV(Z2RAM_MAJOR, 0), 256);
+    blk_unregister_region(MKDEV(Z2RAM_MAJOR, 0), Z2MINOR_COUNT);
     unregister_blkdev(Z2RAM_MAJOR, DEVICE_NAME);
     del_gendisk(z2ram_gendisk);
     put_disk(z2ram_gendisk);

+ 1 - 1
drivers/connector/cn_queue.c

@@ -1,7 +1,7 @@
 /*
  * 	cn_queue.c
  *
- * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify

+ 2 - 2
drivers/connector/connector.c

@@ -1,7 +1,7 @@
 /*
  * 	connector.c
  *
- * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -33,7 +33,7 @@
 #include <net/sock.h>
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>");
+MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
 MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector.");
 
 static u32 cn_idx = CN_IDX_CONNECTOR;

+ 4 - 2
drivers/hid/hid-core.c

@@ -1075,14 +1075,16 @@ EXPORT_SYMBOL_GPL(hid_report_raw_event);
  */
 int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int interrupt)
 {
-	struct hid_report_enum *report_enum = hid->report_enum + type;
-	struct hid_driver *hdrv = hid->driver;
+	struct hid_report_enum *report_enum;
+	struct hid_driver *hdrv;
 	struct hid_report *report;
 	unsigned int i;
 	int ret;
 
 	if (!hid || !hid->driver)
 		return -ENODEV;
+	report_enum = hid->report_enum + type;
+	hdrv = hid->driver;
 
 	if (!size) {
 		dbg_hid("empty report\n");

+ 3 - 1
drivers/hid/usbhid/hiddev.c

@@ -527,8 +527,10 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
 			goto goodreturn;
 
 		case HIDIOCGCOLLECTIONINDEX:
+			i = field->usage[uref->usage_index].collection_index;
+			unlock_kernel();
 			kfree(uref_multi);
-			return field->usage[uref->usage_index].collection_index;
+			return i;
 		case HIDIOCGUSAGES:
 			for (i = 0; i < uref_multi->num_values; i++)
 				uref_multi->values[i] =

+ 1 - 0
drivers/ide/ide-disk.c

@@ -455,6 +455,7 @@ static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
 
 	rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
 	rq->special = cmd;
+	cmd->rq = rq;
 }
 
 ide_devset_get(multcount, mult_count);

+ 1 - 0
drivers/ide/ide-tape.c

@@ -1064,6 +1064,7 @@ static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd,
 		tape->best_dsc_rw_freq = config.dsc_rw_frequency;
 		break;
 	case 0x0350:
+		memset(&config, 0, sizeof(config));
 		config.dsc_rw_frequency = (int) tape->best_dsc_rw_freq;
 		config.nr_stages = 1;
 		if (copy_to_user(argp, &config, sizeof(config)))

+ 1 - 2
drivers/input/evdev.c

@@ -608,8 +608,7 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
 						    p, compat_mode);
 
 			if (_IOC_NR(cmd) == _IOC_NR(EVIOCGNAME(0)))
-				return str_to_user(dev_name(&evdev->dev),
-						   _IOC_SIZE(cmd), p);
+				return str_to_user(dev->name, _IOC_SIZE(cmd), p);
 
 			if (_IOC_NR(cmd) == _IOC_NR(EVIOCGPHYS(0)))
 				return str_to_user(dev->phys, _IOC_SIZE(cmd), p);

+ 1 - 1
drivers/input/joydev.c

@@ -536,7 +536,7 @@ static int joydev_ioctl_common(struct joydev *joydev,
 	default:
 		if ((cmd & ~IOCSIZE_MASK) == JSIOCGNAME(0)) {
 			int len;
-			const char *name = dev_name(&dev->dev);
+			const char *name = dev->name;
 
 			if (!name)
 				return 0;

+ 13 - 13
drivers/input/joystick/xpad.c

@@ -470,20 +470,20 @@ static void xpad_irq_out(struct urb *urb)
 	status = urb->status;
 
 	switch (status) {
-		case 0:
+	case 0:
 		/* success */
-		break;
-		case -ECONNRESET:
-		case -ENOENT:
-		case -ESHUTDOWN:
-			/* this urb is terminated, clean up */
-			dbg("%s - urb shutting down with status: %d",
-				__func__, status);
-			return;
-		default:
-			dbg("%s - nonzero urb status received: %d",
-				__func__, status);
-			goto exit;
+		return;
+
+	case -ECONNRESET:
+	case -ENOENT:
+	case -ESHUTDOWN:
+		/* this urb is terminated, clean up */
+		dbg("%s - urb shutting down with status: %d", __func__, status);
+		return;
+
+	default:
+		dbg("%s - nonzero urb status received: %d", __func__, status);
+		goto exit;
 	}
 
 exit:

+ 32 - 0
drivers/input/keyboard/atkbd.c

@@ -894,6 +894,13 @@ static unsigned int atkbd_amilo_pa1510_forced_release_keys[] = {
 	0xb0, 0xae, -1U
 };
 
+/*
+ * Amilo Pi 3525 key release for Fn+Volume keys not working
+ */
+static unsigned int atkbd_amilo_pi3525_forced_release_keys[] = {
+	0x20, 0xa0, 0x2e, 0xae, 0x30, 0xb0, -1U
+};
+
 /*
  * Amilo Xi 3650 key release for light touch bar not working
  */
@@ -901,6 +908,13 @@ static unsigned int atkbd_amilo_xi3650_forced_release_keys[] = {
 	0x67, 0xed, 0x90, 0xa2, 0x99, 0xa4, 0xae, 0xb0, -1U
 };
 
+/*
+ * Soltech TA12 system with broken key release on volume keys and mute key
+ */
+static unsigned int atkdb_soltech_ta12_forced_release_keys[] = {
+	0xa0, 0xae, 0xb0, -1U
+};
+
 /*
  * atkbd_set_keycode_table() initializes keyboard's keycode table
  * according to the selected scancode set
@@ -1567,6 +1581,15 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
 		.callback = atkbd_setup_forced_release,
 		.driver_data = atkbd_amilo_pa1510_forced_release_keys,
 	},
+	{
+		.ident = "Fujitsu Amilo Pi 3525",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pi 3525"),
+		},
+		.callback = atkbd_setup_forced_release,
+		.driver_data = atkbd_amilo_pi3525_forced_release_keys,
+	},
 	{
 		.ident = "Fujitsu Amilo Xi 3650",
 		.matches = {
@@ -1576,6 +1599,15 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
 		.callback = atkbd_setup_forced_release,
 		.driver_data = atkbd_amilo_xi3650_forced_release_keys,
 	},
+	{
+		.ident = "Soltech Corporation TA12",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Soltech Corporation"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "TA12"),
+		},
+		.callback = atkbd_setup_forced_release,
+		.driver_data = atkdb_soltech_ta12_forced_release_keys,
+	},
 	{ }
 };
 

+ 6 - 2
drivers/input/misc/pcspkr.c

@@ -114,7 +114,7 @@ static int __devexit pcspkr_remove(struct platform_device *dev)
 	return 0;
 }
 
-static int pcspkr_suspend(struct platform_device *dev, pm_message_t state)
+static int pcspkr_suspend(struct device *dev)
 {
 	pcspkr_event(NULL, EV_SND, SND_BELL, 0);
 
@@ -127,14 +127,18 @@ static void pcspkr_shutdown(struct platform_device *dev)
 	pcspkr_event(NULL, EV_SND, SND_BELL, 0);
 }
 
+static struct dev_pm_ops pcspkr_pm_ops = {
+	.suspend = pcspkr_suspend,
+};
+
 static struct platform_driver pcspkr_platform_driver = {
 	.driver		= {
 		.name	= "pcspkr",
 		.owner	= THIS_MODULE,
+		.pm	= &pcspkr_pm_ops,
 	},
 	.probe		= pcspkr_probe,
 	.remove		= __devexit_p(pcspkr_remove),
-	.suspend	= pcspkr_suspend,
 	.shutdown	= pcspkr_shutdown,
 };
 

+ 9 - 0
drivers/input/misc/wistron_btns.c

@@ -644,6 +644,15 @@ static struct dmi_system_id dmi_ids[] __initdata = {
 		},
 		.driver_data = keymap_fs_amilo_pro_v2000
 	},
+	{
+		.callback = dmi_matched,
+		.ident = "Maxdata Pro 7000 DX",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "MAXDATA"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Pro 7000"),
+		},
+		.driver_data = keymap_fs_amilo_pro_v2000
+	},
 	{
 		.callback = dmi_matched,
 		.ident = "Fujitsu N3510",

+ 1 - 0
drivers/mtd/ubi/gluebi.c

@@ -332,6 +332,7 @@ static int gluebi_create(struct ubi_device_info *di,
 	}
 
 	gluebi->vol_id = vi->vol_id;
+	gluebi->ubi_num = vi->ubi_num;
 	mtd->type = MTD_UBIVOLUME;
 	if (!di->ro_mode)
 		mtd->flags = MTD_WRITEABLE;

+ 1 - 1
drivers/mtd/ubi/scan.c

@@ -781,7 +781,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
 			return -EINVAL;
 		}
 
-		image_seq = be32_to_cpu(ech->ec);
+		image_seq = be32_to_cpu(ech->image_seq);
 		if (!si->image_seq_set) {
 			ubi->image_seq = image_seq;
 			si->image_seq_set = 1;

+ 6 - 0
drivers/net/Kconfig

@@ -1729,6 +1729,12 @@ config KS8842
 	help
 	  This platform driver is for Micrel KSZ8842 chip.
 
+config KS8851
+       tristate "Micrel KS8851 SPI"
+       depends on SPI
+       help
+         SPI driver for Micrel KS8851 SPI attached network chip.
+
 config VIA_RHINE
 	tristate "VIA Rhine support"
 	depends on NET_PCI && PCI

+ 1 - 0
drivers/net/Makefile

@@ -88,6 +88,7 @@ obj-$(CONFIG_SKGE) += skge.o
 obj-$(CONFIG_SKY2) += sky2.o
 obj-$(CONFIG_SKFP) += skfp/
 obj-$(CONFIG_KS8842)	+= ks8842.o
+obj-$(CONFIG_KS8851)	+= ks8851.o
 obj-$(CONFIG_VIA_RHINE) += via-rhine.o
 obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
 obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o

+ 8 - 0
drivers/net/arm/Kconfig

@@ -63,3 +63,11 @@ config IXP4XX_ETH
 	help
 	  Say Y here if you want to use built-in Ethernet ports
 	  on IXP4xx processor.
+
+config W90P910_ETH
+	tristate "Nuvoton w90p910 Ethernet support"
+	depends on ARM && ARCH_W90X900
+	select PHYLIB
+	help
+	  Say Y here if you want to use built-in Ethernet ports
+	  on w90p910 processor.

+ 1 - 0
drivers/net/arm/Makefile

@@ -11,3 +11,4 @@ obj-$(CONFIG_ARM_AT91_ETHER)	+= at91_ether.o
 obj-$(CONFIG_ARM_KS8695_ETHER)	+= ks8695net.o
 obj-$(CONFIG_EP93XX_ETH)	+= ep93xx_eth.o
 obj-$(CONFIG_IXP4XX_ETH)	+= ixp4xx_eth.o
+obj-$(CONFIG_W90P910_ETH)	+= w90p910_ether.o

+ 1105 - 0
drivers/net/arm/w90p910_ether.c

@@ -0,0 +1,1105 @@
+/*
+ * Copyright (c) 2008-2009 Nuvoton technology corporation.
+ *
+ * Wan ZongShun <mcuos.com@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation;version 2 of the License.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mii.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#define DRV_MODULE_NAME		"w90p910-emc"
+#define DRV_MODULE_VERSION	"0.1"
+
+/* Ethernet MAC Registers */
+#define REG_CAMCMR		0x00
+#define REG_CAMEN		0x04
+#define REG_CAMM_BASE		0x08
+#define REG_CAML_BASE		0x0c
+#define REG_TXDLSA		0x88
+#define REG_RXDLSA		0x8C
+#define REG_MCMDR		0x90
+#define REG_MIID		0x94
+#define REG_MIIDA		0x98
+#define REG_FFTCR		0x9C
+#define REG_TSDR		0xa0
+#define REG_RSDR		0xa4
+#define REG_DMARFC		0xa8
+#define REG_MIEN		0xac
+#define REG_MISTA		0xb0
+#define REG_CTXDSA		0xcc
+#define REG_CTXBSA		0xd0
+#define REG_CRXDSA		0xd4
+#define REG_CRXBSA		0xd8
+
+/* mac controller bit */
+#define MCMDR_RXON		0x01
+#define MCMDR_ACP		(0x01 << 3)
+#define MCMDR_SPCRC		(0x01 << 5)
+#define MCMDR_TXON		(0x01 << 8)
+#define MCMDR_FDUP		(0x01 << 18)
+#define MCMDR_ENMDC		(0x01 << 19)
+#define MCMDR_OPMOD		(0x01 << 20)
+#define SWR			(0x01 << 24)
+
+/* cam command regiser */
+#define CAMCMR_AUP		0x01
+#define CAMCMR_AMP		(0x01 << 1)
+#define CAMCMR_ABP		(0x01 << 2)
+#define CAMCMR_CCAM		(0x01 << 3)
+#define CAMCMR_ECMP		(0x01 << 4)
+#define CAM0EN			0x01
+
+/* mac mii controller bit */
+#define MDCCR			(0x0a << 20)
+#define PHYAD			(0x01 << 8)
+#define PHYWR			(0x01 << 16)
+#define PHYBUSY			(0x01 << 17)
+#define PHYPRESP		(0x01 << 18)
+#define CAM_ENTRY_SIZE		0x08
+
+/* rx and tx status */
+#define TXDS_TXCP		(0x01 << 19)
+#define RXDS_CRCE		(0x01 << 17)
+#define RXDS_PTLE		(0x01 << 19)
+#define RXDS_RXGD		(0x01 << 20)
+#define RXDS_ALIE		(0x01 << 21)
+#define RXDS_RP			(0x01 << 22)
+
+/* mac interrupt status*/
+#define MISTA_EXDEF		(0x01 << 19)
+#define MISTA_TXBERR		(0x01 << 24)
+#define MISTA_TDU		(0x01 << 23)
+#define MISTA_RDU		(0x01 << 10)
+#define MISTA_RXBERR		(0x01 << 11)
+
+#define ENSTART			0x01
+#define ENRXINTR		0x01
+#define ENRXGD			(0x01 << 4)
+#define ENRXBERR		(0x01 << 11)
+#define ENTXINTR		(0x01 << 16)
+#define ENTXCP			(0x01 << 18)
+#define ENTXABT			(0x01 << 21)
+#define ENTXBERR		(0x01 << 24)
+#define ENMDC			(0x01 << 19)
+#define PHYBUSY			(0x01 << 17)
+#define MDCCR_VAL		0xa00000
+
+/* rx and tx owner bit */
+#define RX_OWEN_DMA		(0x01 << 31)
+#define RX_OWEN_CPU		(~(0x03 << 30))
+#define TX_OWEN_DMA		(0x01 << 31)
+#define TX_OWEN_CPU		(~(0x01 << 31))
+
+/* tx frame desc controller bit */
+#define MACTXINTEN		0x04
+#define CRCMODE			0x02
+#define PADDINGMODE		0x01
+
+/* fftcr controller bit */
+#define TXTHD 			(0x03 << 8)
+#define BLENGTH			(0x01 << 20)
+
+/* global setting for driver */
+#define RX_DESC_SIZE		50
+#define TX_DESC_SIZE		10
+#define MAX_RBUFF_SZ		0x600
+#define MAX_TBUFF_SZ		0x600
+#define TX_TIMEOUT		50
+#define DELAY			1000
+#define CAM0			0x0
+
+static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg);
+
+struct w90p910_rxbd {
+	unsigned int sl;
+	unsigned int buffer;
+	unsigned int reserved;
+	unsigned int next;
+};
+
+struct w90p910_txbd {
+	unsigned int mode;
+	unsigned int buffer;
+	unsigned int sl;
+	unsigned int next;
+};
+
+struct recv_pdesc {
+	struct w90p910_rxbd desclist[RX_DESC_SIZE];
+	char recv_buf[RX_DESC_SIZE][MAX_RBUFF_SZ];
+};
+
+struct tran_pdesc {
+	struct w90p910_txbd desclist[TX_DESC_SIZE];
+	char tran_buf[TX_DESC_SIZE][MAX_TBUFF_SZ];
+};
+
+struct  w90p910_ether {
+	struct recv_pdesc *rdesc;
+	struct recv_pdesc *rdesc_phys;
+	struct tran_pdesc *tdesc;
+	struct tran_pdesc *tdesc_phys;
+	struct net_device_stats stats;
+	struct platform_device *pdev;
+	struct sk_buff *skb;
+	struct clk *clk;
+	struct clk *rmiiclk;
+	struct mii_if_info mii;
+	struct timer_list check_timer;
+	void __iomem *reg;
+	unsigned int rxirq;
+	unsigned int txirq;
+	unsigned int cur_tx;
+	unsigned int cur_rx;
+	unsigned int finish_tx;
+	unsigned int rx_packets;
+	unsigned int rx_bytes;
+	unsigned int start_tx_ptr;
+	unsigned int start_rx_ptr;
+	unsigned int linkflag;
+	spinlock_t lock;
+};
+
+static void update_linkspeed_register(struct net_device *dev,
+				unsigned int speed, unsigned int duplex)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	unsigned int val;
+
+	val = __raw_readl(ether->reg + REG_MCMDR);
+
+	if (speed == SPEED_100) {
+		/* 100 full/half duplex */
+		if (duplex == DUPLEX_FULL) {
+			val |= (MCMDR_OPMOD | MCMDR_FDUP);
+		} else {
+			val |= MCMDR_OPMOD;
+			val &= ~MCMDR_FDUP;
+		}
+	} else {
+		/* 10 full/half duplex */
+		if (duplex == DUPLEX_FULL) {
+			val |= MCMDR_FDUP;
+			val &= ~MCMDR_OPMOD;
+		} else {
+			val &= ~(MCMDR_FDUP | MCMDR_OPMOD);
+		}
+	}
+
+	__raw_writel(val, ether->reg + REG_MCMDR);
+}
+
+static void update_linkspeed(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	struct platform_device *pdev;
+	unsigned int bmsr, bmcr, lpa, speed, duplex;
+
+	pdev = ether->pdev;
+
+	if (!mii_link_ok(&ether->mii)) {
+		ether->linkflag = 0x0;
+		netif_carrier_off(dev);
+		dev_warn(&pdev->dev, "%s: Link down.\n", dev->name);
+		return;
+	}
+
+	if (ether->linkflag == 1)
+		return;
+
+	bmsr = w90p910_mdio_read(dev, ether->mii.phy_id, MII_BMSR);
+	bmcr = w90p910_mdio_read(dev, ether->mii.phy_id, MII_BMCR);
+
+	if (bmcr & BMCR_ANENABLE) {
+		if (!(bmsr & BMSR_ANEGCOMPLETE))
+			return;
+
+		lpa = w90p910_mdio_read(dev, ether->mii.phy_id, MII_LPA);
+
+		if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF))
+			speed = SPEED_100;
+		else
+			speed = SPEED_10;
+
+		if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL))
+			duplex = DUPLEX_FULL;
+		else
+			duplex = DUPLEX_HALF;
+
+	} else {
+		speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
+		duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+	}
+
+	update_linkspeed_register(dev, speed, duplex);
+
+	dev_info(&pdev->dev, "%s: Link now %i-%s\n", dev->name, speed,
+			(duplex == DUPLEX_FULL) ? "FullDuplex" : "HalfDuplex");
+	ether->linkflag = 0x01;
+
+	netif_carrier_on(dev);
+}
+
+static void w90p910_check_link(unsigned long dev_id)
+{
+	struct net_device *dev = (struct net_device *) dev_id;
+	struct w90p910_ether *ether = netdev_priv(dev);
+
+	update_linkspeed(dev);
+	mod_timer(&ether->check_timer, jiffies + msecs_to_jiffies(1000));
+}
+
+static void w90p910_write_cam(struct net_device *dev,
+				unsigned int x, unsigned char *pval)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	unsigned int msw, lsw;
+
+	msw = (pval[0] << 24) | (pval[1] << 16) | (pval[2] << 8) | pval[3];
+
+	lsw = (pval[4] << 24) | (pval[5] << 16);
+
+	__raw_writel(lsw, ether->reg + REG_CAML_BASE + x * CAM_ENTRY_SIZE);
+	__raw_writel(msw, ether->reg + REG_CAMM_BASE + x * CAM_ENTRY_SIZE);
+}
+
+static void w90p910_init_desc(struct net_device *dev)
+{
+	struct w90p910_ether *ether;
+	struct w90p910_txbd  *tdesc, *tdesc_phys;
+	struct w90p910_rxbd  *rdesc, *rdesc_phys;
+	unsigned int i, j;
+
+	ether = netdev_priv(dev);
+
+	ether->tdesc = (struct tran_pdesc *)
+			dma_alloc_coherent(NULL, sizeof(struct tran_pdesc),
+				(dma_addr_t *) &ether->tdesc_phys, GFP_KERNEL);
+
+	ether->rdesc = (struct recv_pdesc *)
+			dma_alloc_coherent(NULL, sizeof(struct recv_pdesc),
+				(dma_addr_t *) &ether->rdesc_phys, GFP_KERNEL);
+
+	for (i = 0; i < TX_DESC_SIZE; i++) {
+		tdesc = &(ether->tdesc->desclist[i]);
+
+		j = ((i + 1) / TX_DESC_SIZE);
+
+		if (j != 0) {
+			tdesc_phys = &(ether->tdesc_phys->desclist[0]);
+			ether->start_tx_ptr = (unsigned int)tdesc_phys;
+			tdesc->next = (unsigned int)ether->start_tx_ptr;
+		} else {
+			tdesc_phys = &(ether->tdesc_phys->desclist[i+1]);
+			tdesc->next = (unsigned int)tdesc_phys;
+		}
+
+		tdesc->buffer = (unsigned int)ether->tdesc_phys->tran_buf[i];
+		tdesc->sl = 0;
+		tdesc->mode = 0;
+	}
+
+	for (i = 0; i < RX_DESC_SIZE; i++) {
+		rdesc = &(ether->rdesc->desclist[i]);
+
+		j = ((i + 1) / RX_DESC_SIZE);
+
+		if (j != 0) {
+			rdesc_phys = &(ether->rdesc_phys->desclist[0]);
+			ether->start_rx_ptr = (unsigned int)rdesc_phys;
+			rdesc->next = (unsigned int)ether->start_rx_ptr;
+		} else {
+			rdesc_phys = &(ether->rdesc_phys->desclist[i+1]);
+			rdesc->next = (unsigned int)rdesc_phys;
+		}
+
+		rdesc->sl = RX_OWEN_DMA;
+		rdesc->buffer = (unsigned int)ether->rdesc_phys->recv_buf[i];
+	  }
+}
+
+static void w90p910_set_fifo_threshold(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	unsigned int val;
+
+	val = TXTHD | BLENGTH;
+	__raw_writel(val, ether->reg + REG_FFTCR);
+}
+
+static void w90p910_return_default_idle(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	unsigned int val;
+
+	val = __raw_readl(ether->reg + REG_MCMDR);
+	val |= SWR;
+	__raw_writel(val, ether->reg + REG_MCMDR);
+}
+
+static void w90p910_trigger_rx(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+
+	__raw_writel(ENSTART, ether->reg + REG_RSDR);
+}
+
+static void w90p910_trigger_tx(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+
+	__raw_writel(ENSTART, ether->reg + REG_TSDR);
+}
+
+static void w90p910_enable_mac_interrupt(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	unsigned int val;
+
+	val = ENTXINTR | ENRXINTR | ENRXGD | ENTXCP;
+	val |= ENTXBERR | ENRXBERR | ENTXABT;
+
+	__raw_writel(val, ether->reg + REG_MIEN);
+}
+
+static void w90p910_get_and_clear_int(struct net_device *dev,
+							unsigned int *val)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+
+	*val = __raw_readl(ether->reg + REG_MISTA);
+	__raw_writel(*val, ether->reg + REG_MISTA);
+}
+
+static void w90p910_set_global_maccmd(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	unsigned int val;
+
+	val = __raw_readl(ether->reg + REG_MCMDR);
+	val |= MCMDR_SPCRC | MCMDR_ENMDC | MCMDR_ACP | ENMDC;
+	__raw_writel(val, ether->reg + REG_MCMDR);
+}
+
+static void w90p910_enable_cam(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	unsigned int val;
+
+	w90p910_write_cam(dev, CAM0, dev->dev_addr);
+
+	val = __raw_readl(ether->reg + REG_CAMEN);
+	val |= CAM0EN;
+	__raw_writel(val, ether->reg + REG_CAMEN);
+}
+
+static void w90p910_enable_cam_command(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	unsigned int val;
+
+	val = CAMCMR_ECMP | CAMCMR_ABP | CAMCMR_AMP;
+	__raw_writel(val, ether->reg + REG_CAMCMR);
+}
+
+static void w90p910_enable_tx(struct net_device *dev, unsigned int enable)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	unsigned int val;
+
+	val = __raw_readl(ether->reg + REG_MCMDR);
+
+	if (enable)
+		val |= MCMDR_TXON;
+	else
+		val &= ~MCMDR_TXON;
+
+	__raw_writel(val, ether->reg + REG_MCMDR);
+}
+
+static void w90p910_enable_rx(struct net_device *dev, unsigned int enable)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	unsigned int val;
+
+	val = __raw_readl(ether->reg + REG_MCMDR);
+
+	if (enable)
+		val |= MCMDR_RXON;
+	else
+		val &= ~MCMDR_RXON;
+
+	__raw_writel(val, ether->reg + REG_MCMDR);
+}
+
+static void w90p910_set_curdest(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+
+	__raw_writel(ether->start_rx_ptr, ether->reg + REG_RXDLSA);
+	__raw_writel(ether->start_tx_ptr, ether->reg + REG_TXDLSA);
+}
+
+static void w90p910_reset_mac(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+
+	spin_lock(&ether->lock);
+
+	w90p910_enable_tx(dev, 0);
+	w90p910_enable_rx(dev, 0);
+	w90p910_set_fifo_threshold(dev);
+	w90p910_return_default_idle(dev);
+
+	if (!netif_queue_stopped(dev))
+		netif_stop_queue(dev);
+
+	w90p910_init_desc(dev);
+
+	dev->trans_start = jiffies;
+	ether->cur_tx = 0x0;
+	ether->finish_tx = 0x0;
+	ether->cur_rx = 0x0;
+
+	w90p910_set_curdest(dev);
+	w90p910_enable_cam(dev);
+	w90p910_enable_cam_command(dev);
+	w90p910_enable_mac_interrupt(dev);
+	w90p910_enable_tx(dev, 1);
+	w90p910_enable_rx(dev, 1);
+	w90p910_trigger_tx(dev);
+	w90p910_trigger_rx(dev);
+
+	dev->trans_start = jiffies;
+
+	if (netif_queue_stopped(dev))
+		netif_wake_queue(dev);
+
+	spin_unlock(&ether->lock);
+}
+
+static void w90p910_mdio_write(struct net_device *dev,
+					int phy_id, int reg, int data)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	struct platform_device *pdev;
+	unsigned int val, i;
+
+	pdev = ether->pdev;
+
+	__raw_writel(data, ether->reg + REG_MIID);
+
+	val = (phy_id << 0x08) | reg;
+	val |= PHYBUSY | PHYWR | MDCCR_VAL;
+	__raw_writel(val, ether->reg + REG_MIIDA);
+
+	for (i = 0; i < DELAY; i++) {
+		if ((__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY) == 0)
+			break;
+	}
+
+	if (i == DELAY)
+		dev_warn(&pdev->dev, "mdio write timed out\n");
+}
+
+static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	struct platform_device *pdev;
+	unsigned int val, i, data;
+
+	pdev = ether->pdev;
+
+	val = (phy_id << 0x08) | reg;
+	val |= PHYBUSY | MDCCR_VAL;
+	__raw_writel(val, ether->reg + REG_MIIDA);
+
+	for (i = 0; i < DELAY; i++) {
+		if ((__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY) == 0)
+			break;
+	}
+
+	if (i == DELAY) {
+		dev_warn(&pdev->dev, "mdio read timed out\n");
+		data = 0xffff;
+	} else {
+		data = __raw_readl(ether->reg + REG_MIID);
+	}
+
+	return data;
+}
+
+static int set_mac_address(struct net_device *dev, void *addr)
+{
+	struct sockaddr *address = addr;
+
+	if (!is_valid_ether_addr(address->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
+	w90p910_write_cam(dev, CAM0, dev->dev_addr);
+
+	return 0;
+}
+
+static int w90p910_ether_close(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+
+	dma_free_coherent(NULL, sizeof(struct recv_pdesc),
+				ether->rdesc, (dma_addr_t)ether->rdesc_phys);
+	dma_free_coherent(NULL, sizeof(struct tran_pdesc),
+				ether->tdesc, (dma_addr_t)ether->tdesc_phys);
+
+	netif_stop_queue(dev);
+
+	del_timer_sync(&ether->check_timer);
+	clk_disable(ether->rmiiclk);
+	clk_disable(ether->clk);
+
+	free_irq(ether->txirq, dev);
+	free_irq(ether->rxirq, dev);
+
+	return 0;
+}
+
+static struct net_device_stats *w90p910_ether_stats(struct net_device *dev)
+{
+	struct w90p910_ether *ether;
+
+	ether = netdev_priv(dev);
+
+	return &ether->stats;
+}
+
+static int w90p910_send_frame(struct net_device *dev,
+					unsigned char *data, int length)
+{
+	struct w90p910_ether *ether;
+	struct w90p910_txbd *txbd;
+	struct platform_device *pdev;
+	unsigned char *buffer;
+
+	ether = netdev_priv(dev);
+	pdev = ether->pdev;
+
+	txbd = &ether->tdesc->desclist[ether->cur_tx];
+	buffer = ether->tdesc->tran_buf[ether->cur_tx];
+	if (length > 1514) {
+		dev_err(&pdev->dev, "send data %d bytes, check it\n", length);
+		length = 1514;
+	}
+
+	txbd->sl = length & 0xFFFF;
+
+	memcpy(buffer, data, length);
+
+	txbd->mode = TX_OWEN_DMA | PADDINGMODE | CRCMODE | MACTXINTEN;
+
+	w90p910_enable_tx(dev, 1);
+
+	w90p910_trigger_tx(dev);
+
+	ether->cur_tx = (ether->cur_tx+1) % TX_DESC_SIZE;
+	txbd = &ether->tdesc->desclist[ether->cur_tx];
+
+	dev->trans_start = jiffies;
+
+	if (txbd->mode & TX_OWEN_DMA)
+		netif_stop_queue(dev);
+
+	return 0;
+}
+
+static int w90p910_ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+
+	if (!(w90p910_send_frame(dev, skb->data, skb->len))) {
+		ether->skb = skb;
+		dev_kfree_skb_irq(skb);
+		return 0;
+	}
+	return -1;
+}
+
+static irqreturn_t w90p910_tx_interrupt(int irq, void *dev_id)
+{
+	struct w90p910_ether *ether;
+	struct w90p910_txbd  *txbd;
+	struct platform_device *pdev;
+	struct tran_pdesc *tran_pdesc;
+	struct net_device *dev;
+	unsigned int cur_entry, entry, status;
+
+	dev = (struct net_device *)dev_id;
+	ether = netdev_priv(dev);
+	pdev = ether->pdev;
+
+	spin_lock(&ether->lock);
+
+	w90p910_get_and_clear_int(dev, &status);
+
+	cur_entry = __raw_readl(ether->reg + REG_CTXDSA);
+
+	tran_pdesc = ether->tdesc_phys;
+	entry = (unsigned int)(&tran_pdesc->desclist[ether->finish_tx]);
+
+	while (entry != cur_entry) {
+		txbd = &ether->tdesc->desclist[ether->finish_tx];
+
+		ether->finish_tx = (ether->finish_tx + 1) % TX_DESC_SIZE;
+
+		if (txbd->sl & TXDS_TXCP) {
+			ether->stats.tx_packets++;
+			ether->stats.tx_bytes += txbd->sl & 0xFFFF;
+		} else {
+			ether->stats.tx_errors++;
+		}
+
+		txbd->sl = 0x0;
+		txbd->mode = 0x0;
+
+		if (netif_queue_stopped(dev))
+			netif_wake_queue(dev);
+
+		entry = (unsigned int)(&tran_pdesc->desclist[ether->finish_tx]);
+	}
+
+	if (status & MISTA_EXDEF) {
+		dev_err(&pdev->dev, "emc defer exceed interrupt\n");
+	} else if (status & MISTA_TXBERR) {
+		dev_err(&pdev->dev, "emc bus error interrupt\n");
+		w90p910_reset_mac(dev);
+	} else if (status & MISTA_TDU) {
+		if (netif_queue_stopped(dev))
+			netif_wake_queue(dev);
+	}
+
+	spin_unlock(&ether->lock);
+
+	return IRQ_HANDLED;
+}
+
+static void netdev_rx(struct net_device *dev)
+{
+	struct w90p910_ether *ether;
+	struct w90p910_rxbd *rxbd;
+	struct platform_device *pdev;
+	struct recv_pdesc *rdesc_phys;
+	struct sk_buff *skb;
+	unsigned char *data;
+	unsigned int length, status, val, entry;
+
+	ether = netdev_priv(dev);
+	pdev = ether->pdev;
+	rdesc_phys = ether->rdesc_phys;
+
+	rxbd = &ether->rdesc->desclist[ether->cur_rx];
+
+	do {
+		val = __raw_readl(ether->reg + REG_CRXDSA);
+		entry = (unsigned int)&rdesc_phys->desclist[ether->cur_rx];
+
+		if (val == entry)
+			break;
+
+		status = rxbd->sl;
+		length = status & 0xFFFF;
+
+		if (status & RXDS_RXGD) {
+			data = ether->rdesc->recv_buf[ether->cur_rx];
+			skb = dev_alloc_skb(length+2);
+			if (!skb) {
+				dev_err(&pdev->dev, "get skb buffer error\n");
+				ether->stats.rx_dropped++;
+				return;
+			}
+
+			skb->dev = dev;
+			skb_reserve(skb, 2);
+			skb_put(skb, length);
+			skb_copy_to_linear_data(skb, data, length);
+			skb->protocol = eth_type_trans(skb, dev);
+			ether->stats.rx_packets++;
+			ether->stats.rx_bytes += length;
+			netif_rx(skb);
+		} else {
+			ether->stats.rx_errors++;
+
+			if (status & RXDS_RP) {
+				dev_err(&pdev->dev, "rx runt err\n");
+				ether->stats.rx_length_errors++;
+			} else if (status & RXDS_CRCE) {
+				dev_err(&pdev->dev, "rx crc err\n");
+				ether->stats.rx_crc_errors++;
+			}
+
+			if (status & RXDS_ALIE) {
+				dev_err(&pdev->dev, "rx alignment err\n");
+				ether->stats.rx_frame_errors++;
+			} else if (status & RXDS_PTLE) {
+				dev_err(&pdev->dev, "rx longer err\n");
+				ether->stats.rx_over_errors++;
+			}
+		}
+
+		rxbd->sl = RX_OWEN_DMA;
+		rxbd->reserved = 0x0;
+		ether->cur_rx = (ether->cur_rx+1) % RX_DESC_SIZE;
+		rxbd = &ether->rdesc->desclist[ether->cur_rx];
+
+		dev->last_rx = jiffies;
+	} while (1);
+}
+
+static irqreturn_t w90p910_rx_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev;
+	struct w90p910_ether  *ether;
+	struct platform_device *pdev;
+	unsigned int status;
+
+	dev = (struct net_device *)dev_id;
+	ether = netdev_priv(dev);
+	pdev = ether->pdev;
+
+	spin_lock(&ether->lock);
+
+	w90p910_get_and_clear_int(dev, &status);
+
+	if (status & MISTA_RDU) {
+		netdev_rx(dev);
+
+		w90p910_trigger_rx(dev);
+
+		spin_unlock(&ether->lock);
+		return IRQ_HANDLED;
+	} else if (status & MISTA_RXBERR) {
+		dev_err(&pdev->dev, "emc rx bus error\n");
+		w90p910_reset_mac(dev);
+	}
+
+	netdev_rx(dev);
+	spin_unlock(&ether->lock);
+	return IRQ_HANDLED;
+}
+
+static int w90p910_ether_open(struct net_device *dev)
+{
+	struct w90p910_ether *ether;
+	struct platform_device *pdev;
+
+	ether = netdev_priv(dev);
+	pdev = ether->pdev;
+
+	w90p910_reset_mac(dev);
+	w90p910_set_fifo_threshold(dev);
+	w90p910_set_curdest(dev);
+	w90p910_enable_cam(dev);
+	w90p910_enable_cam_command(dev);
+	w90p910_enable_mac_interrupt(dev);
+	w90p910_set_global_maccmd(dev);
+	w90p910_enable_rx(dev, 1);
+
+	ether->rx_packets = 0x0;
+	ether->rx_bytes = 0x0;
+
+	if (request_irq(ether->txirq, w90p910_tx_interrupt,
+						0x0, pdev->name, dev)) {
+		dev_err(&pdev->dev, "register irq tx failed\n");
+		return -EAGAIN;
+	}
+
+	if (request_irq(ether->rxirq, w90p910_rx_interrupt,
+						0x0, pdev->name, dev)) {
+		dev_err(&pdev->dev, "register irq rx failed\n");
+		return -EAGAIN;
+	}
+
+	mod_timer(&ether->check_timer, jiffies + msecs_to_jiffies(1000));
+	netif_start_queue(dev);
+	w90p910_trigger_rx(dev);
+
+	dev_info(&pdev->dev, "%s is OPENED\n", dev->name);
+
+	return 0;
+}
+
+static void w90p910_ether_set_multicast_list(struct net_device *dev)
+{
+	struct w90p910_ether *ether;
+	unsigned int rx_mode;
+
+	ether = netdev_priv(dev);
+
+	if (dev->flags & IFF_PROMISC)
+		rx_mode = CAMCMR_AUP | CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP;
+	else if ((dev->flags & IFF_ALLMULTI) || dev->mc_list)
+		rx_mode = CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP;
+	else
+		rx_mode = CAMCMR_ECMP | CAMCMR_ABP;
+	__raw_writel(rx_mode, ether->reg + REG_CAMCMR);
+}
+
+static int w90p910_ether_ioctl(struct net_device *dev,
+						struct ifreq *ifr, int cmd)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	struct mii_ioctl_data *data = if_mii(ifr);
+
+	return generic_mii_ioctl(&ether->mii, data, cmd, NULL);
+}
+
+static void w90p910_get_drvinfo(struct net_device *dev,
+					struct ethtool_drvinfo *info)
+{
+	strcpy(info->driver, DRV_MODULE_NAME);
+	strcpy(info->version, DRV_MODULE_VERSION);
+}
+
+static int w90p910_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	return mii_ethtool_gset(&ether->mii, cmd);
+}
+
+static int w90p910_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	return mii_ethtool_sset(&ether->mii, cmd);
+}
+
+static int w90p910_nway_reset(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	return mii_nway_restart(&ether->mii);
+}
+
+static u32 w90p910_get_link(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	return mii_link_ok(&ether->mii);
+}
+
+static const struct ethtool_ops w90p910_ether_ethtool_ops = {
+	.get_settings	= w90p910_get_settings,
+	.set_settings	= w90p910_set_settings,
+	.get_drvinfo	= w90p910_get_drvinfo,
+	.nway_reset	= w90p910_nway_reset,
+	.get_link	= w90p910_get_link,
+};
+
+static const struct net_device_ops w90p910_ether_netdev_ops = {
+	.ndo_open		= w90p910_ether_open,
+	.ndo_stop		= w90p910_ether_close,
+	.ndo_start_xmit		= w90p910_ether_start_xmit,
+	.ndo_get_stats		= w90p910_ether_stats,
+	.ndo_set_multicast_list	= w90p910_ether_set_multicast_list,
+	.ndo_set_mac_address	= set_mac_address,
+	.ndo_do_ioctl		= w90p910_ether_ioctl,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_change_mtu		= eth_change_mtu,
+};
+
+static void __init get_mac_address(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	struct platform_device *pdev;
+	char addr[6];
+
+	pdev = ether->pdev;
+
+	addr[0] = 0x00;
+	addr[1] = 0x02;
+	addr[2] = 0xac;
+	addr[3] = 0x55;
+	addr[4] = 0x88;
+	addr[5] = 0xa8;
+
+	if (is_valid_ether_addr(addr))
+		memcpy(dev->dev_addr, &addr, 0x06);
+	else
+		dev_err(&pdev->dev, "invalid mac address\n");
+}
+
+static int w90p910_ether_setup(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+
+	ether_setup(dev);
+	dev->netdev_ops = &w90p910_ether_netdev_ops;
+	dev->ethtool_ops = &w90p910_ether_ethtool_ops;
+
+	dev->tx_queue_len = 16;
+	dev->dma = 0x0;
+	dev->watchdog_timeo = TX_TIMEOUT;
+
+	get_mac_address(dev);
+
+	spin_lock_init(&ether->lock);
+
+	ether->cur_tx = 0x0;
+	ether->cur_rx = 0x0;
+	ether->finish_tx = 0x0;
+	ether->linkflag = 0x0;
+	ether->mii.phy_id = 0x01;
+	ether->mii.phy_id_mask = 0x1f;
+	ether->mii.reg_num_mask = 0x1f;
+	ether->mii.dev = dev;
+	ether->mii.mdio_read = w90p910_mdio_read;
+	ether->mii.mdio_write = w90p910_mdio_write;
+
+	setup_timer(&ether->check_timer, w90p910_check_link,
+						(unsigned long)dev);
+
+	return 0;
+}
+
+static int __devinit w90p910_ether_probe(struct platform_device *pdev)
+{
+	struct w90p910_ether *ether;
+	struct net_device *dev;
+	struct resource *res;
+	int error;
+
+	dev = alloc_etherdev(sizeof(struct w90p910_ether));
+	if (!dev)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "failed to get I/O memory\n");
+		error = -ENXIO;
+		goto failed_free;
+	}
+
+	res = request_mem_region(res->start, resource_size(res), pdev->name);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "failed to request I/O memory\n");
+		error = -EBUSY;
+		goto failed_free;
+	}
+
+	ether = netdev_priv(dev);
+
+	ether->reg = ioremap(res->start, resource_size(res));
+	if (ether->reg == NULL) {
+		dev_err(&pdev->dev, "failed to remap I/O memory\n");
+		error = -ENXIO;
+		goto failed_free_mem;
+	}
+
+	ether->txirq = platform_get_irq(pdev, 0);
+	if (ether->txirq < 0) {
+		dev_err(&pdev->dev, "failed to get ether tx irq\n");
+		error = -ENXIO;
+		goto failed_free_io;
+	}
+
+	ether->rxirq = platform_get_irq(pdev, 1);
+	if (ether->rxirq < 0) {
+		dev_err(&pdev->dev, "failed to get ether rx irq\n");
+		error = -ENXIO;
+		goto failed_free_txirq;
+	}
+
+	platform_set_drvdata(pdev, dev);
+
+	ether->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(ether->clk)) {
+		dev_err(&pdev->dev, "failed to get ether clock\n");
+		error = PTR_ERR(ether->clk);
+		goto failed_free_rxirq;
+	}
+
+	ether->rmiiclk = clk_get(&pdev->dev, "RMII");
+	if (IS_ERR(ether->rmiiclk)) {
+		dev_err(&pdev->dev, "failed to get ether clock\n");
+		error = PTR_ERR(ether->rmiiclk);
+		goto failed_put_clk;
+	}
+
+	ether->pdev = pdev;
+
+	w90p910_ether_setup(dev);
+
+	error = register_netdev(dev);
+	if (error != 0) {
+		dev_err(&pdev->dev, "Register EMC w90p910 FAILED\n");
+		error = -ENODEV;
+		goto failed_put_rmiiclk;
+	}
+
+	return 0;
+failed_put_rmiiclk:
+	clk_put(ether->rmiiclk);
+failed_put_clk:
+	clk_put(ether->clk);
+failed_free_rxirq:
+	free_irq(ether->rxirq, pdev);
+	platform_set_drvdata(pdev, NULL);
+failed_free_txirq:
+	free_irq(ether->txirq, pdev);
+failed_free_io:
+	iounmap(ether->reg);
+failed_free_mem:
+	release_mem_region(res->start, resource_size(res));
+failed_free:
+	free_netdev(dev);
+	return error;
+}
+
+static int __devexit w90p910_ether_remove(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct w90p910_ether *ether = netdev_priv(dev);
+
+	unregister_netdev(dev);
+	clk_put(ether->rmiiclk);
+	clk_put(ether->clk);
+	del_timer_sync(&ether->check_timer);
+	platform_set_drvdata(pdev, NULL);
+	free_netdev(dev);
+	return 0;
+}
+
+static struct platform_driver w90p910_ether_driver = {
+	.probe		= w90p910_ether_probe,
+	.remove		= __devexit_p(w90p910_ether_remove),
+	.driver		= {
+		.name	= "w90p910-emc",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init w90p910_ether_init(void)
+{
+	return platform_driver_register(&w90p910_ether_driver);
+}
+
+static void __exit w90p910_ether_exit(void)
+{
+	platform_driver_unregister(&w90p910_ether_driver);
+}
+
+module_init(w90p910_ether_init);
+module_exit(w90p910_ether_exit);
+
+MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
+MODULE_DESCRIPTION("w90p910 MAC driver!");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:w90p910-emc");
+

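A note on w90p910_init_desc() above: the `j = ((i + 1) / TX_DESC_SIZE)` test only becomes non-zero for the last descriptor, which is then linked back to the first so the DMA engine walks the descriptor list as a ring. A small standalone sketch of the same idea using the more familiar modulo form:

#include <stdio.h>

#define NUM_DESC 10

struct desc {
	struct desc *next;
};

int main(void)
{
	static struct desc ring[NUM_DESC];
	int i;

	/* link each descriptor to the next; the last one wraps to the first */
	for (i = 0; i < NUM_DESC; i++)
		ring[i].next = &ring[(i + 1) % NUM_DESC];

	printf("ring closed: %s\n",
	       ring[NUM_DESC - 1].next == &ring[0] ? "yes" : "no");
	return 0;
}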
+ 4 - 4
drivers/net/atl1c/atl1c.h

@@ -188,14 +188,14 @@ struct atl1c_tpd_ext_desc {
 #define RRS_HDS_TYPE_DATA	2
 
 #define RRS_IS_NO_HDS_TYPE(flag) \
-	(((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK == 0)
+	((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == 0)
 
 #define RRS_IS_HDS_HEAD(flag) \
-	(((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK == \
+	((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == \
 			RRS_HDS_TYPE_HEAD)
 
 #define RRS_IS_HDS_DATA(flag) \
-	(((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK == \
+	((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == \
 			RRS_HDS_TYPE_DATA)
 
 /* rrs word 3 bit 0:31 */
@@ -245,7 +245,7 @@ struct atl1c_tpd_ext_desc {
 #define RRS_PACKET_TYPE_802_3  	1
 #define RRS_PACKET_TYPE_ETH	0
 #define RRS_PACKET_IS_ETH(word) \
-	(((word) >> RRS_PACKET_TYPE_SHIFT) & RRS_PACKET_TYPE_MASK == \
+	((((word) >> RRS_PACKET_TYPE_SHIFT) & RRS_PACKET_TYPE_MASK) == \
 			RRS_PACKET_TYPE_ETH)
 #define RRS_RXD_IS_VALID(word) \
 	((((word) >> RRS_RXD_UPDATED_SHIFT) & RRS_RXD_UPDATED_MASK) == 1)

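The atl1c.h hunk above is purely an operator-precedence fix: in C, == binds more tightly than &, so the unparenthesised form masked against the result of the comparison instead of comparing the masked value. A tiny standalone demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int flag = 0x0;	/* no HDS bits set in this example */

	/* parsed as flag & (0x3 == 0), i.e. flag & 0, which is always 0 */
	printf("without parens: %u\n", flag & 0x3 == 0);

	/* the intended test: (flag & 0x3) == 0, which prints 1 here */
	printf("with parens:    %u\n", (flag & 0x3) == 0);

	return 0;
}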
+ 1 - 1
drivers/net/atl1c/atl1c_main.c

@@ -1689,7 +1689,7 @@ static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que,
 		if (likely(RRS_RXD_IS_VALID(rrs->word3))) {
 			rfd_num = (rrs->word0 >> RRS_RX_RFD_CNT_SHIFT) &
 				RRS_RX_RFD_CNT_MASK;
-			if (unlikely(rfd_num) != 1)
+			if (unlikely(rfd_num != 1))
 				/* TODO support mul rfd*/
 				if (netif_msg_rx_err(adapter))
 					dev_warn(&pdev->dev,

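The atl1c_main.c fix above matters because unlikely() is not a transparent wrapper: it is defined (roughly as below, paraphrasing <linux/compiler.h>) in terms of __builtin_expect over the normalised 0/1 value of its argument, so `unlikely(rfd_num) != 1` effectively tests rfd_num == 0 rather than rfd_num != 1. A small demonstration:

#include <stdio.h>

/* branch-prediction hint, roughly as defined in <linux/compiler.h> */
#define unlikely(x)	__builtin_expect(!!(x), 0)

int main(void)
{
	unsigned int rfd_num = 2;	/* a value the driver should treat as != 1 */

	/* buggy form: !!(rfd_num) != 1  ->  1 != 1  ->  0, the test never fires */
	printf("unlikely(rfd_num) != 1 : %d\n", unlikely(rfd_num) != 1);

	/* fixed form: the whole comparison sits inside the hint */
	printf("unlikely(rfd_num != 1) : %d\n", unlikely(rfd_num != 1) ? 1 : 0);

	return 0;
}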
+ 2 - 1
drivers/net/bnx2x_link.c

@@ -4212,13 +4212,14 @@ static void bnx2x_turn_off_sf(struct bnx2x *bp, u8 port)
 u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
 			      u8 *version, u16 len)
 {
-	struct bnx2x *bp = params->bp;
+	struct bnx2x *bp;
 	u32 ext_phy_type = 0;
 	u32 spirom_ver = 0;
 	u8 status = 0 ;
 
 	if (version == NULL || params == NULL)
 		return -EINVAL;
+	bp = params->bp;
 
 	spirom_ver = REG_RD(bp, params->shmem_base +
 		   offsetof(struct shmem_region,

+ 10 - 2
drivers/net/bonding/bond_main.c

@@ -1459,8 +1459,16 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	 * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond
 	 */
 	if (bond->slave_cnt == 0) {
-		if (slave_dev->type != ARPHRD_ETHER)
-			bond_setup_by_slave(bond_dev, slave_dev);
+		if (bond_dev->type != slave_dev->type) {
+			dev_close(bond_dev);
+			pr_debug("%s: change device type from %d to %d\n",
+				bond_dev->name, bond_dev->type, slave_dev->type);
+			if (slave_dev->type != ARPHRD_ETHER)
+				bond_setup_by_slave(bond_dev, slave_dev);
+			else
+				ether_setup(bond_dev);
+			dev_open(bond_dev);
+		}
 	} else if (bond_dev->type != slave_dev->type) {
 		pr_err(DRV_NAME ": %s ether type (%d) is different "
 			"from other slaves (%d), can not enslave it.\n",

+ 6 - 2
drivers/net/can/dev.c

@@ -346,7 +346,7 @@ void can_restart(unsigned long data)
 	skb = dev_alloc_skb(sizeof(struct can_frame));
 	if (skb == NULL) {
 		err = -ENOMEM;
-		goto out;
+		goto restart;
 	}
 	skb->dev = dev;
 	skb->protocol = htons(ETH_P_CAN);
@@ -361,13 +361,13 @@ void can_restart(unsigned long data)
 	stats->rx_packets++;
 	stats->rx_bytes += cf->can_dlc;
 
+restart:
 	dev_dbg(dev->dev.parent, "restarted\n");
 	priv->can_stats.restarts++;
 
 	/* Now restart the device */
 	err = priv->do_set_mode(dev, CAN_MODE_START);
 
-out:
 	netif_carrier_on(dev);
 	if (err)
 		dev_err(dev->dev.parent, "Error %d during restart", err);
@@ -473,6 +473,10 @@ int open_candev(struct net_device *dev)
 		return -EINVAL;
 	}
 
+	/* Switch carrier on if device was stopped while in bus-off state */
+	if (!netif_carrier_ok(dev))
+		netif_carrier_on(dev);
+
 	setup_timer(&priv->restart_timer, can_restart, (unsigned long)dev);
 
 	return 0;

+ 0 - 1
drivers/net/can/sja1000/sja1000.c

@@ -63,7 +63,6 @@
 #include <linux/can.h>
 #include <linux/can/dev.h>
 #include <linux/can/error.h>
-#include <linux/can/dev.h>
 
 #include "sja1000.h"
 

+ 3 - 0
drivers/net/e100.c

@@ -1897,6 +1897,9 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
 
 			if (ioread8(&nic->csr->scb.status) & rus_no_res)
 				nic->ru_running = RU_SUSPENDED;
+		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
+					       sizeof(struct rfd),
+					       PCI_DMA_BIDIRECTIONAL);
 		return -ENODATA;
 	}
 

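The e100 hunk above hands the RFD buffer back to the device after the CPU has looked at it on the -ENODATA path. The streaming-DMA rule it follows is: sync-for-cpu before the CPU reads, sync-for-device before the hardware may touch the buffer again. A hedged sketch of that pairing using the same 2009-era pci_* wrappers; the helper and its arguments are illustrative, not e100 code:

#include <linux/pci.h>

/* illustrative helper: bracket CPU access to a streaming-mapped RFD */
static void inspect_rfd(struct pci_dev *pdev, dma_addr_t rfd_dma, size_t len)
{
	/* give the CPU a coherent view of what the device wrote */
	pci_dma_sync_single_for_cpu(pdev, rfd_dma, len,
				    PCI_DMA_BIDIRECTIONAL);

	/* ... examine the receive frame descriptor here ... */

	/* hand ownership back so the device may DMA into it again */
	pci_dma_sync_single_for_device(pdev, rfd_dma, len,
				       PCI_DMA_BIDIRECTIONAL);
}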
+ 1 - 1
drivers/net/hamradio/6pack.c

@@ -3,7 +3,7 @@
  *		devices like TTY. It interfaces between a raw TTY and the
  *		kernel's AX.25 protocol layers.
  *
- * Authors:	Andreas Könsgen <ajk@iehk.rwth-aachen.de>
+ * Authors:	Andreas Könsgen <ajk@comnets.uni-bremen.de>
  *              Ralf Baechle DL5RB <ralf@linux-mips.org>
  *
  * Quite a lot of stuff "stolen" by Joerg Reuter from slip.c, written by

+ 4 - 3
drivers/net/ibm_newemac/rgmii.c

@@ -188,11 +188,12 @@ void rgmii_put_mdio(struct of_device *ofdev, int input)
 void rgmii_detach(struct of_device *ofdev, int input)
 {
 	struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
-	struct rgmii_regs __iomem *p = dev->base;
-
-	mutex_lock(&dev->lock);
+	struct rgmii_regs __iomem *p;
 
 	BUG_ON(!dev || dev->users == 0);
+	p = dev->base;
+
+	mutex_lock(&dev->lock);
 
 	RGMII_DBG(dev, "detach(%d)" NL, input);
 

+ 2 - 4
drivers/net/ixgbe/ixgbe_dcb_nl.c

@@ -106,8 +106,6 @@ static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-	DPRINTK(DRV, INFO, "Get DCB Admin Mode.\n");
-
 	return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED);
 }
 
@@ -116,8 +114,6 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
 	u8 err = 0;
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-	DPRINTK(DRV, INFO, "Set DCB Admin Mode.\n");
-
 	if (state > 0) {
 		/* Turn on DCB */
 		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
@@ -175,6 +171,8 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	int i, j;
 
+	memset(perm_addr, 0xff, MAX_ADDR_LEN);
+
 	for (i = 0; i < netdev->addr_len; i++)
 		perm_addr[i] = adapter->hw.mac.perm_addr[i];
 

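The one-line jazzsonic change above pairs with the driver's other callbacks: platform_set_drvdata() in probe is what makes a later platform_get_drvdata() return the net_device. A minimal, hypothetical sketch of that pairing (not the jazzsonic code itself):

#include <linux/platform_device.h>
#include <linux/etherdevice.h>

static int sketch_probe(struct platform_device *pdev)
{
	struct net_device *dev = alloc_etherdev(0);

	if (!dev)
		return -ENOMEM;

	/* stash the net_device so later callbacks can find it again */
	platform_set_drvdata(pdev, dev);
	return 0;
}

static int sketch_remove(struct platform_device *pdev)
{
	/* retrieve exactly what probe stored */
	struct net_device *dev = platform_get_drvdata(pdev);

	free_netdev(dev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}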
+ 1 - 0
drivers/net/jazzsonic.c

@@ -229,6 +229,7 @@ static int __init jazz_sonic_probe(struct platform_device *pdev)
 	lp = netdev_priv(dev);
 	lp->device = &pdev->dev;
 	SET_NETDEV_DEV(dev, &pdev->dev);
+	platform_set_drvdata(pdev, dev);
 
 	netdev_boot_setup_check(dev);
 

+ 1322 - 0
drivers/net/ks8851.c

@@ -0,0 +1,1322 @@
+/* drivers/net/ks8851.c
+ *
+ * Copyright 2009 Simtec Electronics
+ *	http://www.simtec.co.uk/
+ *	Ben Dooks <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define DEBUG
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/cache.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+
+#include <linux/spi/spi.h>
+
+#include "ks8851.h"
+
+/**
+ * struct ks8851_rxctrl - KS8851 driver rx control
+ * @mchash: Multicast hash-table data.
+ * @rxcr1: KS_RXCR1 register setting
+ * @rxcr2: KS_RXCR2 register setting
+ *
+ * Representation of the settings needed to control the receive filtering,
+ * such as the multicast hash-filter and the receive register settings. This
+ * is used to work out whether the receive settings have changed and then to
+ * issue the new settings to the worker that will send the necessary
+ * commands.
+ */
+struct ks8851_rxctrl {
+	u16	mchash[4];
+	u16	rxcr1;
+	u16	rxcr2;
+};
+
+/**
+ * union ks8851_tx_hdr - tx header data
+ * @txb: The header as bytes
+ * @txw: The header as 16bit, little-endian words
+ *
+ * A dual representation of the tx header data to allow
+ * access to individual bytes, and to allow 16bit accesses
+ * with 16bit alignment.
+ */
+union ks8851_tx_hdr {
+	u8	txb[6];
+	__le16	txw[3];
+};
+
+/**
+ * struct ks8851_net - KS8851 driver private data
+ * @netdev: The network device we're bound to
+ * @spidev: The spi device we're bound to.
+ * @lock: Lock to ensure that the device is not accessed when busy.
+ * @statelock: Lock on this structure for tx list.
+ * @mii: The MII state information for the mii calls.
+ * @rxctrl: RX settings for @rxctrl_work.
+ * @tx_work: Work queue for tx packets
+ * @irq_work: Work queue for servicing interrupts
+ * @rxctrl_work: Work queue for updating RX mode and multicast lists
+ * @txq: Queue of packets for transmission.
+ * @spi_msg1: pre-setup SPI transfer with one message, @spi_xfer1.
+ * @spi_msg2: pre-setup SPI transfer with two messages, @spi_xfer2.
+ * @txh: Space for generating packet TX header in DMA-able data
+ * @rxd: Space for receiving SPI data, in DMA-able space.
+ * @txd: Space for transmitting SPI data, in DMA-able space.
+ * @msg_enable: The message flags controlling driver output (see ethtool).
+ * @fid: Incrementing frame id tag.
+ * @rc_ier: Cached copy of KS_IER.
+ * @rc_rxqcr: Cached copy of KS_RXQCR.
+ *
+ * The @lock ensures that the chip is protected when certain operations are
+ * in progress. When the read or write packet transfer is in progress, most
+ * of the chip registers are not accessible until the transfer is finished and
+ * the DMA has been de-asserted.
+ *
+ * The @statelock is used to protect information in the structure which may
+ * need to be accessed via several sources, such as the network driver layer
+ * or one of the work queues.
+ *
+ * We align the buffers we may use for rx/tx to ensure that if the SPI driver
+ * wants to DMA map them, it will not have any problems with data the driver
+ * modifies.
+ */
+struct ks8851_net {
+	struct net_device	*netdev;
+	struct spi_device	*spidev;
+	struct mutex		lock;
+	spinlock_t		statelock;
+
+	union ks8851_tx_hdr	txh ____cacheline_aligned;
+	u8			rxd[8];
+	u8			txd[8];
+
+	u32			msg_enable ____cacheline_aligned;
+	u16			tx_space;
+	u8			fid;
+
+	u16			rc_ier;
+	u16			rc_rxqcr;
+
+	struct mii_if_info	mii;
+	struct ks8851_rxctrl	rxctrl;
+
+	struct work_struct	tx_work;
+	struct work_struct	irq_work;
+	struct work_struct	rxctrl_work;
+
+	struct sk_buff_head	txq;
+
+	struct spi_message	spi_msg1;
+	struct spi_message	spi_msg2;
+	struct spi_transfer	spi_xfer1;
+	struct spi_transfer	spi_xfer2[2];
+};
+
+static int msg_enable;
+
+#define ks_info(_ks, _msg...) dev_info(&(_ks)->spidev->dev, _msg)
+#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->spidev->dev, _msg)
+#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->spidev->dev, _msg)
+#define ks_err(_ks, _msg...) dev_err(&(_ks)->spidev->dev, _msg)
+
+/* shift for byte-enable data */
+#define BYTE_EN(_x)	((_x) << 2)
+
+/* turn register number and byte-enable mask into data for start of packet */
+#define MK_OP(_byteen, _reg) (BYTE_EN(_byteen) | (_reg)  << (8+2) | (_reg) >> 6)
+
+/* SPI register read/write calls.
+ *
+ * All these calls issue SPI transactions to access the chip's registers. They
+ * all require that the necessary lock is held to prevent accesses when the
+ * chip is busy transferring packet data (RX/TX FIFO accesses).
+ */
+
+/**
+ * ks8851_wrreg16 - write 16bit register value to chip
+ * @ks: The chip state
+ * @reg: The register address
+ * @val: The value to write
+ *
+ * Issue a write to put the value @val into the register specified in @reg.
+ */
+static void ks8851_wrreg16(struct ks8851_net *ks, unsigned reg, unsigned val)
+{
+	struct spi_transfer *xfer = &ks->spi_xfer1;
+	struct spi_message *msg = &ks->spi_msg1;
+	__le16 txb[2];
+	int ret;
+
+	txb[0] = cpu_to_le16(MK_OP(reg & 2 ? 0xC : 0x03, reg) | KS_SPIOP_WR);
+	txb[1] = cpu_to_le16(val);
+
+	xfer->tx_buf = txb;
+	xfer->rx_buf = NULL;
+	xfer->len = 4;
+
+	ret = spi_sync(ks->spidev, msg);
+	if (ret < 0)
+		ks_err(ks, "spi_sync() failed\n");
+}
+
+/**
+ * ks8851_rx_1msg - select whether to use one or two messages for spi read
+ * @ks: The device structure
+ *
+ * Return whether to generate a single message with a tx and rx buffer
+ * supplied to spi_sync(), or alternatively send the tx and rx buffers
+ * as separate messages.
+ *
+ * Depending on the hardware in use, a single message may be more efficient
+ * in terms of interrupts taken or work done by the driver.
+ *
+ * This currently always returns true until we add some per-device data passed
+ * from the platform code to specify which mode is better.
+ */
+static inline bool ks8851_rx_1msg(struct ks8851_net *ks)
+{
+	return true;
+}
+
+/**
+ * ks8851_rdreg - issue read register command and return the data
+ * @ks: The device state
+ * @op: The register address and byte enables in message format.
+ * @rxb: The RX buffer to return the result into
+ * @rxl: The length of data expected.
+ *
+ * This is the low level read call that issues the necessary spi message(s)
+ * to read data from the register specified in @op.
+ */
+static void ks8851_rdreg(struct ks8851_net *ks, unsigned op,
+			 u8 *rxb, unsigned rxl)
+{
+	struct spi_transfer *xfer;
+	struct spi_message *msg;
+	__le16 *txb = (__le16 *)ks->txd;
+	u8 *trx = ks->rxd;
+	int ret;
+
+	txb[0] = cpu_to_le16(op | KS_SPIOP_RD);
+
+	if (ks8851_rx_1msg(ks)) {
+		msg = &ks->spi_msg1;
+		xfer = &ks->spi_xfer1;
+
+		xfer->tx_buf = txb;
+		xfer->rx_buf = trx;
+		xfer->len = rxl + 2;
+	} else {
+		msg = &ks->spi_msg2;
+		xfer = ks->spi_xfer2;
+
+		xfer->tx_buf = txb;
+		xfer->rx_buf = NULL;
+		xfer->len = 2;
+
+		xfer++;
+		xfer->tx_buf = NULL;
+		xfer->rx_buf = trx;
+		xfer->len = rxl;
+	}
+
+	ret = spi_sync(ks->spidev, msg);
+	if (ret < 0)
+		ks_err(ks, "read: spi_sync() failed\n");
+	else if (ks8851_rx_1msg(ks))
+		memcpy(rxb, trx + 2, rxl);
+	else
+		memcpy(rxb, trx, rxl);
+}
+
+/**
+ * ks8851_rdreg8 - read 8 bit register from device
+ * @ks: The chip information
+ * @reg: The register address
+ *
+ * Read a 8bit register from the chip, returning the result
+*/
+static unsigned ks8851_rdreg8(struct ks8851_net *ks, unsigned reg)
+{
+	u8 rxb[1];
+
+	ks8851_rdreg(ks, MK_OP(1 << (reg & 3), reg), rxb, 1);
+	return rxb[0];
+}
+
+/**
+ * ks8851_rdreg16 - read 16 bit register from device
+ * @ks: The chip information
+ * @reg: The register address
+ *
+ * Read a 16bit register from the chip, returning the result
+*/
+static unsigned ks8851_rdreg16(struct ks8851_net *ks, unsigned reg)
+{
+	__le16 rx = 0;
+
+	ks8851_rdreg(ks, MK_OP(reg & 2 ? 0xC : 0x3, reg), (u8 *)&rx, 2);
+	return le16_to_cpu(rx);
+}
+
+/**
+ * ks8851_rdreg32 - read 32 bit register from device
+ * @ks: The chip information
+ * @reg: The register address
+ *
+ * Read a 32bit register from the chip.
+ *
+ * Note, this read requires the address be aligned to 4 bytes.
+*/
+static unsigned ks8851_rdreg32(struct ks8851_net *ks, unsigned reg)
+{
+	__le32 rx = 0;
+
+	WARN_ON(reg & 3);
+
+	ks8851_rdreg(ks, MK_OP(0xf, reg), (u8 *)&rx, 4);
+	return le32_to_cpu(rx);
+}
+
+/**
+ * ks8851_soft_reset - issue one of the soft reset to the device
+ * @ks: The device state.
+ * @op: The bit(s) to set in the GRR
+ *
+ * Issue the relevant soft-reset command to the device's GRR register
+ * specified by @op.
+ *
+ * Note, the delays are in there as a caution to ensure that the reset
+ * has time to take effect and then complete. Since the datasheet does
+ * not currently specify the exact sequence, we have chosen something
+ * that seems to work with our device.
+ */
+static void ks8851_soft_reset(struct ks8851_net *ks, unsigned op)
+{
+	ks8851_wrreg16(ks, KS_GRR, op);
+	mdelay(1);	/* wait a short time to effect reset */
+	ks8851_wrreg16(ks, KS_GRR, 0);
+	mdelay(1);	/* wait for condition to clear */
+}
+
+/**
+ * ks8851_write_mac_addr - write mac address to device registers
+ * @dev: The network device
+ *
+ * Update the KS8851 MAC address registers from the address in @dev.
+ *
+ * This call assumes that the chip is not running, so there is no need to
+ * shutdown the RXQ process whilst setting this.
+*/
+static int ks8851_write_mac_addr(struct net_device *dev)
+{
+	struct ks8851_net *ks = netdev_priv(dev);
+	u16 *mcp = (u16 *)dev->dev_addr;
+
+	mutex_lock(&ks->lock);
+
+	ks8851_wrreg16(ks, KS_MARL, mcp[0]);
+	ks8851_wrreg16(ks, KS_MARM, mcp[1]);
+	ks8851_wrreg16(ks, KS_MARH, mcp[2]);
+
+	mutex_unlock(&ks->lock);
+
+	return 0;
+}
+
+/**
+ * ks8851_init_mac - initialise the mac address
+ * @ks: The device structure
+ *
+ * Get or create the initial mac address for the device and then set that
+ * into the station address register. Currently we assume that the device
+ * does not have a valid mac address in it, and so we use random_ether_addr()
+ * to create a new one.
+ *
+ * In future, the driver should check to see if the device has an EEPROM
+ * attached and whether that has a valid ethernet address in it.
+ */
+static void ks8851_init_mac(struct ks8851_net *ks)
+{
+	struct net_device *dev = ks->netdev;
+
+	random_ether_addr(dev->dev_addr);
+	ks8851_write_mac_addr(dev);
+}
+
+/**
+ * ks8851_irq - device interrupt handler
+ * @irq: Interrupt number passed from the IRQ handler.
+ * @pw: The private word passed to request_irq(), our struct ks8851_net.
+ *
+ * Disable the interrupt from happening again until we've processed the
+ * current status by scheduling ks8851_irq_work().
+ */
+static irqreturn_t ks8851_irq(int irq, void *pw)
+{
+	struct ks8851_net *ks = pw;
+
+	disable_irq_nosync(irq);
+	schedule_work(&ks->irq_work);
+	return IRQ_HANDLED;
+}
+
+/**
+ * ks8851_rdfifo - read data from the receive fifo
+ * @ks: The device state.
+ * @buff: The buffer address
+ * @len: The length of the data to read
+ *
+ * Issue an RXQ FIFO read command and read the @len amount of data from
+ * the FIFO into the buffer specified by @buff.
+ */
+static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len)
+{
+	struct spi_transfer *xfer = ks->spi_xfer2;
+	struct spi_message *msg = &ks->spi_msg2;
+	u8 txb[1];
+	int ret;
+
+	if (netif_msg_rx_status(ks))
+		ks_dbg(ks, "%s: %d@%p\n", __func__, len, buff);
+
+	/* set the operation we're issuing */
+	txb[0] = KS_SPIOP_RXFIFO;
+
+	xfer->tx_buf = txb;
+	xfer->rx_buf = NULL;
+	xfer->len = 1;
+
+	xfer++;
+	xfer->rx_buf = buff;
+	xfer->tx_buf = NULL;
+	xfer->len = len;
+
+	ret = spi_sync(ks->spidev, msg);
+	if (ret < 0)
+		ks_err(ks, "%s: spi_sync() failed\n", __func__);
+}
+
+/**
+ * ks8851_dbg_dumpkkt - dump initial packet contents to debug
+ * @ks: The device state
+ * @rxpkt: The data for the received packet
+ *
+ * Dump the initial data from the packet to dev_dbg().
+*/
+static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt)
+{
+	ks_dbg(ks, "pkt %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
+	       rxpkt[4], rxpkt[5], rxpkt[6], rxpkt[7],
+	       rxpkt[8], rxpkt[9], rxpkt[10], rxpkt[11],
+	       rxpkt[12], rxpkt[13], rxpkt[14], rxpkt[15]);
+}
+
+/**
+ * ks8851_rx_pkts - receive packets from the host
+ * @ks: The device information.
+ *
+ * This is called from the IRQ work queue when the system detects that there
+ * are packets in the receive queue. Find out how many packets there are and
+ * read them from the FIFO.
+ */
+static void ks8851_rx_pkts(struct ks8851_net *ks)
+{
+	struct sk_buff *skb;
+	unsigned rxfc;
+	unsigned rxlen;
+	unsigned rxstat;
+	u32 rxh;
+	u8 *rxpkt;
+
+	rxfc = ks8851_rdreg8(ks, KS_RXFC);
+
+	if (netif_msg_rx_status(ks))
+		ks_dbg(ks, "%s: %d packets\n", __func__, rxfc);
+
+	/* Currently we're issuing a read per packet, but we could possibly
+	 * improve the code by issuing a single read, getting the receive
+	 * header, allocating the packet and then reading the packet data
+	 * out in one go.
+	 *
+	 * This form of operation would require us to hold the SPI bus'
+	 * chipselect low during the entire transaction to avoid any
+	 * reset to the data stream coming from the chip.
+	 */
+
+	for (; rxfc != 0; rxfc--) {
+		rxh = ks8851_rdreg32(ks, KS_RXFHSR);
+		rxstat = rxh & 0xffff;
+		rxlen = rxh >> 16;
+
+		if (netif_msg_rx_status(ks))
+			ks_dbg(ks, "rx: stat 0x%04x, len 0x%04x\n",
+				rxstat, rxlen);
+
+		/* the length of the packet includes the 32bit CRC */
+
+		/* set dma read address */
+		ks8851_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI | 0x00);
+
+		/* start the packet dma process, and set auto-dequeue rx */
+		ks8851_wrreg16(ks, KS_RXQCR,
+			       ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE);
+
+		if (rxlen > 0) {
+			skb = netdev_alloc_skb(ks->netdev, rxlen + 2 + 8);
+			if (!skb) {
+				/* todo - dump frame and move on */
+			}
+
+			/* two bytes to ensure ip is aligned, and four bytes
+			 * for the status header and 4 bytes of garbage */
+			skb_reserve(skb, 2 + 4 + 4);
+
+			rxpkt = skb_put(skb, rxlen - 4) - 8;
+
+			/* align the packet length to 4 bytes, and add 4 bytes
+			 * as we're getting the rx status header as well */
+			ks8851_rdfifo(ks, rxpkt, ALIGN(rxlen, 4) + 8);
+
+			if (netif_msg_pktdata(ks))
+				ks8851_dbg_dumpkkt(ks, rxpkt);
+
+			skb->protocol = eth_type_trans(skb, ks->netdev);
+			netif_rx(skb);
+
+			ks->netdev->stats.rx_packets++;
+			ks->netdev->stats.rx_bytes += rxlen - 4;
+		}
+
+		ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
+	}
+}
+
+/**
+ * ks8851_irq_work - work queue handler for dealing with interrupt requests
+ * @work: The work structure that was scheduled by schedule_work()
+ *
+ * This is the work handler scheduled by ks8851_irq() to find out what
+ * happened, since we cannot sleep in the interrupt handler itself whilst
+ * another process may be holding the chip's lock.
+ *
+ * Read the interrupt status, work out what needs to be done and then clear
+ * any of the interrupts that are not needed.
+ */
+static void ks8851_irq_work(struct work_struct *work)
+{
+	struct ks8851_net *ks = container_of(work, struct ks8851_net, irq_work);
+	unsigned status;
+	unsigned handled = 0;
+
+	mutex_lock(&ks->lock);
+
+	status = ks8851_rdreg16(ks, KS_ISR);
+
+	if (netif_msg_intr(ks))
+		dev_dbg(&ks->spidev->dev, "%s: status 0x%04x\n",
+			__func__, status);
+
+	if (status & IRQ_LCI) {
+		/* should do something about checking link status */
+		handled |= IRQ_LCI;
+	}
+
+	if (status & IRQ_LDI) {
+		u16 pmecr = ks8851_rdreg16(ks, KS_PMECR);
+		pmecr &= ~PMECR_WKEVT_MASK;
+		ks8851_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
+
+		handled |= IRQ_LDI;
+	}
+
+	if (status & IRQ_RXPSI)
+		handled |= IRQ_RXPSI;
+
+	if (status & IRQ_TXI) {
+		handled |= IRQ_TXI;
+
+		/* no lock here, tx queue should have been stopped */
+
+		/* update our idea of how much tx space is available to the
+		 * system */
+		ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR);
+
+		if (netif_msg_intr(ks))
+			ks_dbg(ks, "%s: txspace %d\n", __func__, ks->tx_space);
+	}
+
+	if (status & IRQ_RXI)
+		handled |= IRQ_RXI;
+
+	if (status & IRQ_SPIBEI) {
+		dev_err(&ks->spidev->dev, "%s: spi bus error\n", __func__);
+		handled |= IRQ_SPIBEI;
+	}
+
+	ks8851_wrreg16(ks, KS_ISR, handled);
+
+	if (status & IRQ_RXI) {
+		/* the datasheet says to disable the rx interrupt during
+		 * packet read-out, however we're masking the interrupt
+		 * from the device so do not bother masking just the RX
+		 * from the device. */
+
+		ks8851_rx_pkts(ks);
+	}
+
+	/* if something stopped the rx process, probably due to wanting
+	 * to change the rx settings, then do something about restarting
+	 * it. */
+	if (status & IRQ_RXPSI) {
+		struct ks8851_rxctrl *rxc = &ks->rxctrl;
+
+		/* update the multicast hash table */
+		ks8851_wrreg16(ks, KS_MAHTR0, rxc->mchash[0]);
+		ks8851_wrreg16(ks, KS_MAHTR1, rxc->mchash[1]);
+		ks8851_wrreg16(ks, KS_MAHTR2, rxc->mchash[2]);
+		ks8851_wrreg16(ks, KS_MAHTR3, rxc->mchash[3]);
+
+		ks8851_wrreg16(ks, KS_RXCR2, rxc->rxcr2);
+		ks8851_wrreg16(ks, KS_RXCR1, rxc->rxcr1);
+	}
+
+	mutex_unlock(&ks->lock);
+
+	if (status & IRQ_TXI)
+		netif_wake_queue(ks->netdev);
+
+	enable_irq(ks->netdev->irq);
+}
+
+/**
+ * calc_txlen - calculate size of message to send packet
+ * @len: Length of data
+ *
+ * Returns the size of the TXFIFO message needed to send
+ * this packet.
+ */
+static inline unsigned calc_txlen(unsigned len)
+{
+	return ALIGN(len + 4, 4);
+}
+
+/**
+ * ks8851_wrpkt - write packet to TX FIFO
+ * @ks: The device state.
+ * @txp: The sk_buff to transmit.
+ * @irq: IRQ on completion of the packet.
+ *
+ * Send the @txp to the chip. This means creating the relevant packet header
+ * specifying the length of the packet and the other information the chip
+ * needs, such as IRQ on completion. Send the header and the packet data to
+ * the device.
+ */
+static void ks8851_wrpkt(struct ks8851_net *ks, struct sk_buff *txp, bool irq)
+{
+	struct spi_transfer *xfer = ks->spi_xfer2;
+	struct spi_message *msg = &ks->spi_msg2;
+	unsigned fid = 0;
+	int ret;
+
+	if (netif_msg_tx_queued(ks))
+		dev_dbg(&ks->spidev->dev, "%s: skb %p, %d@%p, irq %d\n",
+			__func__, txp, txp->len, txp->data, irq);
+
+	fid = ks->fid++;
+	fid &= TXFR_TXFID_MASK;
+
+	if (irq)
+		fid |= TXFR_TXIC;	/* irq on completion */
+
+	/* start header at txb[1] to align txw entries */
+	ks->txh.txb[1] = KS_SPIOP_TXFIFO;
+	ks->txh.txw[1] = cpu_to_le16(fid);
+	ks->txh.txw[2] = cpu_to_le16(txp->len);
+
+	xfer->tx_buf = &ks->txh.txb[1];
+	xfer->rx_buf = NULL;
+	xfer->len = 5;
+
+	xfer++;
+	xfer->tx_buf = txp->data;
+	xfer->rx_buf = NULL;
+	xfer->len = ALIGN(txp->len, 4);
+
+	ret = spi_sync(ks->spidev, msg);
+	if (ret < 0)
+		ks_err(ks, "%s: spi_sync() failed\n", __func__);
+}
+
+/**
+ * ks8851_done_tx - update and then free skbuff after transmitting
+ * @ks: The device state
+ * @txb: The buffer transmitted
+ */
+static void ks8851_done_tx(struct ks8851_net *ks, struct sk_buff *txb)
+{
+	struct net_device *dev = ks->netdev;
+
+	dev->stats.tx_bytes += txb->len;
+	dev->stats.tx_packets++;
+
+	dev_kfree_skb(txb);
+}
+
+/**
+ * ks8851_tx_work - process tx packet(s)
+ * @work: The work structure that was scheduled.
+ *
+ * This is called when a number of packets have been scheduled for
+ * transmission and need to be sent to the device.
+ */
+static void ks8851_tx_work(struct work_struct *work)
+{
+	struct ks8851_net *ks = container_of(work, struct ks8851_net, tx_work);
+	struct sk_buff *txb;
+	bool last = false;
+
+	mutex_lock(&ks->lock);
+
+	while (!last) {
+		txb = skb_dequeue(&ks->txq);
+		last = skb_queue_empty(&ks->txq);
+
+		ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
+		ks8851_wrpkt(ks, txb, last);
+		ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
+		ks8851_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
+
+		ks8851_done_tx(ks, txb);
+	}
+
+	mutex_unlock(&ks->lock);
+}
+
+/**
+ * ks8851_set_powermode - set power mode of the device
+ * @ks: The device state
+ * @pwrmode: The power mode value to write to KS_PMECR.
+ *
+ * Change the power mode of the chip.
+ */
+static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode)
+{
+	unsigned pmecr;
+
+	if (netif_msg_hw(ks))
+		ks_dbg(ks, "setting power mode %d\n", pwrmode);
+
+	pmecr = ks8851_rdreg16(ks, KS_PMECR);
+	pmecr &= ~PMECR_PM_MASK;
+	pmecr |= pwrmode;
+
+	ks8851_wrreg16(ks, KS_PMECR, pmecr);
+}
+
+/**
+ * ks8851_net_open - open network device
+ * @dev: The network device being opened.
+ *
+ * Called when the network device is marked active, such as a user executing
+ * 'ifconfig up' on the device.
+ */
+static int ks8851_net_open(struct net_device *dev)
+{
+	struct ks8851_net *ks = netdev_priv(dev);
+
+	/* lock the card, even if we may not actually be doing anything
+	 * else at the moment */
+	mutex_lock(&ks->lock);
+
+	if (netif_msg_ifup(ks))
+		ks_dbg(ks, "opening %s\n", dev->name);
+
+	/* bring chip out of any power saving mode it was in */
+	ks8851_set_powermode(ks, PMECR_PM_NORMAL);
+
+	/* issue a soft reset to the RX/TX QMU to put it into a known
+	 * state. */
+	ks8851_soft_reset(ks, GRR_QMU);
+
+	/* setup transmission parameters */
+
+	ks8851_wrreg16(ks, KS_TXCR, (TXCR_TXE | /* enable transmit process */
+				     TXCR_TXPE | /* pad to min length */
+				     TXCR_TXCRC | /* add CRC */
+				     TXCR_TXFCE)); /* enable flow control */
+
+	/* auto-increment tx data, reset tx pointer */
+	ks8851_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI);
+
+	/* setup receiver control */
+
+	ks8851_wrreg16(ks, KS_RXCR1, (RXCR1_RXPAFMA | /*  from mac filter */
+				      RXCR1_RXFCE | /* enable flow control */
+				      RXCR1_RXBE | /* broadcast enable */
+				      RXCR1_RXUE | /* unicast enable */
+				      RXCR1_RXE)); /* enable rx block */
+
+	/* transfer entire frames out in one go */
+	ks8851_wrreg16(ks, KS_RXCR2, RXCR2_SRDBL_FRAME);
+
+	/* set receive counter timeouts */
+	ks8851_wrreg16(ks, KS_RXDTTR, 1000); /* 1ms after first frame to IRQ */
+	ks8851_wrreg16(ks, KS_RXDBCTR, 4096); /* >4Kbytes in buffer to IRQ */
+	ks8851_wrreg16(ks, KS_RXFCTR, 10);  /* 10 frames to IRQ */
+
+	ks->rc_rxqcr = (RXQCR_RXFCTE |  /* IRQ on frame count exceeded */
+			RXQCR_RXDBCTE | /* IRQ on byte count exceeded */
+			RXQCR_RXDTTE);  /* IRQ on time exceeded */
+
+	ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
+
+	/* clear then enable interrupts */
+
+#define STD_IRQ (IRQ_LCI |	/* Link Change */	\
+		 IRQ_TXI |	/* TX done */		\
+		 IRQ_RXI |	/* RX done */		\
+		 IRQ_SPIBEI |	/* SPI bus error */	\
+		 IRQ_TXPSI |	/* TX process stop */	\
+		 IRQ_RXPSI)	/* RX process stop */
+
+	ks->rc_ier = STD_IRQ;
+	ks8851_wrreg16(ks, KS_ISR, STD_IRQ);
+	ks8851_wrreg16(ks, KS_IER, STD_IRQ);
+
+	netif_start_queue(ks->netdev);
+
+	if (netif_msg_ifup(ks))
+		ks_dbg(ks, "network device %s up\n", dev->name);
+
+	mutex_unlock(&ks->lock);
+	return 0;
+}
+
+/**
+ * ks8851_net_stop - close network device
+ * @dev: The device being closed.
+ *
+ * Called to close down a network device which has been active. Cancel any
+ * work, shutdown the RX and TX process and then place the chip into a low
+ * power state whilst it is not being used.
+ */
+static int ks8851_net_stop(struct net_device *dev)
+{
+	struct ks8851_net *ks = netdev_priv(dev);
+
+	if (netif_msg_ifdown(ks))
+		ks_info(ks, "%s: shutting down\n", dev->name);
+
+	netif_stop_queue(dev);
+
+	mutex_lock(&ks->lock);
+
+	/* stop any outstanding work */
+	flush_work(&ks->irq_work);
+	flush_work(&ks->tx_work);
+	flush_work(&ks->rxctrl_work);
+
+	/* turn off the IRQs and ack any outstanding */
+	ks8851_wrreg16(ks, KS_IER, 0x0000);
+	ks8851_wrreg16(ks, KS_ISR, 0xffff);
+
+	/* shutdown RX process */
+	ks8851_wrreg16(ks, KS_RXCR1, 0x0000);
+
+	/* shutdown TX process */
+	ks8851_wrreg16(ks, KS_TXCR, 0x0000);
+
+	/* set powermode to soft power down to save power */
+	ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN);
+
+	/* ensure any queued tx buffers are dumped */
+	while (!skb_queue_empty(&ks->txq)) {
+		struct sk_buff *txb = skb_dequeue(&ks->txq);
+
+		if (netif_msg_ifdown(ks))
+			ks_dbg(ks, "%s: freeing txb %p\n", __func__, txb);
+
+		dev_kfree_skb(txb);
+	}
+
+	mutex_unlock(&ks->lock);
+	return 0;
+}
+
+/**
+ * ks8851_start_xmit - transmit packet
+ * @skb: The buffer to transmit
+ * @dev: The device used to transmit the packet.
+ *
+ * Called by the network layer to transmit the @skb. Queue the packet for
+ * the device and schedule the necessary work to transmit the packet when
+ * it is free.
+ *
+ * We do this firstly to avoid sleeping with the network device lock held,
+ * and secondly so that we can batch up more than one packet to transmit,
+ * which helps us avoid generating too many transmit-done interrupts.
+ */
+static int ks8851_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ks8851_net *ks = netdev_priv(dev);
+	unsigned needed = calc_txlen(skb->len);
+	int ret = NETDEV_TX_OK;
+
+	if (netif_msg_tx_queued(ks))
+		ks_dbg(ks, "%s: skb %p, %d@%p\n", __func__,
+		       skb, skb->len, skb->data);
+
+	spin_lock(&ks->statelock);
+
+	if (needed > ks->tx_space) {
+		netif_stop_queue(dev);
+		ret = NETDEV_TX_BUSY;
+	} else {
+		ks->tx_space -= needed;
+		skb_queue_tail(&ks->txq, skb);
+	}
+
+	spin_unlock(&ks->statelock);
+	schedule_work(&ks->tx_work);
+
+	return ret;
+}
+
+/**
+ * ks8851_rxctrl_work - work handler to change rx mode
+ * @work: The work structure this belongs to.
+ *
+ * Lock the device and issue the changes to the receive mode that the
+ * network device layer has requested. Doing this from a work item means
+ * we do not have to sleep whilst holding the network device lock.
+ *
+ * Since the recommendation from Micrel is that the RXQ is shut down whilst the
+ * receive parameters are programmed, we issue a write to disable the RXQ and
+ * then wait for the interrupt handler to be triggered once the RXQ shutdown is
+ * complete. The interrupt handler then writes the new values into the chip.
+ */
+static void ks8851_rxctrl_work(struct work_struct *work)
+{
+	struct ks8851_net *ks = container_of(work, struct ks8851_net, rxctrl_work);
+
+	mutex_lock(&ks->lock);
+
+	/* need to shutdown RXQ before modifying filter parameters */
+	ks8851_wrreg16(ks, KS_RXCR1, 0x00);
+
+	mutex_unlock(&ks->lock);
+}
+
+static void ks8851_set_rx_mode(struct net_device *dev)
+{
+	struct ks8851_net *ks = netdev_priv(dev);
+	struct ks8851_rxctrl rxctrl;
+
+	memset(&rxctrl, 0, sizeof(rxctrl));
+
+	if (dev->flags & IFF_PROMISC) {
+		/* interface to receive everything */
+
+		rxctrl.rxcr1 = RXCR1_RXAE | RXCR1_RXINVF;
+	} else if (dev->flags & IFF_ALLMULTI) {
+		/* accept all multicast packets */
+
+		rxctrl.rxcr1 = (RXCR1_RXME | RXCR1_RXAE |
+				RXCR1_RXPAFMA | RXCR1_RXMAFMA);
+	} else if (dev->flags & IFF_MULTICAST && dev->mc_count > 0) {
+		struct dev_mc_list *mcptr = dev->mc_list;
+		u32 crc;
+		int i;
+
+		/* accept some multicast */
+
+		for (i = dev->mc_count; i > 0; i--) {
+			crc = ether_crc(ETH_ALEN, mcptr->dmi_addr);
+			crc >>= (32 - 6);  /* get top six bits */
+
+			rxctrl.mchash[crc >> 4] |= (1 << (crc & 0xf));
+			mcptr = mcptr->next;
+		}
+
+		rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXAE | RXCR1_RXPAFMA;
+	} else {
+		/* just accept broadcast / unicast */
+		rxctrl.rxcr1 = RXCR1_RXPAFMA;
+	}
+
+	rxctrl.rxcr1 |= (RXCR1_RXUE | /* unicast enable */
+			 RXCR1_RXBE | /* broadcast enable */
+			 RXCR1_RXE | /* RX process enable */
+			 RXCR1_RXFCE); /* enable flow control */
+
+	rxctrl.rxcr2 |= RXCR2_SRDBL_FRAME;
+
+	/* schedule work to do the actual set of the data if needed */
+
+	spin_lock(&ks->statelock);
+
+	if (memcmp(&rxctrl, &ks->rxctrl, sizeof(rxctrl)) != 0) {
+		memcpy(&ks->rxctrl, &rxctrl, sizeof(ks->rxctrl));
+		schedule_work(&ks->rxctrl_work);
+	}
+
+	spin_unlock(&ks->statelock);
+}
+
+static int ks8851_set_mac_address(struct net_device *dev, void *addr)
+{
+	struct sockaddr *sa = addr;
+
+	if (netif_running(dev))
+		return -EBUSY;
+
+	if (!is_valid_ether_addr(sa->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+	return ks8851_write_mac_addr(dev);
+}
+
+static int ks8851_net_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+	struct ks8851_net *ks = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return -EINVAL;
+
+	return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
+}
+
+static const struct net_device_ops ks8851_netdev_ops = {
+	.ndo_open		= ks8851_net_open,
+	.ndo_stop		= ks8851_net_stop,
+	.ndo_do_ioctl		= ks8851_net_ioctl,
+	.ndo_start_xmit		= ks8851_start_xmit,
+	.ndo_set_mac_address	= ks8851_set_mac_address,
+	.ndo_set_rx_mode	= ks8851_set_rx_mode,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
+/* ethtool support */
+
+static void ks8851_get_drvinfo(struct net_device *dev,
+			       struct ethtool_drvinfo *di)
+{
+	strlcpy(di->driver, "KS8851", sizeof(di->driver));
+	strlcpy(di->version, "1.00", sizeof(di->version));
+	strlcpy(di->bus_info, dev_name(dev->dev.parent), sizeof(di->bus_info));
+}
+
+static u32 ks8851_get_msglevel(struct net_device *dev)
+{
+	struct ks8851_net *ks = netdev_priv(dev);
+	return ks->msg_enable;
+}
+
+static void ks8851_set_msglevel(struct net_device *dev, u32 to)
+{
+	struct ks8851_net *ks = netdev_priv(dev);
+	ks->msg_enable = to;
+}
+
+static int ks8851_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct ks8851_net *ks = netdev_priv(dev);
+	return mii_ethtool_gset(&ks->mii, cmd);
+}
+
+static int ks8851_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct ks8851_net *ks = netdev_priv(dev);
+	return mii_ethtool_sset(&ks->mii, cmd);
+}
+
+static u32 ks8851_get_link(struct net_device *dev)
+{
+	struct ks8851_net *ks = netdev_priv(dev);
+	return mii_link_ok(&ks->mii);
+}
+
+static int ks8851_nway_reset(struct net_device *dev)
+{
+	struct ks8851_net *ks = netdev_priv(dev);
+	return mii_nway_restart(&ks->mii);
+}
+
+static const struct ethtool_ops ks8851_ethtool_ops = {
+	.get_drvinfo	= ks8851_get_drvinfo,
+	.get_msglevel	= ks8851_get_msglevel,
+	.set_msglevel	= ks8851_set_msglevel,
+	.get_settings	= ks8851_get_settings,
+	.set_settings	= ks8851_set_settings,
+	.get_link	= ks8851_get_link,
+	.nway_reset	= ks8851_nway_reset,
+};
+
+/* MII interface controls */
+
+/**
+ * ks8851_phy_reg - convert MII register into a KS8851 register
+ * @reg: MII register number.
+ *
+ * Return the KS8851 register number for the corresponding MII PHY register
+ * if possible. Return zero if the MII register has no direct mapping to the
+ * KS8851 register set.
+ */
+static int ks8851_phy_reg(int reg)
+{
+	switch (reg) {
+	case MII_BMCR:
+		return KS_P1MBCR;
+	case MII_BMSR:
+		return KS_P1MBSR;
+	case MII_PHYSID1:
+		return KS_PHY1ILR;
+	case MII_PHYSID2:
+		return KS_PHY1IHR;
+	case MII_ADVERTISE:
+		return KS_P1ANAR;
+	case MII_LPA:
+		return KS_P1ANLPR;
+	}
+
+	return 0x0;
+}
+
+/**
+ * ks8851_phy_read - MII interface PHY register read.
+ * @dev: The network device the PHY is on.
+ * @phy_addr: Address of PHY (ignored as we only have one)
+ * @reg: The register to read.
+ *
+ * This call reads data from the PHY register specified in @reg. Since the
+ * device does not support all the MII registers, the non-existent values
+ * are always returned as zero.
+ *
+ * We return zero for unsupported registers as the MII code does not check
+ * the value returned for any error status, and simply returns it to the
+ * caller. The mii-tool that the driver was tested with treats any negative
+ * error value as real PHY capability bits, thus displaying incorrect data
+ * to the user.
+ */
+static int ks8851_phy_read(struct net_device *dev, int phy_addr, int reg)
+{
+	struct ks8851_net *ks = netdev_priv(dev);
+	int ksreg;
+	int result;
+
+	ksreg = ks8851_phy_reg(reg);
+	if (!ksreg)
+		return 0x0;	/* no error return allowed, so use zero */
+
+	mutex_lock(&ks->lock);
+	result = ks8851_rdreg16(ks, ksreg);
+	mutex_unlock(&ks->lock);
+
+	return result;
+}
+
+static void ks8851_phy_write(struct net_device *dev,
+			     int phy, int reg, int value)
+{
+	struct ks8851_net *ks = netdev_priv(dev);
+	int ksreg;
+
+	ksreg = ks8851_phy_reg(reg);
+	if (ksreg) {
+		mutex_lock(&ks->lock);
+		ks8851_wrreg16(ks, ksreg, value);
+		mutex_unlock(&ks->lock);
+	}
+}
+
+/**
+ * ks8851_read_selftest - read the selftest memory info.
+ * @ks: The device state
+ *
+ * Read and check the TX/RX memory selftest information.
+ */
+static int ks8851_read_selftest(struct ks8851_net *ks)
+{
+	unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
+	int ret = 0;
+	unsigned rd;
+
+	rd = ks8851_rdreg16(ks, KS_MBIR);
+
+	if ((rd & both_done) != both_done) {
+		ks_warn(ks, "Memory selftest not finished\n");
+		return 0;
+	}
+
+	if (rd & MBIR_TXMBFA) {
+		ks_err(ks, "TX memory selftest fail\n");
+		ret |= 1;
+	}
+
+	if (rd & MBIR_RXMBFA) {
+		ks_err(ks, "RX memory selftest fail\n");
+		ret |= 2;
+	}
+
+	return ret;
+}
+
+/* driver bus management functions */
+
+static int __devinit ks8851_probe(struct spi_device *spi)
+{
+	struct net_device *ndev;
+	struct ks8851_net *ks;
+	int ret;
+
+	ndev = alloc_etherdev(sizeof(struct ks8851_net));
+	if (!ndev) {
+		dev_err(&spi->dev, "failed to alloc ethernet device\n");
+		return -ENOMEM;
+	}
+
+	spi->bits_per_word = 8;
+
+	ks = netdev_priv(ndev);
+
+	ks->netdev = ndev;
+	ks->spidev = spi;
+	ks->tx_space = 6144;
+
+	mutex_init(&ks->lock);
+	spin_lock_init(&ks->statelock);
+
+	INIT_WORK(&ks->tx_work, ks8851_tx_work);
+	INIT_WORK(&ks->irq_work, ks8851_irq_work);
+	INIT_WORK(&ks->rxctrl_work, ks8851_rxctrl_work);
+
+	/* initialise pre-made spi transfer messages */
+
+	spi_message_init(&ks->spi_msg1);
+	spi_message_add_tail(&ks->spi_xfer1, &ks->spi_msg1);
+
+	spi_message_init(&ks->spi_msg2);
+	spi_message_add_tail(&ks->spi_xfer2[0], &ks->spi_msg2);
+	spi_message_add_tail(&ks->spi_xfer2[1], &ks->spi_msg2);
+
+	/* setup mii state */
+	ks->mii.dev		= ndev;
+	ks->mii.phy_id		= 1;
+	ks->mii.phy_id_mask	= 1;
+	ks->mii.reg_num_mask	= 0xf;
+	ks->mii.mdio_read	= ks8851_phy_read;
+	ks->mii.mdio_write	= ks8851_phy_write;
+
+	dev_info(&spi->dev, "message enable is %d\n", msg_enable);
+
+	/* set the default message enable */
+	ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
+						     NETIF_MSG_PROBE |
+						     NETIF_MSG_LINK));
+
+	skb_queue_head_init(&ks->txq);
+
+	SET_ETHTOOL_OPS(ndev, &ks8851_ethtool_ops);
+	SET_NETDEV_DEV(ndev, &spi->dev);
+
+	dev_set_drvdata(&spi->dev, ks);
+
+	ndev->if_port = IF_PORT_100BASET;
+	ndev->netdev_ops = &ks8851_netdev_ops;
+	ndev->irq = spi->irq;
+
+	/* simple check for a valid chip being connected to the bus */
+
+	if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
+		dev_err(&spi->dev, "failed to read device ID\n");
+		ret = -ENODEV;
+		goto err_id;
+	}
+
+	ks8851_read_selftest(ks);
+	ks8851_init_mac(ks);
+
+	ret = request_irq(spi->irq, ks8851_irq, IRQF_TRIGGER_LOW,
+			  ndev->name, ks);
+	if (ret < 0) {
+		dev_err(&spi->dev, "failed to get irq\n");
+		goto err_irq;
+	}
+
+	ret = register_netdev(ndev);
+	if (ret) {
+		dev_err(&spi->dev, "failed to register network device\n");
+		goto err_netdev;
+	}
+
+	dev_info(&spi->dev, "revision %d, MAC %pM, IRQ %d\n",
+		 CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)),
+		 ndev->dev_addr, ndev->irq);
+
+	return 0;
+
+
+err_netdev:
+	free_irq(ndev->irq, ks);
+
+err_id:
+err_irq:
+	free_netdev(ndev);
+	return ret;
+}
+
+static int __devexit ks8851_remove(struct spi_device *spi)
+{
+	struct ks8851_net *priv = dev_get_drvdata(&spi->dev);
+
+	if (netif_msg_drv(priv))
+		dev_info(&spi->dev, "remove");
+
+	unregister_netdev(priv->netdev);
+	free_irq(spi->irq, priv);
+	free_netdev(priv->netdev);
+
+	return 0;
+}
+
+static struct spi_driver ks8851_driver = {
+	.driver = {
+		.name = "ks8851",
+		.owner = THIS_MODULE,
+	},
+	.probe = ks8851_probe,
+	.remove = __devexit_p(ks8851_remove),
+};
+
+static int __init ks8851_init(void)
+{
+	return spi_register_driver(&ks8851_driver);
+}
+
+static void __exit ks8851_exit(void)
+{
+	spi_unregister_driver(&ks8851_driver);
+}
+
+module_init(ks8851_init);
+module_exit(ks8851_exit);
+
+MODULE_DESCRIPTION("KS8851 Network driver");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_LICENSE("GPL");
+
+module_param_named(message, msg_enable, int, 0);
+MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
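
A board would normally declare the chip in its SPI board info so that
ks8851_probe() is bound by modalias and picks up its interrupt line from
spi->irq (requested with IRQF_TRIGGER_LOW, so the line must be wired
accordingly). The fragment below is a hypothetical board-file sketch, not
part of this patch: the bus number, chip select, clock rate and GPIO used
for the interrupt are placeholders that depend entirely on the board wiring.

	/* Hypothetical board support sketch for wiring up a KS8851 on SPI.
	 * All numbers here are placeholders, not values from this patch. */
	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/gpio.h>
	#include <linux/spi/spi.h>

	#define BOARD_KS8851_IRQ_GPIO	16	/* placeholder GPIO line */

	static struct spi_board_info board_spi_devices[] __initdata = {
		{
			.modalias	= "ks8851",	/* matches ks8851_driver.driver.name */
			.bus_num	= 0,
			.chip_select	= 0,
			.max_speed_hz	= 10000000,
			.mode		= SPI_MODE_0,
		},
	};

	static int __init board_register_ks8851(void)
	{
		/* the driver requests this IRQ in ks8851_probe() */
		board_spi_devices[0].irq = gpio_to_irq(BOARD_KS8851_IRQ_GPIO);
		return spi_register_board_info(board_spi_devices,
					       ARRAY_SIZE(board_spi_devices));
	}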

+ 296 - 0
drivers/net/ks8851.h

@@ -0,0 +1,296 @@
+/* drivers/net/ks8851.h
+ *
+ * Copyright 2009 Simtec Electronics
+ *      Ben Dooks <ben@simtec.co.uk>
+ *
+ * KS8851 register definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#define KS_CCR					0x08
+#define CCR_EEPROM				(1 << 9)
+#define CCR_SPI					(1 << 8)
+#define CCR_32PIN				(1 << 0)
+
+/* MAC address registers */
+#define KS_MARL					0x10
+#define KS_MARM					0x12
+#define KS_MARH					0x14
+
+#define KS_OBCR					0x20
+#define OBCR_ODS_16mA				(1 << 6)
+
+#define KS_EEPCR				0x22
+#define EEPCR_EESA				(1 << 4)
+#define EEPCR_EESB				(1 << 3)
+#define EEPCR_EEDO				(1 << 2)
+#define EEPCR_EESCK				(1 << 1)
+#define EEPCR_EECS				(1 << 0)
+
+#define KS_MBIR					0x24
+#define MBIR_TXMBF				(1 << 12)
+#define MBIR_TXMBFA				(1 << 11)
+#define MBIR_RXMBF				(1 << 4)
+#define MBIR_RXMBFA				(1 << 3)
+
+#define KS_GRR					0x26
+#define GRR_QMU					(1 << 1)
+#define GRR_GSR					(1 << 0)
+
+#define KS_WFCR					0x2A
+#define WFCR_MPRXE				(1 << 7)
+#define WFCR_WF3E				(1 << 3)
+#define WFCR_WF2E				(1 << 2)
+#define WFCR_WF1E				(1 << 1)
+#define WFCR_WF0E				(1 << 0)
+
+#define KS_WF0CRC0				0x30
+#define KS_WF0CRC1				0x32
+#define KS_WF0BM0				0x34
+#define KS_WF0BM1				0x36
+#define KS_WF0BM2				0x38
+#define KS_WF0BM3				0x3A
+
+#define KS_WF1CRC0				0x40
+#define KS_WF1CRC1				0x42
+#define KS_WF1BM0				0x44
+#define KS_WF1BM1				0x46
+#define KS_WF1BM2				0x48
+#define KS_WF1BM3				0x4A
+
+#define KS_WF2CRC0				0x50
+#define KS_WF2CRC1				0x52
+#define KS_WF2BM0				0x54
+#define KS_WF2BM1				0x56
+#define KS_WF2BM2				0x58
+#define KS_WF2BM3				0x5A
+
+#define KS_WF3CRC0				0x60
+#define KS_WF3CRC1				0x62
+#define KS_WF3BM0				0x64
+#define KS_WF3BM1				0x66
+#define KS_WF3BM2				0x68
+#define KS_WF3BM3				0x6A
+
+#define KS_TXCR					0x70
+#define TXCR_TCGICMP				(1 << 8)
+#define TXCR_TCGUDP				(1 << 7)
+#define TXCR_TCGTCP				(1 << 6)
+#define TXCR_TCGIP				(1 << 5)
+#define TXCR_FTXQ				(1 << 4)
+#define TXCR_TXFCE				(1 << 3)
+#define TXCR_TXPE				(1 << 2)
+#define TXCR_TXCRC				(1 << 1)
+#define TXCR_TXE				(1 << 0)
+
+#define KS_TXSR					0x72
+#define TXSR_TXLC				(1 << 13)
+#define TXSR_TXMC				(1 << 12)
+#define TXSR_TXFID_MASK				(0x3f << 0)
+#define TXSR_TXFID_SHIFT			(0)
+#define TXSR_TXFID_GET(_v)			(((_v) >> 0) & 0x3f)
+
+#define KS_RXCR1				0x74
+#define RXCR1_FRXQ				(1 << 15)
+#define RXCR1_RXUDPFCC				(1 << 14)
+#define RXCR1_RXTCPFCC				(1 << 13)
+#define RXCR1_RXIPFCC				(1 << 12)
+#define RXCR1_RXPAFMA				(1 << 11)
+#define RXCR1_RXFCE				(1 << 10)
+#define RXCR1_RXEFE				(1 << 9)
+#define RXCR1_RXMAFMA				(1 << 8)
+#define RXCR1_RXBE				(1 << 7)
+#define RXCR1_RXME				(1 << 6)
+#define RXCR1_RXUE				(1 << 5)
+#define RXCR1_RXAE				(1 << 4)
+#define RXCR1_RXINVF				(1 << 1)
+#define RXCR1_RXE				(1 << 0)
+
+#define KS_RXCR2				0x76
+#define RXCR2_SRDBL_MASK			(0x7 << 5)
+#define RXCR2_SRDBL_SHIFT			(5)
+#define RXCR2_SRDBL_4B				(0x0 << 5)
+#define RXCR2_SRDBL_8B				(0x1 << 5)
+#define RXCR2_SRDBL_16B				(0x2 << 5)
+#define RXCR2_SRDBL_32B				(0x3 << 5)
+#define RXCR2_SRDBL_FRAME			(0x4 << 5)
+#define RXCR2_IUFFP				(1 << 4)
+#define RXCR2_RXIUFCEZ				(1 << 3)
+#define RXCR2_UDPLFE				(1 << 2)
+#define RXCR2_RXICMPFCC				(1 << 1)
+#define RXCR2_RXSAF				(1 << 0)
+
+#define KS_TXMIR				0x78
+
+#define KS_RXFHSR				0x7C
+#define RXFSHR_RXFV				(1 << 15)
+#define RXFSHR_RXICMPFCS			(1 << 13)
+#define RXFSHR_RXIPFCS				(1 << 12)
+#define RXFSHR_RXTCPFCS				(1 << 11)
+#define RXFSHR_RXUDPFCS				(1 << 10)
+#define RXFSHR_RXBF				(1 << 7)
+#define RXFSHR_RXMF				(1 << 6)
+#define RXFSHR_RXUF				(1 << 5)
+#define RXFSHR_RXMR				(1 << 4)
+#define RXFSHR_RXFT				(1 << 3)
+#define RXFSHR_RXFTL				(1 << 2)
+#define RXFSHR_RXRF				(1 << 1)
+#define RXFSHR_RXCE				(1 << 0)
+
+#define KS_RXFHBCR				0x7E
+#define KS_TXQCR				0x80
+#define TXQCR_AETFE				(1 << 2)
+#define TXQCR_TXQMAM				(1 << 1)
+#define TXQCR_METFE				(1 << 0)
+
+#define KS_RXQCR				0x82
+#define RXQCR_RXDTTS				(1 << 12)
+#define RXQCR_RXDBCTS				(1 << 11)
+#define RXQCR_RXFCTS				(1 << 10)
+#define RXQCR_RXIPHTOE				(1 << 9)
+#define RXQCR_RXDTTE				(1 << 7)
+#define RXQCR_RXDBCTE				(1 << 6)
+#define RXQCR_RXFCTE				(1 << 5)
+#define RXQCR_ADRFE				(1 << 4)
+#define RXQCR_SDA				(1 << 3)
+#define RXQCR_RRXEF				(1 << 0)
+
+#define KS_TXFDPR				0x84
+#define TXFDPR_TXFPAI				(1 << 14)
+#define TXFDPR_TXFP_MASK			(0x7ff << 0)
+#define TXFDPR_TXFP_SHIFT			(0)
+
+#define KS_RXFDPR				0x86
+#define RXFDPR_RXFPAI				(1 << 14)
+
+#define KS_RXDTTR				0x8C
+#define KS_RXDBCTR				0x8E
+
+#define KS_IER					0x90
+#define KS_ISR					0x92
+#define IRQ_LCI					(1 << 15)
+#define IRQ_TXI					(1 << 14)
+#define IRQ_RXI					(1 << 13)
+#define IRQ_RXOI				(1 << 11)
+#define IRQ_TXPSI				(1 << 9)
+#define IRQ_RXPSI				(1 << 8)
+#define IRQ_TXSAI				(1 << 6)
+#define IRQ_RXWFDI				(1 << 5)
+#define IRQ_RXMPDI				(1 << 4)
+#define IRQ_LDI					(1 << 3)
+#define IRQ_EDI					(1 << 2)
+#define IRQ_SPIBEI				(1 << 1)
+#define IRQ_DEDI				(1 << 0)
+
+#define KS_RXFCTR				0x9C
+#define KS_RXFC					0x9D
+#define RXFCTR_RXFC_MASK			(0xff << 8)
+#define RXFCTR_RXFC_SHIFT			(8)
+#define RXFCTR_RXFC_GET(_v)			(((_v) >> 8) & 0xff)
+#define RXFCTR_RXFCT_MASK			(0xff << 0)
+#define RXFCTR_RXFCT_SHIFT			(0)
+
+#define KS_TXNTFSR				0x9E
+
+#define KS_MAHTR0				0xA0
+#define KS_MAHTR1				0xA2
+#define KS_MAHTR2				0xA4
+#define KS_MAHTR3				0xA6
+
+#define KS_FCLWR				0xB0
+#define KS_FCHWR				0xB2
+#define KS_FCOWR				0xB4
+
+#define KS_CIDER				0xC0
+#define CIDER_ID				0x8870
+#define CIDER_REV_MASK				(0x7 << 1)
+#define CIDER_REV_SHIFT				(1)
+#define CIDER_REV_GET(_v)			(((_v) >> 1) & 0x7)
+
+#define KS_CGCR					0xC6
+
+#define KS_IACR					0xC8
+#define IACR_RDEN				(1 << 12)
+#define IACR_TSEL_MASK				(0x3 << 10)
+#define IACR_TSEL_SHIFT				(10)
+#define IACR_TSEL_MIB				(0x3 << 10)
+#define IACR_ADDR_MASK				(0x1f << 0)
+#define IACR_ADDR_SHIFT				(0)
+
+#define KS_IADLR				0xD0
+#define KS_IAHDR				0xD2
+
+#define KS_PMECR				0xD4
+#define PMECR_PME_DELAY				(1 << 14)
+#define PMECR_PME_POL				(1 << 12)
+#define PMECR_WOL_WAKEUP			(1 << 11)
+#define PMECR_WOL_MAGICPKT			(1 << 10)
+#define PMECR_WOL_LINKUP			(1 << 9)
+#define PMECR_WOL_ENERGY			(1 << 8)
+#define PMECR_AUTO_WAKE_EN			(1 << 7)
+#define PMECR_WAKEUP_NORMAL			(1 << 6)
+#define PMECR_WKEVT_MASK			(0xf << 2)
+#define PMECR_WKEVT_SHIFT			(2)
+#define PMECR_WKEVT_GET(_v)			(((_v) >> 2) & 0xf)
+#define PMECR_WKEVT_ENERGY			(0x1 << 2)
+#define PMECR_WKEVT_LINK			(0x2 << 2)
+#define PMECR_WKEVT_MAGICPKT			(0x4 << 2)
+#define PMECR_WKEVT_FRAME			(0x8 << 2)
+#define PMECR_PM_MASK				(0x3 << 0)
+#define PMECR_PM_SHIFT				(0)
+#define PMECR_PM_NORMAL				(0x0 << 0)
+#define PMECR_PM_ENERGY				(0x1 << 0)
+#define PMECR_PM_SOFTDOWN			(0x2 << 0)
+#define PMECR_PM_POWERSAVE			(0x3 << 0)
+
+/* Standard MII PHY data */
+#define KS_P1MBCR				0xE4
+#define KS_P1MBSR				0xE6
+#define KS_PHY1ILR				0xE8
+#define KS_PHY1IHR				0xEA
+#define KS_P1ANAR				0xEC
+#define KS_P1ANLPR				0xEE
+
+#define KS_P1SCLMD				0xF4
+#define P1SCLMD_LEDOFF				(1 << 15)
+#define P1SCLMD_TXIDS				(1 << 14)
+#define P1SCLMD_RESTARTAN			(1 << 13)
+#define P1SCLMD_DISAUTOMDIX			(1 << 10)
+#define P1SCLMD_FORCEMDIX			(1 << 9)
+#define P1SCLMD_AUTONEGEN			(1 << 7)
+#define P1SCLMD_FORCE100			(1 << 6)
+#define P1SCLMD_FORCEFDX			(1 << 5)
+#define P1SCLMD_ADV_FLOW			(1 << 4)
+#define P1SCLMD_ADV_100BT_FDX			(1 << 3)
+#define P1SCLMD_ADV_100BT_HDX			(1 << 2)
+#define P1SCLMD_ADV_10BT_FDX			(1 << 1)
+#define P1SCLMD_ADV_10BT_HDX			(1 << 0)
+
+#define KS_P1CR					0xF6
+#define P1CR_HP_MDIX				(1 << 15)
+#define P1CR_REV_POL				(1 << 13)
+#define P1CR_OP_100M				(1 << 10)
+#define P1CR_OP_FDX				(1 << 9)
+#define P1CR_OP_MDI				(1 << 7)
+#define P1CR_AN_DONE				(1 << 6)
+#define P1CR_LINK_GOOD				(1 << 5)
+#define P1CR_PNTR_FLOW				(1 << 4)
+#define P1CR_PNTR_100BT_FDX			(1 << 3)
+#define P1CR_PNTR_100BT_HDX			(1 << 2)
+#define P1CR_PNTR_10BT_FDX			(1 << 1)
+#define P1CR_PNTR_10BT_HDX			(1 << 0)
+
+/* TX Frame control */
+
+#define TXFR_TXIC				(1 << 15)
+#define TXFR_TXFID_MASK				(0x3f << 0)
+#define TXFR_TXFID_SHIFT			(0)
+
+/* SPI frame opcodes */
+#define KS_SPIOP_RD				(0x00)
+#define KS_SPIOP_WR				(0x40)
+#define KS_SPIOP_RXFIFO				(0x80)
+#define KS_SPIOP_TXFIFO				(0xC0)
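
Most of the packed fields above come with _MASK/_SHIFT/_GET helpers. As a
small worked example (the raw value 0x8872 is illustrative only, not taken
from the patch), the chip-ID check that ks8851_probe() performs decodes as
follows:

	/* Standalone worked example of the KS_CIDER decode used by the probe
	 * code; 0x8872 is an illustrative raw register value. */
	#include <stdio.h>

	#define CIDER_ID		0x8870
	#define CIDER_REV_MASK		(0x7 << 1)
	#define CIDER_REV_GET(_v)	(((_v) >> 1) & 0x7)

	int main(void)
	{
		unsigned int cider = 0x8872;

		if ((cider & ~CIDER_REV_MASK) != CIDER_ID)
			printf("not a KS8851\n");
		else
			printf("KS8851, revision %u\n", CIDER_REV_GET(cider));
		/* prints "KS8851, revision 1":
		 * 0x8872 & ~0xE == 0x8870 and (0x8872 >> 1) & 0x7 == 1 */
		return 0;
	}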

+ 8 - 7
drivers/net/macsonic.c

@@ -179,7 +179,7 @@ static const struct net_device_ops macsonic_netdev_ops = {
 	.ndo_set_mac_address	= eth_mac_addr,
 };
 
-static int __init macsonic_init(struct net_device *dev)
+static int __devinit macsonic_init(struct net_device *dev)
 {
 	struct sonic_local* lp = netdev_priv(dev);
 
@@ -223,7 +223,7 @@ static int __init macsonic_init(struct net_device *dev)
 	return 0;
 }
 
-static int __init mac_onboard_sonic_ethernet_addr(struct net_device *dev)
+static int __devinit mac_onboard_sonic_ethernet_addr(struct net_device *dev)
 {
 	struct sonic_local *lp = netdev_priv(dev);
 	const int prom_addr = ONBOARD_SONIC_PROM_BASE;
@@ -288,7 +288,7 @@ static int __init mac_onboard_sonic_ethernet_addr(struct net_device *dev)
 	} else return 0;
 }
 
-static int __init mac_onboard_sonic_probe(struct net_device *dev)
+static int __devinit mac_onboard_sonic_probe(struct net_device *dev)
 {
 	/* Bwahahaha */
 	static int once_is_more_than_enough;
@@ -409,7 +409,7 @@ static int __init mac_onboard_sonic_probe(struct net_device *dev)
 	return macsonic_init(dev);
 }
 
-static int __init mac_nubus_sonic_ethernet_addr(struct net_device *dev,
+static int __devinit mac_nubus_sonic_ethernet_addr(struct net_device *dev,
 						unsigned long prom_addr,
 						int id)
 {
@@ -424,7 +424,7 @@ static int __init mac_nubus_sonic_ethernet_addr(struct net_device *dev,
 	return 0;
 }
 
-static int __init macsonic_ident(struct nubus_dev *ndev)
+static int __devinit macsonic_ident(struct nubus_dev *ndev)
 {
 	if (ndev->dr_hw == NUBUS_DRHW_ASANTE_LC &&
 	    ndev->dr_sw == NUBUS_DRSW_SONIC_LC)
@@ -449,7 +449,7 @@ static int __init macsonic_ident(struct nubus_dev *ndev)
 	return -1;
 }
 
-static int __init mac_nubus_sonic_probe(struct net_device *dev)
+static int __devinit mac_nubus_sonic_probe(struct net_device *dev)
 {
 	static int slots;
 	struct nubus_dev* ndev = NULL;
@@ -562,7 +562,7 @@ static int __init mac_nubus_sonic_probe(struct net_device *dev)
 	return macsonic_init(dev);
 }
 
-static int __init mac_sonic_probe(struct platform_device *pdev)
+static int __devinit mac_sonic_probe(struct platform_device *pdev)
 {
 	struct net_device *dev;
 	struct sonic_local *lp;
@@ -575,6 +575,7 @@ static int __init mac_sonic_probe(struct platform_device *pdev)
 	lp = netdev_priv(dev);
 	lp->device = &pdev->dev;
 	SET_NETDEV_DEV(dev, &pdev->dev);
+	platform_set_drvdata(pdev, dev);
 
 	/* This will catch fatal stuff like -ENOMEM as well as success */
 	err = mac_onboard_sonic_probe(dev);

+ 1 - 1
drivers/net/mlx4/en_ethtool.c

@@ -220,7 +220,7 @@ static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	cmd->autoneg = AUTONEG_DISABLE;
 	cmd->supported = SUPPORTED_10000baseT_Full;
-	cmd->advertising = SUPPORTED_10000baseT_Full;
+	cmd->advertising = ADVERTISED_10000baseT_Full;
 	if (netif_carrier_ok(dev)) {
 		cmd->speed = SPEED_10000;
 		cmd->duplex = DUPLEX_FULL;

+ 3 - 0
drivers/net/netxen/netxen_nic.h

@@ -210,6 +210,7 @@
 #define NETXEN_CTX_SIGNATURE	0xdee0
 #define NETXEN_CTX_SIGNATURE_V2	0x0002dee0
 #define NETXEN_CTX_RESET	0xbad0
+#define NETXEN_CTX_D3_RESET	0xacc0
 #define NETXEN_RCV_PRODUCER(ringid)	(ringid)
 
 #define PHAN_PEG_RCV_INITIALIZED	0xff01
@@ -773,6 +774,8 @@ struct nx_host_tx_ring {
 	u32 crb_cmd_consumer;
 	u32 num_desc;
 
+	struct netdev_queue *txq;
+
 	struct netxen_cmd_buffer *cmd_buf_arr;
 	struct cmd_desc_type0 *desc_head;
 	dma_addr_t phys_addr;

+ 7 - 6
drivers/net/netxen/netxen_nic_ctx.c

@@ -684,10 +684,8 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
 			goto err_out_free;
 	} else {
 		err = netxen_init_old_ctx(adapter);
-		if (err) {
-			netxen_free_hw_resources(adapter);
-			return err;
-		}
+		if (err)
+			goto err_out_free;
 	}
 
 	return 0;
@@ -708,15 +706,18 @@ void netxen_free_hw_resources(struct netxen_adapter *adapter)
 	int port = adapter->portnum;
 
 	if (adapter->fw_major >= 4) {
-		nx_fw_cmd_destroy_tx_ctx(adapter);
 		nx_fw_cmd_destroy_rx_ctx(adapter);
+		nx_fw_cmd_destroy_tx_ctx(adapter);
 	} else {
 		netxen_api_lock(adapter);
 		NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
-				NETXEN_CTX_RESET | port);
+				NETXEN_CTX_D3_RESET | port);
 		netxen_api_unlock(adapter);
 	}
 
+	/* Allow dma queues to drain after context reset */
+	msleep(20);
+
 	recv_ctx = &adapter->recv_ctx;
 
 	if (recv_ctx->hwctx != NULL) {

+ 5 - 4
drivers/net/netxen/netxen_nic_hw.c

@@ -461,13 +461,14 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter,
 	i = 0;
 
 	tx_ring = adapter->tx_ring;
-	netif_tx_lock_bh(adapter->netdev);
+	__netif_tx_lock_bh(tx_ring->txq);
 
 	producer = tx_ring->producer;
 	consumer = tx_ring->sw_consumer;
 
-	if (nr_desc >= find_diff_among(producer, consumer, tx_ring->num_desc)) {
-		netif_tx_unlock_bh(adapter->netdev);
+	if (nr_desc >= netxen_tx_avail(tx_ring)) {
+		netif_tx_stop_queue(tx_ring->txq);
+		__netif_tx_unlock_bh(tx_ring->txq);
 		return -EBUSY;
 	}
 
@@ -490,7 +491,7 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter,
 
 	netxen_nic_update_cmd_producer(adapter, tx_ring);
 
-	netif_tx_unlock_bh(adapter->netdev);
+	__netif_tx_unlock_bh(tx_ring->txq);
 
 	return 0;
 }

+ 3 - 2
drivers/net/netxen/netxen_nic_init.c

@@ -214,6 +214,7 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
 	adapter->tx_ring = tx_ring;
 
 	tx_ring->num_desc = adapter->num_txd;
+	tx_ring->txq = netdev_get_tx_queue(netdev, 0);
 
 	cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring));
 	if (cmd_buf_arr == NULL) {
@@ -1400,10 +1401,10 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
 		smp_mb();
 
 		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
-			netif_tx_lock(netdev);
+			__netif_tx_lock(tx_ring->txq, smp_processor_id());
 			if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
 				netif_wake_queue(netdev);
-			netif_tx_unlock(netdev);
+			__netif_tx_unlock(tx_ring->txq);
 		}
 	}
 	/*

+ 22 - 14
drivers/net/netxen/netxen_nic_main.c

@@ -215,9 +215,9 @@ netxen_napi_disable(struct netxen_adapter *adapter)
 
 	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
-		napi_disable(&sds_ring->napi);
 		netxen_nic_disable_int(sds_ring);
-		synchronize_irq(sds_ring->irq);
+		napi_synchronize(&sds_ring->napi);
+		napi_disable(&sds_ring->napi);
 	}
 }
 
@@ -833,11 +833,11 @@ netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
 
 	adapter->ahw.linkup = 0;
 
-	netxen_napi_enable(adapter);
-
 	if (adapter->max_sds_rings > 1)
 		netxen_config_rss(adapter, 1);
 
+	netxen_napi_enable(adapter);
+
 	if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)
 		netxen_linkevent_request(adapter, 1);
 	else
@@ -851,8 +851,9 @@ netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
 static void
 netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
 {
+	spin_lock(&adapter->tx_clean_lock);
 	netif_carrier_off(netdev);
-	netif_stop_queue(netdev);
+	netif_tx_disable(netdev);
 
 	if (adapter->stop_port)
 		adapter->stop_port(adapter);
@@ -863,9 +864,10 @@ netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
 	netxen_napi_disable(adapter);
 
 	netxen_release_tx_buffers(adapter);
+	spin_unlock(&adapter->tx_clean_lock);
 
-	FLUSH_SCHEDULED_WORK();
 	del_timer_sync(&adapter->watchdog_timer);
+	FLUSH_SCHEDULED_WORK();
 }
 
 
@@ -943,8 +945,8 @@ err_out_free_sw:
 static void
 netxen_nic_detach(struct netxen_adapter *adapter)
 {
-	netxen_release_rx_buffers(adapter);
 	netxen_free_hw_resources(adapter);
+	netxen_release_rx_buffers(adapter);
 	netxen_nic_free_irq(adapter);
 	netxen_free_sw_resources(adapter);
 
@@ -1533,10 +1535,12 @@ static int netxen_nic_check_temp(struct netxen_adapter *adapter)
 		printk(KERN_ALERT
 		       "%s: Device temperature %d degrees C exceeds"
 		       " maximum allowed. Hardware has been shut down.\n",
-		       netxen_nic_driver_name, temp_val);
+		       netdev->name, temp_val);
+
+		netif_device_detach(netdev);
+		netxen_nic_down(adapter, netdev);
+		netxen_nic_detach(adapter);
 
-		netif_carrier_off(netdev);
-		netif_stop_queue(netdev);
 		rv = 1;
 	} else if (temp_state == NX_TEMP_WARN) {
 		if (adapter->temp == NX_TEMP_NORMAL) {
@@ -1544,13 +1548,13 @@ static int netxen_nic_check_temp(struct netxen_adapter *adapter)
 			       "%s: Device temperature %d degrees C "
 			       "exceeds operating range."
 			       " Immediate action needed.\n",
-			       netxen_nic_driver_name, temp_val);
+			       netdev->name, temp_val);
 		}
 	} else {
 		if (adapter->temp == NX_TEMP_WARN) {
 			printk(KERN_INFO
 			       "%s: Device temperature is now %d degrees C"
-			       " in normal range.\n", netxen_nic_driver_name,
+			       " in normal range.\n", netdev->name,
 			       temp_val);
 		}
 	}
@@ -1623,7 +1627,7 @@ void netxen_watchdog_task(struct work_struct *work)
 	struct netxen_adapter *adapter =
 		container_of(work, struct netxen_adapter, watchdog_task);
 
-	if ((adapter->portnum  == 0) && netxen_nic_check_temp(adapter))
+	if (netxen_nic_check_temp(adapter))
 		return;
 
 	if (!adapter->has_link_events)
@@ -1645,6 +1649,9 @@ static void netxen_tx_timeout_task(struct work_struct *work)
 	struct netxen_adapter *adapter =
 		container_of(work, struct netxen_adapter, tx_timeout_task);
 
+	if (!netif_running(adapter->netdev))
+		return;
+
 	printk(KERN_ERR "%s %s: transmit timeout, resetting.\n",
 	       netxen_nic_driver_name, adapter->netdev->name);
 
@@ -1757,7 +1764,8 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
 
 	if ((work_done < budget) && tx_complete) {
 		napi_complete(&sds_ring->napi);
-		netxen_nic_enable_int(sds_ring);
+		if (netif_running(adapter->netdev))
+			netxen_nic_enable_int(sds_ring);
 	}
 
 	return work_done;

+ 14 - 7
drivers/net/pcmcia/3c589_cs.c

@@ -156,6 +156,7 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev);
 static int el3_rx(struct net_device *dev);
 static int el3_close(struct net_device *dev);
 static void el3_tx_timeout(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
 static void set_multicast_list(struct net_device *dev);
 static const struct ethtool_ops netdev_ethtool_ops;
 
@@ -488,8 +489,7 @@ static void tc589_reset(struct net_device *dev)
     /* Switch to register set 1 for normal use. */
     EL3WINDOW(1);
 
-    /* Accept b-cast and phys addr only. */
-    outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
+    set_rx_mode(dev);
     outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
     outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
     outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
@@ -700,7 +700,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
 		if (fifo_diag & 0x2000) {
 		    /* Rx underrun */
 		    tc589_wait_for_completion(dev, RxReset);
-		    set_multicast_list(dev);
+		    set_rx_mode(dev);
 		    outw(RxEnable, ioaddr + EL3_CMD);
 		}
 		outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
@@ -905,14 +905,11 @@ static int el3_rx(struct net_device *dev)
     return 0;
 }
 
-static void set_multicast_list(struct net_device *dev)
+static void set_rx_mode(struct net_device *dev)
 {
-    struct el3_private *lp = netdev_priv(dev);
-    struct pcmcia_device *link = lp->p_dev;
     unsigned int ioaddr = dev->base_addr;
     u16 opts = SetRxFilter | RxStation | RxBroadcast;
 
-    if (!pcmcia_dev_present(link)) return;
     if (dev->flags & IFF_PROMISC)
 	opts |= RxMulticast | RxProm;
     else if (dev->mc_count || (dev->flags & IFF_ALLMULTI))
@@ -920,6 +917,16 @@ static void set_multicast_list(struct net_device *dev)
     outw(opts, ioaddr + EL3_CMD);
 }
 
+static void set_multicast_list(struct net_device *dev)
+{
+	struct el3_private *priv = netdev_priv(dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	set_rx_mode(dev);
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
 static int el3_close(struct net_device *dev)
 {
     struct el3_private *lp = netdev_priv(dev);

+ 1 - 0
drivers/net/sc92031.c

@@ -1593,6 +1593,7 @@ out:
 static struct pci_device_id sc92031_pci_device_id_table[] __devinitdata = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) },
+	{ PCI_DEVICE(0x1088, 0x2031) },
 	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, sc92031_pci_device_id_table);

+ 9 - 16
drivers/net/sky2.c

@@ -1151,14 +1151,7 @@ stopped:
 
 	/* reset the Rx prefetch unit */
 	sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
-
-	/* Reset the RAM Buffer receive queue */
-	sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_RST_SET);
-
-	/* Reset Rx MAC FIFO */
-	sky2_write8(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), GMF_RST_SET);
-
-	sky2_read8(hw, B0_CTST);
+	mmiowb();
 }
 
 /* Clean out receive buffer area, assumes receiver hardware stopped */
@@ -1825,12 +1818,6 @@ static int sky2_down(struct net_device *dev)
 	if (netif_msg_ifdown(sky2))
 		printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
 
-	/* Disable port IRQ */
-	imask = sky2_read32(hw, B0_IMSK);
-	imask &= ~portirq_msk[port];
-	sky2_write32(hw, B0_IMSK, imask);
-	sky2_read32(hw, B0_IMSK);
-
 	/* Force flow control off */
 	sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
 
@@ -1870,8 +1857,6 @@ static int sky2_down(struct net_device *dev)
 
 	sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
 
-	sky2_rx_stop(sky2);
-
 	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
 	sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
 
@@ -1881,6 +1866,14 @@ static int sky2_down(struct net_device *dev)
 	sky2_write32(hw, STAT_ISR_TIMER_CNT, 0);
 	sky2_read8(hw, STAT_ISR_TIMER_CTRL);
 
+	sky2_rx_stop(sky2);
+
+	/* Disable port IRQ */
+	imask = sky2_read32(hw, B0_IMSK);
+	imask &= ~portirq_msk[port];
+	sky2_write32(hw, B0_IMSK, imask);
+	sky2_read32(hw, B0_IMSK);
+
 	synchronize_irq(hw->pdev->irq);
 	napi_synchronize(&hw->napi);
 

+ 8 - 0
drivers/net/usb/Kconfig

@@ -369,4 +369,12 @@ config USB_NET_INT51X1
 	  (Powerline Communications) solution with an Intellon
 	  INT51x1/INT5200 chip, like the "devolo dLan duo".
 
+config USB_CDC_PHONET
+	tristate "CDC Phonet support"
+	depends on PHONET
+	help
+	  Choose this option to support the Phonet interface to a Nokia
+	  cellular modem, as found on most Nokia handsets with the
+	  "PC suite" USB profile.
+
 endmenu

+ 1 - 0
drivers/net/usb/Makefile

@@ -21,4 +21,5 @@ obj-$(CONFIG_USB_NET_ZAURUS)	+= zaurus.o
 obj-$(CONFIG_USB_NET_MCS7830)	+= mcs7830.o
 obj-$(CONFIG_USB_USBNET)	+= usbnet.o
 obj-$(CONFIG_USB_NET_INT51X1)	+= int51x1.o
+obj-$(CONFIG_USB_CDC_PHONET)	+= cdc-phonet.o
 

+ 461 - 0
drivers/net/usb/cdc-phonet.c

@@ -0,0 +1,461 @@
+/*
+ * phonet.c -- USB CDC Phonet host driver
+ *
+ * Copyright (C) 2008-2009 Nokia Corporation. All rights reserved.
+ *
+ * Author: Rémi Denis-Courmont
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/usb/cdc.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_phonet.h>
+
+#define PN_MEDIA_USB	0x1B
+
+static const unsigned rxq_size = 17;
+
+struct usbpn_dev {
+	struct net_device	*dev;
+
+	struct usb_interface	*intf, *data_intf;
+	struct usb_device	*usb;
+	unsigned int		tx_pipe, rx_pipe;
+	u8 active_setting;
+	u8 disconnected;
+
+	unsigned		tx_queue;
+	spinlock_t		tx_lock;
+
+	spinlock_t		rx_lock;
+	struct sk_buff		*rx_skb;
+	struct urb		*urbs[0];
+};
+
+static void tx_complete(struct urb *req);
+static void rx_complete(struct urb *req);
+
+/*
+ * Network device callbacks
+ */
+static int usbpn_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct usbpn_dev *pnd = netdev_priv(dev);
+	struct urb *req = NULL;
+	unsigned long flags;
+	int err;
+
+	if (skb->protocol != htons(ETH_P_PHONET))
+		goto drop;
+
+	req = usb_alloc_urb(0, GFP_ATOMIC);
+	if (!req)
+		goto drop;
+	usb_fill_bulk_urb(req, pnd->usb, pnd->tx_pipe, skb->data, skb->len,
+				tx_complete, skb);
+	req->transfer_flags = URB_ZERO_PACKET;
+	err = usb_submit_urb(req, GFP_ATOMIC);
+	if (err) {
+		usb_free_urb(req);
+		goto drop;
+	}
+
+	spin_lock_irqsave(&pnd->tx_lock, flags);
+	pnd->tx_queue++;
+	if (pnd->tx_queue >= dev->tx_queue_len)
+		netif_stop_queue(dev);
+	spin_unlock_irqrestore(&pnd->tx_lock, flags);
+	return 0;
+
+drop:
+	dev_kfree_skb(skb);
+	dev->stats.tx_dropped++;
+	return 0;
+}
+
+static void tx_complete(struct urb *req)
+{
+	struct sk_buff *skb = req->context;
+	struct net_device *dev = skb->dev;
+	struct usbpn_dev *pnd = netdev_priv(dev);
+
+	switch (req->status) {
+	case 0:
+		dev->stats.tx_bytes += skb->len;
+		break;
+
+	case -ENOENT:
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		dev->stats.tx_aborted_errors++;
+	default:
+		dev->stats.tx_errors++;
+		dev_dbg(&dev->dev, "TX error (%d)\n", req->status);
+	}
+	dev->stats.tx_packets++;
+
+	spin_lock(&pnd->tx_lock);
+	pnd->tx_queue--;
+	netif_wake_queue(dev);
+	spin_unlock(&pnd->tx_lock);
+
+	dev_kfree_skb_any(skb);
+	usb_free_urb(req);
+}
+
+static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags)
+{
+	struct net_device *dev = pnd->dev;
+	struct page *page;
+	int err;
+
+	page = __netdev_alloc_page(dev, gfp_flags);
+	if (!page)
+		return -ENOMEM;
+
+	usb_fill_bulk_urb(req, pnd->usb, pnd->rx_pipe, page_address(page),
+				PAGE_SIZE, rx_complete, dev);
+	req->transfer_flags = 0;
+	err = usb_submit_urb(req, gfp_flags);
+	if (unlikely(err)) {
+		dev_dbg(&dev->dev, "RX submit error (%d)\n", err);
+		netdev_free_page(dev, page);
+	}
+	return err;
+}
+
+static void rx_complete(struct urb *req)
+{
+	struct net_device *dev = req->context;
+	struct usbpn_dev *pnd = netdev_priv(dev);
+	struct page *page = virt_to_page(req->transfer_buffer);
+	struct sk_buff *skb;
+	unsigned long flags;
+
+	switch (req->status) {
+	case 0:
+		spin_lock_irqsave(&pnd->rx_lock, flags);
+		skb = pnd->rx_skb;
+		if (!skb) {
+			skb = pnd->rx_skb = netdev_alloc_skb(dev, 12);
+			if (likely(skb)) {
+				/* Can't use pskb_pull() on page in IRQ */
+				memcpy(skb_put(skb, 1), page_address(page), 1);
+				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+						page, 1, req->actual_length);
+				page = NULL;
+			}
+		} else {
+			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+					page, 0, req->actual_length);
+			page = NULL;
+		}
+		if (req->actual_length < PAGE_SIZE)
+			pnd->rx_skb = NULL; /* Last fragment */
+		else
+			skb = NULL;
+		spin_unlock_irqrestore(&pnd->rx_lock, flags);
+		if (skb) {
+			skb->protocol = htons(ETH_P_PHONET);
+			skb_reset_mac_header(skb);
+			__skb_pull(skb, 1);
+			skb->dev = dev;
+			dev->stats.rx_packets++;
+			dev->stats.rx_bytes += skb->len;
+
+			netif_rx(skb);
+		}
+		goto resubmit;
+
+	case -ENOENT:
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		req = NULL;
+		break;
+
+	case -EOVERFLOW:
+		dev->stats.rx_over_errors++;
+		dev_dbg(&dev->dev, "RX overflow\n");
+		break;
+
+	case -EILSEQ:
+		dev->stats.rx_crc_errors++;
+		break;
+	}
+
+	dev->stats.rx_errors++;
+resubmit:
+	if (page)
+		netdev_free_page(dev, page);
+	if (req)
+		rx_submit(pnd, req, GFP_ATOMIC);
+}
+
+static int usbpn_close(struct net_device *dev);
+
+static int usbpn_open(struct net_device *dev)
+{
+	struct usbpn_dev *pnd = netdev_priv(dev);
+	int err;
+	unsigned i;
+	unsigned num = pnd->data_intf->cur_altsetting->desc.bInterfaceNumber;
+
+	err = usb_set_interface(pnd->usb, num, pnd->active_setting);
+	if (err)
+		return err;
+
+	for (i = 0; i < rxq_size; i++) {
+		struct urb *req = usb_alloc_urb(0, GFP_KERNEL);
+
+		if (!req || rx_submit(pnd, req, GFP_KERNEL)) {
+			usbpn_close(dev);
+			return -ENOMEM;
+		}
+		pnd->urbs[i] = req;
+	}
+
+	netif_wake_queue(dev);
+	return 0;
+}
+
+static int usbpn_close(struct net_device *dev)
+{
+	struct usbpn_dev *pnd = netdev_priv(dev);
+	unsigned i;
+	unsigned num = pnd->data_intf->cur_altsetting->desc.bInterfaceNumber;
+
+	netif_stop_queue(dev);
+
+	for (i = 0; i < rxq_size; i++) {
+		struct urb *req = pnd->urbs[i];
+
+		if (!req)
+			continue;
+		usb_kill_urb(req);
+		usb_free_urb(req);
+		pnd->urbs[i] = NULL;
+	}
+
+	return usb_set_interface(pnd->usb, num, !pnd->active_setting);
+}
+
+static int usbpn_set_mtu(struct net_device *dev, int new_mtu)
+{
+	if ((new_mtu < PHONET_MIN_MTU) || (new_mtu > PHONET_MAX_MTU))
+		return -EINVAL;
+
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+static const struct net_device_ops usbpn_ops = {
+	.ndo_open	= usbpn_open,
+	.ndo_stop	= usbpn_close,
+	.ndo_start_xmit = usbpn_xmit,
+	.ndo_change_mtu = usbpn_set_mtu,
+};
+
+static void usbpn_setup(struct net_device *dev)
+{
+	dev->features		= 0;
+	dev->netdev_ops		= &usbpn_ops;
+	dev->header_ops		= &phonet_header_ops;
+	dev->type		= ARPHRD_PHONET;
+	dev->flags		= IFF_POINTOPOINT | IFF_NOARP;
+	dev->mtu		= PHONET_MAX_MTU;
+	dev->hard_header_len	= 1;
+	dev->dev_addr[0]	= PN_MEDIA_USB;
+	dev->addr_len		= 1;
+	dev->tx_queue_len	= 3;
+
+	dev->destructor		= free_netdev;
+}
+
+/*
+ * USB driver callbacks
+ */
+static struct usb_device_id usbpn_ids[] = {
+	{
+		.match_flags = USB_DEVICE_ID_MATCH_VENDOR
+			| USB_DEVICE_ID_MATCH_INT_CLASS
+			| USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+		.idVendor = 0x0421, /* Nokia */
+		.bInterfaceClass = USB_CLASS_COMM,
+		.bInterfaceSubClass = 0xFE,
+	},
+	{ },
+};
+
+MODULE_DEVICE_TABLE(usb, usbpn_ids);
+
+static struct usb_driver usbpn_driver;
+
+int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
+{
+	static const char ifname[] = "usbpn%d";
+	const struct usb_cdc_union_desc *union_header = NULL;
+	const struct usb_cdc_header_desc *phonet_header = NULL;
+	const struct usb_host_interface *data_desc;
+	struct usb_interface *data_intf;
+	struct usb_device *usbdev = interface_to_usbdev(intf);
+	struct net_device *dev;
+	struct usbpn_dev *pnd;
+	u8 *data;
+	int len, err;
+
+	data = intf->altsetting->extra;
+	len = intf->altsetting->extralen;
+	while (len >= 3) {
+		u8 dlen = data[0];
+		if (dlen < 3)
+			return -EINVAL;
+
+		/* bDescriptorType */
+		if (data[1] == USB_DT_CS_INTERFACE) {
+			/* bDescriptorSubType */
+			switch (data[2]) {
+			case USB_CDC_UNION_TYPE:
+				if (union_header || dlen < 5)
+					break;
+				union_header =
+					(struct usb_cdc_union_desc *)data;
+				break;
+			case 0xAB:
+				if (phonet_header || dlen < 5)
+					break;
+				phonet_header =
+					(struct usb_cdc_header_desc *)data;
+				break;
+			}
+		}
+		data += dlen;
+		len -= dlen;
+	}
+
+	if (!union_header || !phonet_header)
+		return -EINVAL;
+
+	data_intf = usb_ifnum_to_if(usbdev, union_header->bSlaveInterface0);
+	if (data_intf == NULL)
+		return -ENODEV;
+	/* Data interface has one inactive and one active setting */
+	if (data_intf->num_altsetting != 2)
+		return -EINVAL;
+	if (data_intf->altsetting[0].desc.bNumEndpoints == 0
+	 && data_intf->altsetting[1].desc.bNumEndpoints == 2)
+		data_desc = data_intf->altsetting + 1;
+	else
+	if (data_intf->altsetting[0].desc.bNumEndpoints == 2
+	 && data_intf->altsetting[1].desc.bNumEndpoints == 0)
+		data_desc = data_intf->altsetting;
+	else
+		return -EINVAL;
+
+	dev = alloc_netdev(sizeof(*pnd) + sizeof(pnd->urbs[0]) * rxq_size,
+				ifname, usbpn_setup);
+	if (!dev)
+		return -ENOMEM;
+
+	pnd = netdev_priv(dev);
+	SET_NETDEV_DEV(dev, &intf->dev);
+	netif_stop_queue(dev);
+
+	pnd->dev = dev;
+	pnd->usb = usb_get_dev(usbdev);
+	pnd->intf = intf;
+	pnd->data_intf = data_intf;
+	spin_lock_init(&pnd->tx_lock);
+	spin_lock_init(&pnd->rx_lock);
+	/* Endpoints */
+	if (usb_pipein(data_desc->endpoint[0].desc.bEndpointAddress)) {
+		pnd->rx_pipe = usb_rcvbulkpipe(usbdev,
+			data_desc->endpoint[0].desc.bEndpointAddress);
+		pnd->tx_pipe = usb_sndbulkpipe(usbdev,
+			data_desc->endpoint[1].desc.bEndpointAddress);
+	} else {
+		pnd->rx_pipe = usb_rcvbulkpipe(usbdev,
+			data_desc->endpoint[1].desc.bEndpointAddress);
+		pnd->tx_pipe = usb_sndbulkpipe(usbdev,
+			data_desc->endpoint[0].desc.bEndpointAddress);
+	}
+	pnd->active_setting = data_desc - data_intf->altsetting;
+
+	err = usb_driver_claim_interface(&usbpn_driver, data_intf, pnd);
+	if (err)
+		goto out;
+
+	/* Force inactive mode until the network device is brought UP */
+	usb_set_interface(usbdev, union_header->bSlaveInterface0,
+				!pnd->active_setting);
+	usb_set_intfdata(intf, pnd);
+
+	err = register_netdev(dev);
+	if (err) {
+		usb_driver_release_interface(&usbpn_driver, data_intf);
+		goto out;
+	}
+
+	dev_dbg(&dev->dev, "USB CDC Phonet device found\n");
+	return 0;
+
+out:
+	usb_set_intfdata(intf, NULL);
+	free_netdev(dev);
+	return err;
+}
+
+static void usbpn_disconnect(struct usb_interface *intf)
+{
+	struct usbpn_dev *pnd = usb_get_intfdata(intf);
+	struct usb_device *usb = pnd->usb;
+
+	if (pnd->disconnected)
+		return;
+
+	pnd->disconnected = 1;
+	usb_driver_release_interface(&usbpn_driver,
+			(pnd->intf == intf) ? pnd->data_intf : pnd->intf);
+	unregister_netdev(pnd->dev);
+	usb_put_dev(usb);
+}
+
+static struct usb_driver usbpn_driver = {
+	.name =		"cdc_phonet",
+	.probe =	usbpn_probe,
+	.disconnect =	usbpn_disconnect,
+	.id_table =	usbpn_ids,
+};
+
+static int __init usbpn_init(void)
+{
+	return usb_register(&usbpn_driver);
+}
+
+static void __exit usbpn_exit(void)
+{
+	usb_deregister(&usbpn_driver);
+}
+
+module_init(usbpn_init);
+module_exit(usbpn_exit);
+
+MODULE_AUTHOR("Remi Denis-Courmont");
+MODULE_DESCRIPTION("USB CDC Phonet host interface");
+MODULE_LICENSE("GPL");

+ 1 - 1
drivers/net/usb/cdc_eem.c

@@ -311,7 +311,7 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 			 *	bmCRC = 0	: CRC = 0xDEADBEEF
 			 */
 			if (header & BIT(14))
-				crc2 = ~crc32_le(~0, skb2->data, len);
+				crc2 = ~crc32_le(~0, skb2->data, skb2->len);
 			else
 				crc2 = 0xdeadbeef;
 

+ 2 - 2
drivers/video/fbmon.c

@@ -256,8 +256,8 @@ static void fix_edid(unsigned char *edid, int fix)
 
 static int edid_checksum(unsigned char *edid)
 {
-	unsigned char i, csum = 0, all_null = 0;
-	int err = 0, fix = check_edid(edid);
+	unsigned char csum = 0, all_null = 0;
+	int i, err = 0, fix = check_edid(edid);
 
 	if (fix)
 		fix_edid(edid, fix);

+ 1 - 26
fs/Kconfig

@@ -186,32 +186,7 @@ source "fs/romfs/Kconfig"
 source "fs/sysv/Kconfig"
 source "fs/ufs/Kconfig"
 source "fs/exofs/Kconfig"
-
-config NILFS2_FS
-	tristate "NILFS2 file system support (EXPERIMENTAL)"
-	depends on BLOCK && EXPERIMENTAL
-	select CRC32
-	help
-	  NILFS2 is a log-structured file system (LFS) supporting continuous
-	  snapshotting.  In addition to versioning capability of the entire
-	  file system, users can even restore files mistakenly overwritten or
-	  destroyed just a few seconds ago.  Since this file system can keep
-	  consistency like conventional LFS, it achieves quick recovery after
-	  system crashes.
-
-	  NILFS2 creates a number of checkpoints every few seconds or per
-	  synchronous write basis (unless there is no change).  Users can
-	  select significant versions among continuously created checkpoints,
-	  and can change them into snapshots which will be preserved for long
-	  periods until they are changed back to checkpoints.  Each
-	  snapshot is mountable as a read-only file system concurrently with
-	  its writable mount, and this feature is convenient for online backup.
-
-	  Some features including atime, extended attributes, and POSIX ACLs,
-	  are not supported yet.
-
-	  To compile this file system support as a module, choose M here: the
-	  module will be called nilfs2.  If unsure, say N.
+source "fs/nilfs2/Kconfig"
 
 endif # MISC_FILESYSTEMS
 

+ 3 - 15
fs/nfs/client.c

@@ -1242,20 +1242,6 @@ error:
 	return error;
 }
 
-/*
- * Initialize a session.
- * Note: save the mount rsize and wsize for create_server negotiation.
- */
-static void nfs4_init_session(struct nfs_client *clp,
-			      unsigned int wsize, unsigned int rsize)
-{
-#if defined(CONFIG_NFS_V4_1)
-	if (nfs4_has_session(clp)) {
-		clp->cl_session->fc_attrs.max_rqst_sz = wsize;
-		clp->cl_session->fc_attrs.max_resp_sz = rsize;
-	}
-#endif /* CONFIG_NFS_V4_1 */
-}
 
 /*
  * Session has been established, and the client marked ready.
@@ -1350,7 +1336,9 @@ struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
 	BUG_ON(!server->nfs_client->rpc_ops);
 	BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
 
-	nfs4_init_session(server->nfs_client, server->wsize, server->rsize);
+	error = nfs4_init_session(server);
+	if (error < 0)
+		goto error;
 
 	/* Probe the root fh to retrieve its FSID */
 	error = nfs4_path_walk(server, mntfh, data->nfs_server.export_path);

+ 1 - 1
fs/nfs/dir.c

@@ -1025,12 +1025,12 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
 				res = NULL;
 				goto out;
 			/* This turned out not to be a regular file */
-			case -EISDIR:
 			case -ENOTDIR:
 				goto no_open;
 			case -ELOOP:
 				if (!(nd->intent.open.flags & O_NOFOLLOW))
 					goto no_open;
+			/* case -EISDIR: */
 			/* case -EINVAL: */
 			default:
 				goto out;

+ 6 - 0
fs/nfs/nfs4_fs.h

@@ -220,6 +220,7 @@ extern void nfs4_destroy_session(struct nfs4_session *session);
 extern struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp);
 extern int nfs4_proc_create_session(struct nfs_client *, int reset);
 extern int nfs4_proc_destroy_session(struct nfs4_session *);
+extern int nfs4_init_session(struct nfs_server *server);
 #else /* CONFIG_NFS_v4_1 */
 static inline int nfs4_setup_sequence(struct nfs_client *clp,
 		struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
@@ -227,6 +228,11 @@ static inline int nfs4_setup_sequence(struct nfs_client *clp,
 {
 	return 0;
 }
+
+static inline int nfs4_init_session(struct nfs_server *server)
+{
+	return 0;
+}
 #endif /* CONFIG_NFS_V4_1 */
 
 extern struct nfs4_state_maintenance_ops *nfs4_state_renewal_ops[];

+ 29 - 11
fs/nfs/nfs4proc.c

@@ -2040,15 +2040,9 @@ static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
 		.rpc_argp = &args,
 		.rpc_resp = &res,
 	};
-	int status;
 
 	nfs_fattr_init(info->fattr);
-	status = nfs4_recover_expired_lease(server);
-	if (!status)
-		status = nfs4_check_client_ready(server->nfs_client);
-	if (!status)
-		status = nfs4_call_sync(server, &msg, &args, &res, 0);
-	return status;
+	return nfs4_call_sync(server, &msg, &args, &res, 0);
 }
 
 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
@@ -4099,15 +4093,23 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
 	if (request->fl_start < 0 || request->fl_end < 0)
 		return -EINVAL;
 
-	if (IS_GETLK(cmd))
-		return nfs4_proc_getlk(state, F_GETLK, request);
+	if (IS_GETLK(cmd)) {
+		if (state != NULL)
+			return nfs4_proc_getlk(state, F_GETLK, request);
+		return 0;
+	}
 
 	if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
 		return -EINVAL;
 
-	if (request->fl_type == F_UNLCK)
-		return nfs4_proc_unlck(state, cmd, request);
+	if (request->fl_type == F_UNLCK) {
+		if (state != NULL)
+			return nfs4_proc_unlck(state, cmd, request);
+		return 0;
+	}
 
+	if (state == NULL)
+		return -ENOLCK;
 	do {
 		status = nfs4_proc_setlk(state, cmd, request);
 		if ((status != -EAGAIN) || IS_SETLK(cmd))
@@ -4793,6 +4795,22 @@ int nfs4_proc_destroy_session(struct nfs4_session *session)
 	return status;
 }
 
+int nfs4_init_session(struct nfs_server *server)
+{
+	struct nfs_client *clp = server->nfs_client;
+	int ret;
+
+	if (!nfs4_has_session(clp))
+		return 0;
+
+	clp->cl_session->fc_attrs.max_rqst_sz = server->wsize;
+	clp->cl_session->fc_attrs.max_resp_sz = server->rsize;
+	ret = nfs4_recover_expired_lease(server);
+	if (!ret)
+		ret = nfs4_check_client_ready(clp);
+	return ret;
+}
+
 /*
  * Renew the cl_session lease.
  */

+ 1 - 1
fs/nfs/nfs4state.c

@@ -553,6 +553,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
 	INIT_LIST_HEAD(&lsp->ls_sequence.list);
 	lsp->ls_seqid.sequence = &lsp->ls_sequence;
 	atomic_set(&lsp->ls_count, 1);
+	lsp->ls_state = state;
 	lsp->ls_owner = fl_owner;
 	spin_lock(&clp->cl_lock);
 	nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
@@ -587,7 +588,6 @@ static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_
 		if (lsp != NULL)
 			break;
 		if (new != NULL) {
-			new->ls_state = state;
 			list_add(&new->ls_locks, &state->lock_states);
 			set_bit(LK_STATE_IN_USE, &state->flags);
 			lsp = new;

+ 25 - 0
fs/nilfs2/Kconfig

@@ -0,0 +1,25 @@
+config NILFS2_FS
+	tristate "NILFS2 file system support (EXPERIMENTAL)"
+	depends on BLOCK && EXPERIMENTAL
+	select CRC32
+	help
+	  NILFS2 is a log-structured file system (LFS) supporting continuous
+	  snapshotting.  In addition to versioning capability of the entire
+	  file system, users can even restore files mistakenly overwritten or
+	  destroyed just a few seconds ago.  Since this file system can keep
+	  consistency like conventional LFS, it achieves quick recovery after
+	  system crashes.
+
+	  NILFS2 creates a number of checkpoints every few seconds or per
+	  synchronous write basis (unless there is no change).  Users can
+	  select significant versions among continuously created checkpoints,
+	  and can change them into snapshots which will be preserved for long
+	  periods until they are changed back to checkpoints.  Each
+	  snapshot is mountable as a read-only file system concurrently with
+	  its writable mount, and this feature is convenient for online backup.
+
+	  Some features including atime, extended attributes, and POSIX ACLs,
+	  are not supported yet.
+
+	  To compile this file system support as a module, choose M here: the
+	  module will be called nilfs2.  If unsure, say N.

+ 2 - 2
fs/pipe.c

@@ -68,8 +68,8 @@ void pipe_double_lock(struct pipe_inode_info *pipe1,
 		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
 		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
 	} else {
-		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
-		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
+		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
+		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
 	}
 }
 

+ 28 - 0
include/linux/interrupt.h

@@ -14,6 +14,7 @@
 #include <linux/irqflags.h>
 #include <linux/smp.h>
 #include <linux/percpu.h>
+#include <linux/hrtimer.h>
 
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
@@ -64,11 +65,13 @@
  * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
  * IRQTF_DIED      - handler thread died
  * IRQTF_WARNED    - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
+ * IRQTF_AFFINITY  - irq thread is requested to adjust affinity
  */
 enum {
 	IRQTF_RUNTHREAD,
 	IRQTF_DIED,
 	IRQTF_WARNED,
+	IRQTF_AFFINITY,
 };
 
 typedef irqreturn_t (*irq_handler_t)(int, void *);
@@ -517,6 +520,31 @@ extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
 extern void tasklet_init(struct tasklet_struct *t,
 			 void (*func)(unsigned long), unsigned long data);
 
+struct tasklet_hrtimer {
+	struct hrtimer		timer;
+	struct tasklet_struct	tasklet;
+	enum hrtimer_restart	(*function)(struct hrtimer *);
+};
+
+extern void
+tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
+		     enum hrtimer_restart (*function)(struct hrtimer *),
+		     clockid_t which_clock, enum hrtimer_mode mode);
+
+static inline
+int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
+			  const enum hrtimer_mode mode)
+{
+	return hrtimer_start(&ttimer->timer, time, mode);
+}
+
+static inline
+void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
+{
+	hrtimer_cancel(&ttimer->timer);
+	tasklet_kill(&ttimer->tasklet);
+}
+
 /*
  * Autoprobing for irqs:
  *
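
The new tasklet_hrtimer wrapper defers the hrtimer's work into a tasklet so
it runs in softirq rather than hard-IRQ context. A minimal usage sketch of
the API added above (the 100 ms period and the polling callback are made up
for illustration; hrtimer_forward_now() is used for periodic re-arming):

	/* Illustrative use of the tasklet_hrtimer API; the 100 ms period and
	 * the polled work are placeholders. */
	#include <linux/interrupt.h>
	#include <linux/hrtimer.h>
	#include <linux/ktime.h>

	static struct tasklet_hrtimer poll_timer;

	static enum hrtimer_restart poll_func(struct hrtimer *t)
	{
		/* runs from the tasklet, i.e. softirq context, so it may not
		 * sleep, but it is no longer in hard interrupt context */
		/* ... poll hardware state here ... */
		hrtimer_forward_now(t, ktime_set(0, 100 * NSEC_PER_MSEC));
		return HRTIMER_RESTART;	/* trampoline restarts the hrtimer */
	}

	static void poll_start(void)
	{
		tasklet_hrtimer_init(&poll_timer, poll_func,
				     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		tasklet_hrtimer_start(&poll_timer,
				      ktime_set(0, 100 * NSEC_PER_MSEC),
				      HRTIMER_MODE_REL);
	}

	static void poll_stop(void)
	{
		tasklet_hrtimer_cancel(&poll_timer);
	}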

+ 4 - 11
include/linux/perf_counter.h

@@ -120,8 +120,9 @@ enum perf_counter_sample_format {
 	PERF_SAMPLE_ID				= 1U << 6,
 	PERF_SAMPLE_CPU				= 1U << 7,
 	PERF_SAMPLE_PERIOD			= 1U << 8,
+	PERF_SAMPLE_STREAM_ID			= 1U << 9,
 
-	PERF_SAMPLE_MAX = 1U << 9,		/* non-ABI */
+	PERF_SAMPLE_MAX = 1U << 10,		/* non-ABI */
 };
 
 /*
@@ -312,16 +313,7 @@ enum perf_event_type {
 	 *	struct perf_event_header	header;
 	 *	u64				time;
 	 *	u64				id;
-	 *	u64				sample_period;
-	 * };
-	 */
-	PERF_EVENT_PERIOD		= 4,
-
-	/*
-	 * struct {
-	 *	struct perf_event_header	header;
-	 *	u64				time;
-	 *	u64				id;
+	 *	u64				stream_id;
 	 * };
 	 */
 	PERF_EVENT_THROTTLE		= 5,
@@ -356,6 +348,7 @@ enum perf_event_type {
 	 *	{ u64			time;     } && PERF_SAMPLE_TIME
 	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
 	 *	{ u64			id;	  } && PERF_SAMPLE_ID
+	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
 	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
 	 * 	{ u64			period;   } && PERF_SAMPLE_PERIOD
 	 *

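Per the layout comment above, each optional sample field appears in the order of its PERF_SAMPLE_* bit. The struct below is an illustration only, not a kernel or exported structure: it shows the shape of a PERF_EVENT_SAMPLE record when sample_type selects TIME, ID and STREAM_ID. With inherited counters, id now carries the primary (parent) counter id shared by all clones, while stream_id identifies the individual counter that produced the sample (see the kernel/perf_counter.c hunk further down).

#include <linux/perf_counter.h>	/* struct perf_event_header, __u64 */

/* sample_type = PERF_SAMPLE_TIME | PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID */
struct sample_time_id_stream {
	struct perf_event_header	header;
	__u64				time;		/* PERF_SAMPLE_TIME */
	__u64				id;		/* PERF_SAMPLE_ID (primary id) */
	__u64				stream_id;	/* PERF_SAMPLE_STREAM_ID */
};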
+ 2 - 1
include/linux/sched.h

@@ -209,7 +209,7 @@ extern unsigned long long time_sync_thresh;
 			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 #define task_contributes_to_load(task)	\
 				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
-				 (task->flags & PF_FROZEN) == 0)
+				 (task->flags & PF_FREEZING) == 0)
 
 #define __set_task_state(tsk, state_value)		\
 	do { (tsk)->state = (state_value); } while (0)
@@ -1680,6 +1680,7 @@ extern cputime_t task_gtime(struct task_struct *p);
 #define PF_MEMALLOC	0x00000800	/* Allocating memory */
 #define PF_FLUSHER	0x00001000	/* responsible for disk writeback */
 #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
+#define PF_FREEZING	0x00004000	/* freeze in progress. do not account to load */
 #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */

+ 19 - 13
include/net/sock.h

@@ -104,15 +104,15 @@ struct net;
 
 /**
  *	struct sock_common - minimal network layer representation of sockets
+ *	@skc_node: main hash linkage for various protocol lookup tables
+ *	@skc_nulls_node: main hash linkage for UDP/UDP-Lite protocol
+ *	@skc_refcnt: reference count
+ *	@skc_hash: hash value used with various protocol lookup tables
  *	@skc_family: network address family
  *	@skc_state: Connection state
  *	@skc_reuse: %SO_REUSEADDR setting
  *	@skc_bound_dev_if: bound device index if != 0
- *	@skc_node: main hash linkage for various protocol lookup tables
- *	@skc_nulls_node: main hash linkage for UDP/UDP-Lite protocol
  *	@skc_bind_node: bind hash linkage for various protocol lookup tables
- *	@skc_refcnt: reference count
- *	@skc_hash: hash value used with various protocol lookup tables
  *	@skc_prot: protocol handlers inside a network family
  *	@skc_net: reference to the network namespace of this socket
  *
@@ -120,17 +120,21 @@ struct net;
  *	for struct sock and struct inet_timewait_sock.
  */
 struct sock_common {
-	unsigned short		skc_family;
-	volatile unsigned char	skc_state;
-	unsigned char		skc_reuse;
-	int			skc_bound_dev_if;
+	/*
+	 * first fields are not copied in sock_copy()
+	 */
 	union {
 		struct hlist_node	skc_node;
 		struct hlist_nulls_node skc_nulls_node;
 	};
-	struct hlist_node	skc_bind_node;
 	atomic_t		skc_refcnt;
+
 	unsigned int		skc_hash;
+	unsigned short		skc_family;
+	volatile unsigned char	skc_state;
+	unsigned char		skc_reuse;
+	int			skc_bound_dev_if;
+	struct hlist_node	skc_bind_node;
 	struct proto		*skc_prot;
 #ifdef CONFIG_NET_NS
 	struct net	 	*skc_net;
@@ -208,15 +212,17 @@ struct sock {
 	 * don't add nothing before this first member (__sk_common) --acme
 	 */
 	struct sock_common	__sk_common;
+#define sk_node			__sk_common.skc_node
+#define sk_nulls_node		__sk_common.skc_nulls_node
+#define sk_refcnt		__sk_common.skc_refcnt
+
+#define sk_copy_start		__sk_common.skc_hash
+#define sk_hash			__sk_common.skc_hash
 #define sk_family		__sk_common.skc_family
 #define sk_state		__sk_common.skc_state
 #define sk_reuse		__sk_common.skc_reuse
 #define sk_bound_dev_if		__sk_common.skc_bound_dev_if
-#define sk_node			__sk_common.skc_node
-#define sk_nulls_node		__sk_common.skc_nulls_node
 #define sk_bind_node		__sk_common.skc_bind_node
-#define sk_refcnt		__sk_common.skc_refcnt
-#define sk_hash			__sk_common.skc_hash
 #define sk_prot			__sk_common.skc_prot
 #define sk_net			__sk_common.skc_net
 	kmemcheck_bitfield_begin(flags);

+ 5 - 0
include/net/tcp.h

@@ -1425,6 +1425,11 @@ struct tcp_request_sock_ops {
 #ifdef CONFIG_TCP_MD5SIG
 	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
 						struct request_sock *req);
+	int			(*calc_md5_hash) (char *location,
+						  struct tcp_md5sig_key *md5,
+						  struct sock *sk,
+						  struct request_sock *req,
+						  struct sk_buff *skb);
 #endif
 };
 

+ 1 - 1
init/Kconfig

@@ -962,7 +962,7 @@ config PERF_COUNTERS
 
 config EVENT_PROFILE
 	bool "Tracepoint profile sources"
-	depends on PERF_COUNTERS && EVENT_TRACER
+	depends on PERF_COUNTERS && EVENT_TRACING
 	default y
 
 endmenu

+ 3 - 6
kernel/fork.c

@@ -1407,14 +1407,11 @@ long do_fork(unsigned long clone_flags,
 		if (clone_flags & CLONE_VFORK) {
 			p->vfork_done = &vfork;
 			init_completion(&vfork);
-		} else if (!(clone_flags & CLONE_VM)) {
-			/*
-			 * vfork will do an exec which will call
-			 * set_task_comm()
-			 */
-			perf_counter_fork(p);
 		}
 
+		if (!(clone_flags & CLONE_THREAD))
+			perf_counter_fork(p);
+
 		audit_finish_fork(p);
 		tracehook_report_clone(regs, clone_flags, nr, p);
 

+ 7 - 0
kernel/freezer.c

@@ -44,12 +44,19 @@ void refrigerator(void)
 	recalc_sigpending(); /* We sent fake signal, clean it up */
 	spin_unlock_irq(&current->sighand->siglock);
 
+	/* prevent accounting of that task to load */
+	current->flags |= PF_FREEZING;
+
 	for (;;) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		if (!frozen(current))
 			break;
 		schedule();
 	}
+
+	/* Remove the accounting blocker */
+	current->flags &= ~PF_FREEZING;
+
 	pr_debug("%s left refrigerator\n", current->comm);
 	__set_current_state(save);
 }

+ 1 - 2
kernel/irq/internals.h

@@ -42,8 +42,7 @@ static inline void unregister_handler_proc(unsigned int irq,
 
 extern int irq_select_affinity_usr(unsigned int irq);
 
-extern void
-irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask);
+extern void irq_set_thread_affinity(struct irq_desc *desc);
 
 /*
  * Debugging printout:

+ 49 - 6
kernel/irq/manage.c

@@ -80,14 +80,22 @@ int irq_can_set_affinity(unsigned int irq)
 	return 1;
 }
 
-void
-irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask)
+/**
+ *	irq_set_thread_affinity - Notify irq threads to adjust affinity
+ *	@desc:		irq descriptor whose affinity has changed
+ *
+ *	We just set IRQTF_AFFINITY and delegate the affinity setting
+ *	to the interrupt thread itself. We can not call
+ *	set_cpus_allowed_ptr() here as we hold desc->lock and this
+ *	code can be called from hard interrupt context.
+ */
+void irq_set_thread_affinity(struct irq_desc *desc)
 {
 	struct irqaction *action = desc->action;
 
 	while (action) {
 		if (action->thread)
-			set_cpus_allowed_ptr(action->thread, cpumask);
+			set_bit(IRQTF_AFFINITY, &action->thread_flags);
 		action = action->next;
 	}
 }
@@ -112,7 +120,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 	if (desc->status & IRQ_MOVE_PCNTXT) {
 		if (!desc->chip->set_affinity(irq, cpumask)) {
 			cpumask_copy(desc->affinity, cpumask);
-			irq_set_thread_affinity(desc, cpumask);
+			irq_set_thread_affinity(desc);
 		}
 	}
 	else {
@@ -122,7 +130,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 #else
 	if (!desc->chip->set_affinity(irq, cpumask)) {
 		cpumask_copy(desc->affinity, cpumask);
-		irq_set_thread_affinity(desc, cpumask);
+		irq_set_thread_affinity(desc);
 	}
 #endif
 	desc->status |= IRQ_AFFINITY_SET;
@@ -176,7 +184,7 @@ int irq_select_affinity_usr(unsigned int irq)
 	spin_lock_irqsave(&desc->lock, flags);
 	ret = setup_affinity(irq, desc);
 	if (!ret)
-		irq_set_thread_affinity(desc, desc->affinity);
+		irq_set_thread_affinity(desc);
 	spin_unlock_irqrestore(&desc->lock, flags);
 
 	return ret;
@@ -443,6 +451,39 @@ static int irq_wait_for_interrupt(struct irqaction *action)
 	return -1;
 }
 
+#ifdef CONFIG_SMP
+/*
+ * Check whether we need to change the affinity of the interrupt thread.
+ */
+static void
+irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
+{
+	cpumask_var_t mask;
+
+	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
+		return;
+
+	/*
+	 * In case we are out of memory we set IRQTF_AFFINITY again and
+	 * try again next time
+	 */
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+		set_bit(IRQTF_AFFINITY, &action->thread_flags);
+		return;
+	}
+
+	spin_lock_irq(&desc->lock);
+	cpumask_copy(mask, desc->affinity);
+	spin_unlock_irq(&desc->lock);
+
+	set_cpus_allowed_ptr(current, mask);
+	free_cpumask_var(mask);
+}
+#else
+static inline void
+irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
+#endif
+
 /*
  * Interrupt handler thread
  */
@@ -458,6 +499,8 @@ static int irq_thread(void *data)
 
 	while (!irq_wait_for_interrupt(action)) {
 
+		irq_thread_check_affinity(desc, action);
+
 		atomic_inc(&desc->threads_active);
 
 		spin_lock_irq(&desc->lock);

+ 1 - 1
kernel/irq/migration.c

@@ -45,7 +45,7 @@ void move_masked_irq(int irq)
 		   < nr_cpu_ids))
 		if (!desc->chip->set_affinity(irq, desc->pending_mask)) {
 			cpumask_copy(desc->affinity, desc->pending_mask);
-			irq_set_thread_affinity(desc, desc->pending_mask);
+			irq_set_thread_affinity(desc);
 		}
 
 	cpumask_clear(desc->pending_mask);

+ 92 - 94
kernel/perf_counter.c

@@ -146,6 +146,28 @@ static void put_ctx(struct perf_counter_context *ctx)
 	}
 }
 
+static void unclone_ctx(struct perf_counter_context *ctx)
+{
+	if (ctx->parent_ctx) {
+		put_ctx(ctx->parent_ctx);
+		ctx->parent_ctx = NULL;
+	}
+}
+
+/*
+ * If we inherit counters we want to return the parent counter id
+ * to userspace.
+ */
+static u64 primary_counter_id(struct perf_counter *counter)
+{
+	u64 id = counter->id;
+
+	if (counter->parent)
+		id = counter->parent->id;
+
+	return id;
+}
+
 /*
  * Get the perf_counter_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
@@ -1288,7 +1310,6 @@ static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
 #define MAX_INTERRUPTS (~0ULL)
 
 static void perf_log_throttle(struct perf_counter *counter, int enable);
-static void perf_log_period(struct perf_counter *counter, u64 period);
 
 static void perf_adjust_period(struct perf_counter *counter, u64 events)
 {
@@ -1307,8 +1328,6 @@ static void perf_adjust_period(struct perf_counter *counter, u64 events)
 	if (!sample_period)
 		sample_period = 1;
 
-	perf_log_period(counter, sample_period);
-
 	hwc->sample_period = sample_period;
 }
 
@@ -1463,10 +1482,8 @@ static void perf_counter_enable_on_exec(struct task_struct *task)
 	/*
 	 * Unclone this context if we enabled any counter.
 	 */
-	if (enabled && ctx->parent_ctx) {
-		put_ctx(ctx->parent_ctx);
-		ctx->parent_ctx = NULL;
-	}
+	if (enabled)
+		unclone_ctx(ctx);
 
 	spin_unlock(&ctx->lock);
 
@@ -1526,7 +1543,6 @@ __perf_counter_init_context(struct perf_counter_context *ctx,
 
 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
 {
-	struct perf_counter_context *parent_ctx;
 	struct perf_counter_context *ctx;
 	struct perf_cpu_context *cpuctx;
 	struct task_struct *task;
@@ -1586,11 +1602,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
  retry:
 	ctx = perf_lock_task_context(task, &flags);
 	if (ctx) {
-		parent_ctx = ctx->parent_ctx;
-		if (parent_ctx) {
-			put_ctx(parent_ctx);
-			ctx->parent_ctx = NULL;		/* no longer a clone */
-		}
+		unclone_ctx(ctx);
 		spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 
@@ -1704,7 +1716,7 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 		values[n++] = counter->total_time_running +
 			atomic64_read(&counter->child_total_time_running);
 	if (counter->attr.read_format & PERF_FORMAT_ID)
-		values[n++] = counter->id;
+		values[n++] = primary_counter_id(counter);
 	mutex_unlock(&counter->child_mutex);
 
 	if (count < n * sizeof(u64))
@@ -1811,8 +1823,6 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
 
 		counter->attr.sample_freq = value;
 	} else {
-		perf_log_period(counter, value);
-
 		counter->attr.sample_period = value;
 		counter->hw.sample_period = value;
 	}
@@ -2661,10 +2671,14 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	if (sample_type & PERF_SAMPLE_ID)
 		header.size += sizeof(u64);
 
+	if (sample_type & PERF_SAMPLE_STREAM_ID)
+		header.size += sizeof(u64);
+
 	if (sample_type & PERF_SAMPLE_CPU) {
 		header.size += sizeof(cpu_entry);
 
 		cpu_entry.cpu = raw_smp_processor_id();
+		cpu_entry.reserved = 0;
 	}
 
 	if (sample_type & PERF_SAMPLE_PERIOD)
@@ -2703,7 +2717,13 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	if (sample_type & PERF_SAMPLE_ADDR)
 		perf_output_put(&handle, data->addr);
 
-	if (sample_type & PERF_SAMPLE_ID)
+	if (sample_type & PERF_SAMPLE_ID) {
+		u64 id = primary_counter_id(counter);
+
+		perf_output_put(&handle, id);
+	}
+
+	if (sample_type & PERF_SAMPLE_STREAM_ID)
 		perf_output_put(&handle, counter->id);
 
 	if (sample_type & PERF_SAMPLE_CPU)
@@ -2726,7 +2746,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 			if (sub != counter)
 				sub->pmu->read(sub);
 
-			group_entry.id = sub->id;
+			group_entry.id = primary_counter_id(sub);
 			group_entry.counter = atomic64_read(&sub->count);
 
 			perf_output_put(&handle, group_entry);
@@ -2786,15 +2806,8 @@ perf_counter_read_event(struct perf_counter *counter,
 	}
 
 	if (counter->attr.read_format & PERF_FORMAT_ID) {
-		u64 id;
-
 		event.header.size += sizeof(u64);
-		if (counter->parent)
-			id = counter->parent->id;
-		else
-			id = counter->id;
-
-		event.format[i++] = id;
+		event.format[i++] = primary_counter_id(counter);
 	}
 
 	ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
@@ -2895,8 +2908,11 @@ void perf_counter_fork(struct task_struct *task)
 		.event  = {
 			.header = {
 				.type = PERF_EVENT_FORK,
+				.misc = 0,
 				.size = sizeof(fork_event.event),
 			},
+			/* .pid  */
+			/* .ppid */
 		},
 	};
 
@@ -2968,8 +2984,10 @@ static void perf_counter_comm_event(struct perf_comm_event *comm_event)
 	struct perf_cpu_context *cpuctx;
 	struct perf_counter_context *ctx;
 	unsigned int size;
-	char *comm = comm_event->task->comm;
+	char comm[TASK_COMM_LEN];
 
+	memset(comm, 0, sizeof(comm));
+	strncpy(comm, comm_event->task->comm, sizeof(comm));
 	size = ALIGN(strlen(comm)+1, sizeof(u64));
 
 	comm_event->comm = comm;
@@ -3004,8 +3022,16 @@ void perf_counter_comm(struct task_struct *task)
 
 	comm_event = (struct perf_comm_event){
 		.task	= task,
+		/* .comm      */
+		/* .comm_size */
 		.event  = {
-			.header = { .type = PERF_EVENT_COMM, },
+			.header = {
+				.type = PERF_EVENT_COMM,
+				.misc = 0,
+				/* .size */
+			},
+			/* .pid */
+			/* .tid */
 		},
 	};
 
@@ -3088,8 +3114,15 @@ static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
 	char *buf = NULL;
 	const char *name;
 
+	memset(tmp, 0, sizeof(tmp));
+
 	if (file) {
-		buf = kzalloc(PATH_MAX, GFP_KERNEL);
+		/*
+		 * d_path works from the end of the buffer backwards, so we
+		 * need to add enough zero bytes after the string to handle
+		 * the 64bit alignment we do later.
+		 */
+		buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
 		if (!buf) {
 			name = strncpy(tmp, "//enomem", sizeof(tmp));
 			goto got_name;
@@ -3100,9 +3133,11 @@ static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
 			goto got_name;
 		}
 	} else {
-		name = arch_vma_name(mmap_event->vma);
-		if (name)
+		if (arch_vma_name(mmap_event->vma)) {
+			name = strncpy(tmp, arch_vma_name(mmap_event->vma),
+				       sizeof(tmp));
 			goto got_name;
+		}
 
 		if (!vma->vm_mm) {
 			name = strncpy(tmp, "[vdso]", sizeof(tmp));
@@ -3147,8 +3182,16 @@ void __perf_counter_mmap(struct vm_area_struct *vma)
 
 	mmap_event = (struct perf_mmap_event){
 		.vma	= vma,
+		/* .file_name */
+		/* .file_size */
 		.event  = {
-			.header = { .type = PERF_EVENT_MMAP, },
+			.header = {
+				.type = PERF_EVENT_MMAP,
+				.misc = 0,
+				/* .size */
+			},
+			/* .pid */
+			/* .tid */
 			.start  = vma->vm_start,
 			.len    = vma->vm_end - vma->vm_start,
 			.pgoff  = vma->vm_pgoff,
@@ -3158,49 +3201,6 @@ void __perf_counter_mmap(struct vm_area_struct *vma)
 	perf_counter_mmap_event(&mmap_event);
 }
 
-/*
- * Log sample_period changes so that analyzing tools can re-normalize the
- * event flow.
- */
-
-struct freq_event {
-	struct perf_event_header	header;
-	u64				time;
-	u64				id;
-	u64				period;
-};
-
-static void perf_log_period(struct perf_counter *counter, u64 period)
-{
-	struct perf_output_handle handle;
-	struct freq_event event;
-	int ret;
-
-	if (counter->hw.sample_period == period)
-		return;
-
-	if (counter->attr.sample_type & PERF_SAMPLE_PERIOD)
-		return;
-
-	event = (struct freq_event) {
-		.header = {
-			.type = PERF_EVENT_PERIOD,
-			.misc = 0,
-			.size = sizeof(event),
-		},
-		.time = sched_clock(),
-		.id = counter->id,
-		.period = period,
-	};
-
-	ret = perf_output_begin(&handle, counter, sizeof(event), 1, 0);
-	if (ret)
-		return;
-
-	perf_output_put(&handle, event);
-	perf_output_end(&handle);
-}
-
 /*
  * IRQ throttle logging
  */
@@ -3214,16 +3214,21 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
 		struct perf_event_header	header;
 		u64				time;
 		u64				id;
+		u64				stream_id;
 	} throttle_event = {
 		.header = {
-			.type = PERF_EVENT_THROTTLE + 1,
+			.type = PERF_EVENT_THROTTLE,
 			.misc = 0,
 			.size = sizeof(throttle_event),
 		},
-		.time	= sched_clock(),
-		.id	= counter->id,
+		.time		= sched_clock(),
+		.id		= primary_counter_id(counter),
+		.stream_id	= counter->id,
 	};
 
+	if (enable)
+		throttle_event.header.type = PERF_EVENT_UNTHROTTLE;
+
 	ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
 	if (ret)
 		return;
@@ -3671,7 +3676,7 @@ static const struct pmu perf_ops_task_clock = {
 void perf_tpcounter_event(int event_id)
 {
 	struct perf_sample_data data = {
-		.regs = get_irq_regs();
+		.regs = get_irq_regs(),
 		.addr = 0,
 	};
 
@@ -3687,16 +3692,12 @@ extern void ftrace_profile_disable(int);
 
 static void tp_perf_counter_destroy(struct perf_counter *counter)
 {
-	ftrace_profile_disable(perf_event_id(&counter->attr));
+	ftrace_profile_disable(counter->attr.config);
 }
 
 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
 {
-	int event_id = perf_event_id(&counter->attr);
-	int ret;
-
-	ret = ftrace_profile_enable(event_id);
-	if (ret)
+	if (ftrace_profile_enable(counter->attr.config))
 		return NULL;
 
 	counter->destroy = tp_perf_counter_destroy;
@@ -4255,15 +4256,12 @@ void perf_counter_exit_task(struct task_struct *child)
 	 */
 	spin_lock(&child_ctx->lock);
 	child->perf_counter_ctxp = NULL;
-	if (child_ctx->parent_ctx) {
-		/*
-		 * This context is a clone; unclone it so it can't get
-		 * swapped to another process while we're removing all
-		 * the counters from it.
-		 */
-		put_ctx(child_ctx->parent_ctx);
-		child_ctx->parent_ctx = NULL;
-	}
+	/*
+	 * If this context is a clone; unclone it so it can't get
+	 * swapped to another process while we're removing all
+	 * the counters from it.
+	 */
+	unclone_ctx(child_ctx);
 	spin_unlock(&child_ctx->lock);
 	local_irq_restore(flags);
 

+ 2 - 2
kernel/sched.c

@@ -7289,6 +7289,7 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
 static void calc_global_load_remove(struct rq *rq)
 {
 	atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
+	rq->calc_load_active = 0;
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
@@ -7515,6 +7516,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		task_rq_unlock(rq, &flags);
 		get_task_struct(p);
 		cpu_rq(cpu)->migration_thread = p;
+		rq->calc_load_update = calc_load_update;
 		break;
 
 	case CPU_ONLINE:
@@ -7525,8 +7527,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		/* Update our root-domain */
 		rq = cpu_rq(cpu);
 		spin_lock_irqsave(&rq->lock, flags);
-		rq->calc_load_update = calc_load_update;
-		rq->calc_load_active = 0;
 		if (rq->rd) {
 			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
 

+ 8 - 2
kernel/sched_fair.c

@@ -266,6 +266,12 @@ static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
 	return min_vruntime;
 }
 
+static inline int entity_before(struct sched_entity *a,
+				struct sched_entity *b)
+{
+	return (s64)(a->vruntime - b->vruntime) < 0;
+}
+
 static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	return se->vruntime - cfs_rq->min_vruntime;
@@ -1017,7 +1023,7 @@ static void yield_task_fair(struct rq *rq)
 	/*
 	 * Already in the rightmost position?
 	 */
-	if (unlikely(!rightmost || rightmost->vruntime < se->vruntime))
+	if (unlikely(!rightmost || entity_before(rightmost, se)))
 		return;
 
 	/*
@@ -1713,7 +1719,7 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 
 	/* 'curr' will be NULL if the child belongs to a different group */
 	if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
-			curr && curr->vruntime < se->vruntime) {
+			curr && entity_before(curr, se)) {
 		/*
 		 * Upon rescheduling, sched_class::put_prev_task() will place
 		 * 'current' within the tree based on its new key value.

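The new entity_before() helper compares vruntimes through a signed difference instead of the plain '<' used before, so the ordering stays correct when the unsigned vruntime counters wrap around. A small standalone illustration of why that matters (plain C, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Same idiom as entity_before(): the signed difference handles wraparound. */
static int before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

int main(void)
{
	uint64_t a = UINT64_MAX - 5;	/* just about to wrap */
	uint64_t b = 10;		/* already wrapped past zero */

	printf("a < b        : %d\n", a < b);		/* 0: naive compare is wrong */
	printf("before(a, b) : %d\n", before(a, b));	/* 1: a really is earlier */
	return 0;
}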
+ 63 - 1
kernel/softirq.c

@@ -345,7 +345,9 @@ void open_softirq(int nr, void (*action)(struct softirq_action *))
 	softirq_vec[nr].action = action;
 }
 
-/* Tasklets */
+/*
+ * Tasklets
+ */
 struct tasklet_head
 {
 	struct tasklet_struct *head;
@@ -493,6 +495,66 @@ void tasklet_kill(struct tasklet_struct *t)
 
 EXPORT_SYMBOL(tasklet_kill);
 
+/*
+ * tasklet_hrtimer
+ */
+
+/*
+ * The trampoline is called when the hrtimer expires. If this is
+ * called from the hrtimer interrupt then we schedule the tasklet as
+ * the timer callback function expects to run in softirq context. If
+ * it's called in softirq context anyway (i.e. high resolution timers
+ * disabled) then the hrtimer callback is called right away.
+ */
+static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
+{
+	struct tasklet_hrtimer *ttimer =
+		container_of(timer, struct tasklet_hrtimer, timer);
+
+	if (hrtimer_is_hres_active(timer)) {
+		tasklet_hi_schedule(&ttimer->tasklet);
+		return HRTIMER_NORESTART;
+	}
+	return ttimer->function(timer);
+}
+
+/*
+ * Helper function which calls the hrtimer callback from
+ * tasklet/softirq context
+ */
+static void __tasklet_hrtimer_trampoline(unsigned long data)
+{
+	struct tasklet_hrtimer *ttimer = (void *)data;
+	enum hrtimer_restart restart;
+
+	restart = ttimer->function(&ttimer->timer);
+	if (restart != HRTIMER_NORESTART)
+		hrtimer_restart(&ttimer->timer);
+}
+
+/**
+ * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
+ * @ttimer:	 tasklet_hrtimer which is initialized
+ * @function:	 hrtimer callback function which gets called from softirq context
+ * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
+ * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
+ */
+void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
+			  enum hrtimer_restart (*function)(struct hrtimer *),
+			  clockid_t which_clock, enum hrtimer_mode mode)
+{
+	hrtimer_init(&ttimer->timer, which_clock, mode);
+	ttimer->timer.function = __hrtimer_tasklet_trampoline;
+	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
+		     (unsigned long)ttimer);
+	ttimer->function = function;
+}
+EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
+
+/*
+ * Remote softirq bits
+ */
+
 DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
 EXPORT_PER_CPU_SYMBOL(softirq_work_list);
 

+ 1 - 1
kernel/time/clocksource.c

@@ -513,7 +513,7 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev,
 	 * Check to make sure we don't switch to a non-highres capable
 	 * clocksource if the tick code is in oneshot mode (highres or nohz)
 	 */
-	if (tick_oneshot_mode_active() &&
+	if (tick_oneshot_mode_active() && ovr &&
 	    !(ovr->flags & CLOCK_SOURCE_VALID_FOR_HRES)) {
 		printk(KERN_WARNING "%s clocksource is not HRT compatible. "
 			"Cannot switch while in HRT/NOHZ mode\n", ovr->name);

+ 1 - 1
kernel/timer.c

@@ -714,7 +714,7 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
 	 * networking code - if the timer is re-modified
 	 * to be the same thing then just return:
 	 */
-	if (timer->expires == expires && timer_pending(timer))
+	if (timer_pending(timer) && timer->expires == expires)
 		return 1;
 
 	return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);

+ 4 - 0
net/can/bcm.c

@@ -75,6 +75,7 @@ static __initdata const char banner[] = KERN_INFO
 MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
+MODULE_ALIAS("can-proto-2");
 
 /* easy access to can_frame payload */
 static inline u64 GET_U64(const struct can_frame *cp)
@@ -1469,6 +1470,9 @@ static int bcm_release(struct socket *sock)
 		bo->ifindex = 0;
 	}
 
+	sock_orphan(sk);
+	sock->sk = NULL;
+
 	release_sock(sk);
 	sock_put(sk);
 

+ 4 - 0
net/can/raw.c

@@ -62,6 +62,7 @@ static __initdata const char banner[] =
 MODULE_DESCRIPTION("PF_CAN raw protocol");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
+MODULE_ALIAS("can-proto-1");
 
 #define MASK_ALL 0
 
@@ -306,6 +307,9 @@ static int raw_release(struct socket *sock)
 	ro->bound   = 0;
 	ro->count   = 0;
 
+	sock_orphan(sk);
+	sock->sk = NULL;
+
 	release_sock(sk);
 	sock_put(sk);
 

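The MODULE_ALIAS("can-proto-1") and MODULE_ALIAS("can-proto-2") lines let the PF_CAN core load the raw and broadcast-manager protocol modules on demand. The sketch below approximates the lookup step in af_can.c; can_get_proto() is a hypothetical helper name and error handling is omitted, so it is not a verbatim copy of that file:

/* Approximate sketch of how the aliases are consumed by the CAN core. */
static struct can_proto *can_get_proto(int protocol)
{
	if (protocol < 0 || protocol >= CAN_NPROTO)
		return NULL;

	if (!proto_tab[protocol])
		/* modprobe resolves this to can-raw (1) or can-bcm (2) */
		request_module("can-proto-%d", protocol);

	return proto_tab[protocol];	/* still NULL if nothing provides it */
}

The sock_orphan()/sock->sk = NULL pair added to both release paths presumably detaches the struct sock from the socket before the final sock_put(), so later operations on the file cannot reach the released sk.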
+ 19 - 3
net/core/sock.c

@@ -631,7 +631,7 @@ set_rcvbuf:
 
 	case SO_TIMESTAMPING:
 		if (val & ~SOF_TIMESTAMPING_MASK) {
-			ret = EINVAL;
+			ret = -EINVAL;
 			break;
 		}
 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
@@ -919,13 +919,19 @@ static inline void sock_lock_init(struct sock *sk)
 			af_family_keys + sk->sk_family);
 }
 
+/*
+ * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
+ * even temporarily, because of RCU lookups. sk_node should also be left as is.
+ */
 static void sock_copy(struct sock *nsk, const struct sock *osk)
 {
 #ifdef CONFIG_SECURITY_NETWORK
 	void *sptr = nsk->sk_security;
 #endif
-
-	memcpy(nsk, osk, osk->sk_prot->obj_size);
+	BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) !=
+		     sizeof(osk->sk_node) + sizeof(osk->sk_refcnt));
+	memcpy(&nsk->sk_copy_start, &osk->sk_copy_start,
+	       osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start));
 #ifdef CONFIG_SECURITY_NETWORK
 	nsk->sk_security = sptr;
 	security_sk_clone(osk, nsk);
@@ -1140,6 +1146,11 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
 
 		newsk->sk_err	   = 0;
 		newsk->sk_priority = 0;
+		/*
+		 * Before updating sk_refcnt, we must commit prior changes to memory
+		 * (Documentation/RCU/rculist_nulls.txt for details)
+		 */
+		smp_wmb();
 		atomic_set(&newsk->sk_refcnt, 2);
 
 		/*
@@ -1855,6 +1866,11 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 
 	sk->sk_stamp = ktime_set(-1L, 0);
 
+	/*
+	 * Before updating sk_refcnt, we must commit prior changes to memory
+	 * (Documentation/RCU/rculist_nulls.txt for details)
+	 */
+	smp_wmb();
 	atomic_set(&sk->sk_refcnt, 1);
 	atomic_set(&sk->sk_wmem_alloc, 1);
 	atomic_set(&sk->sk_drops, 0);

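Both smp_wmb() calls pair with the lockless socket lookup described in Documentation/RCU/rculist_nulls.txt: the writer must make every field visible before the reference count becomes non-zero, because a reader that finds the socket in a SLAB_DESTROY_BY_RCU hash takes its reference with atomic_inc_not_zero() and only then re-checks the keys. A condensed sketch of that reader-side pattern; the function name and keys_match() are illustrative, and the nulls end-marker re-check is reduced to a comment:

#include <net/sock.h>

static struct sock *lookup_sketch(struct hlist_nulls_head *chain, u32 key)
{
	struct hlist_nulls_node *node;
	struct sock *sk;

	rcu_read_lock();
begin:
	sk_nulls_for_each_rcu(sk, node, chain) {
		if (!keys_match(sk, key))		/* hypothetical predicate */
			continue;
		if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
			goto begin;			/* being freed, retry */
		if (unlikely(!keys_match(sk, key))) {
			sock_put(sk);			/* slot was recycled, retry */
			goto begin;
		}
		goto found;
	}
	/* a real lookup also re-checks the nulls value of 'node' here */
	sk = NULL;
found:
	rcu_read_unlock();
	return sk;
}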
+ 2 - 1
net/ipv4/tcp_ipv4.c

@@ -1160,6 +1160,7 @@ struct request_sock_ops tcp_request_sock_ops __read_mostly = {
 #ifdef CONFIG_TCP_MD5SIG
 static struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
 	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
+	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
 };
 #endif
 
@@ -1373,7 +1374,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 		 */
 		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
 		if (newkey != NULL)
-			tcp_v4_md5_do_add(newsk, inet_sk(sk)->daddr,
+			tcp_v4_md5_do_add(newsk, newinet->daddr,
 					  newkey, key->keylen);
 		newsk->sk_route_caps &= ~NETIF_F_GSO_MASK;
 	}

+ 1 - 1
net/ipv4/tcp_output.c

@@ -2261,7 +2261,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 #ifdef CONFIG_TCP_MD5SIG
 	/* Okay, we have all we need - do the md5 hash if needed */
 	if (md5) {
-		tp->af_specific->calc_md5_hash(md5_hash_location,
+		tcp_rsk(req)->af_specific->calc_md5_hash(md5_hash_location,
 					       md5, NULL, req, skb);
 	}
 #endif

+ 2 - 1
net/ipv6/tcp_ipv6.c

@@ -896,6 +896,7 @@ struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
 #ifdef CONFIG_TCP_MD5SIG
 static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
+	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
 };
 #endif
 
@@ -1441,7 +1442,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 		 */
 		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
 		if (newkey != NULL)
-			tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr,
+			tcp_v6_md5_do_add(newsk, &newnp->daddr,
 					  newkey, key->keylen);
 	}
 #endif

+ 18 - 3
net/netfilter/nf_conntrack_core.c

@@ -561,23 +561,38 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
 		}
 	}
 
-	ct = kmem_cache_zalloc(nf_conntrack_cachep, gfp);
+	/*
+	 * Do not use kmem_cache_zalloc(), as this cache uses
+	 * SLAB_DESTROY_BY_RCU.
+	 */
+	ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
 	if (ct == NULL) {
 		pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
 		atomic_dec(&net->ct.count);
 		return ERR_PTR(-ENOMEM);
 	}
-
+	/*
+	 * Leave ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.next
+	 * and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged.
+	 */
+	memset(&ct->tuplehash[IP_CT_DIR_MAX], 0,
+	       sizeof(*ct) - offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
 	spin_lock_init(&ct->lock);
-	atomic_set(&ct->ct_general.use, 1);
 	ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
+	ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
 	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
+	ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev = NULL;
 	/* Don't set timer yet: wait for confirmation */
 	setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
 #ifdef CONFIG_NET_NS
 	ct->ct_net = net;
 #endif
 
+	/*
+	 * changes to lookup keys must be done before setting refcnt to 1
+	 */
+	smp_wmb();
+	atomic_set(&ct->ct_general.use, 1);
 	return ct;
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_alloc);

+ 3 - 2
net/netfilter/xt_osf.c

@@ -330,7 +330,8 @@ static bool xt_osf_match_packet(const struct sk_buff *skb,
 			fcount++;
 
 			if (info->flags & XT_OSF_LOG)
-				nf_log_packet(p->hooknum, 0, skb, p->in, p->out, NULL,
+				nf_log_packet(p->family, p->hooknum, skb,
+					p->in, p->out, NULL,
 					"%s [%s:%s] : %pi4:%d -> %pi4:%d hops=%d\n",
 					f->genre, f->version, f->subtype,
 					&ip->saddr, ntohs(tcp->source),
@@ -345,7 +346,7 @@ static bool xt_osf_match_packet(const struct sk_buff *skb,
 	rcu_read_unlock();
 
 	if (!fcount && (info->flags & XT_OSF_LOG))
-		nf_log_packet(p->hooknum, 0, skb, p->in, p->out, NULL,
+		nf_log_packet(p->family, p->hooknum, skb, p->in, p->out, NULL,
 			"Remote OS is not known: %pi4:%u -> %pi4:%u\n",
 				&ip->saddr, ntohs(tcp->source),
 				&ip->daddr, ntohs(tcp->dest));

+ 2 - 5
sound/core/seq/Makefile

@@ -3,10 +3,6 @@
 # Copyright (c) 1999 by Jaroslav Kysela <perex@perex.cz>
 #
 
-ifeq ($(CONFIG_SND_SEQUENCER_OSS),y)
-  obj-$(CONFIG_SND_SEQUENCER) += oss/
-endif
-
 snd-seq-device-objs := seq_device.o
 snd-seq-objs := seq.o seq_lock.o seq_clientmgr.o seq_memory.o seq_queue.o \
                 seq_fifo.o seq_prioq.o seq_timer.o \
@@ -19,7 +15,8 @@ snd-seq-virmidi-objs := seq_virmidi.o
 
 obj-$(CONFIG_SND_SEQUENCER) += snd-seq.o snd-seq-device.o
 ifeq ($(CONFIG_SND_SEQUENCER_OSS),y)
-obj-$(CONFIG_SND_SEQUENCER) += snd-seq-midi-event.o
+  obj-$(CONFIG_SND_SEQUENCER) += snd-seq-midi-event.o
+  obj-$(CONFIG_SND_SEQUENCER) += oss/
 endif
 obj-$(CONFIG_SND_SEQ_DUMMY) += snd-seq-dummy.o
 

Some files were not shown because too many files changed in this diff