Merge branch 'ep93xx-dma' into spi/next

Grant Likely committed 14 years ago
commit e4c8308c85
100 changed files with 2466 additions and 910 deletions
  1. Documentation/spi/ep93xx_spi (+10 -0)
  2. Makefile (+1 -1)
  3. arch/arm/mach-ep93xx/Makefile (+3 -1)
  4. arch/arm/mach-ep93xx/core.c (+5 -1)
  5. arch/arm/mach-ep93xx/dma-m2p.c (+0 -411)
  6. arch/arm/mach-ep93xx/dma.c (+108 -0)
  7. arch/arm/mach-ep93xx/include/mach/dma.h (+67 -123)
  8. arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h (+2 -0)
  9. arch/arm/mach-shmobile/board-ap4evb.c (+56 -0)
  10. arch/arm/mach-shmobile/board-mackerel.c (+78 -0)
  11. arch/arm/mach-shmobile/clock-sh7372.c (+7 -0)
  12. arch/arm/mach-tegra/board-harmony-power.c (+3 -1)
  13. arch/arm/mach-tegra/board-harmony.h (+2 -1)
  14. arch/blackfin/lib/strncpy.S (+1 -1)
  15. arch/sh/Kconfig (+1 -1)
  16. arch/sh/boards/mach-ap325rxa/setup.c (+13 -19)
  17. arch/sh/boards/mach-ecovec24/setup.c (+3 -0)
  18. arch/sh/include/asm/pgtable.h (+1 -0)
  19. arch/sh/include/asm/ptrace.h (+4 -2)
  20. arch/sh/include/asm/tlb.h (+1 -0)
  21. arch/sh/include/cpu-sh4/cpu/sh7722.h (+1 -0)
  22. arch/sh/include/cpu-sh4/cpu/sh7724.h (+1 -0)
  23. arch/sh/include/cpu-sh4/cpu/sh7757.h (+1 -0)
  24. arch/sh/kernel/process_32.c (+1 -0)
  25. arch/sh/mm/consistent.c (+1 -1)
  26. block/blk-ioc.c (+2 -2)
  27. block/cfq-iosched.c (+4 -7)
  28. drivers/block/nbd.c (+20 -2)
  29. drivers/block/xen-blkback/blkback.c (+6 -4)
  30. drivers/block/xen-blkback/xenbus.c (+1 -2)
  31. drivers/bluetooth/hci_ldisc.c (+6 -11)
  32. drivers/clocksource/sh_cmt.c (+2 -10)
  33. drivers/clocksource/sh_tmu.c (+2 -10)
  34. drivers/dma/Kconfig (+7 -0)
  35. drivers/dma/Makefile (+1 -0)
  36. drivers/dma/ep93xx_dma.c (+1355 -0)
  37. drivers/dma/shdma.c (+7 -2)
  38. drivers/hwmon/coretemp.c (+4 -19)
  39. drivers/hwmon/max6642.c (+18 -4)
  40. drivers/input/serio/serport.c (+2 -8)
  41. drivers/isdn/gigaset/ser-gigaset.c (+3 -5)
  42. drivers/misc/ti-st/st_core.c (+2 -4)
  43. drivers/net/3c509.c (+7 -7)
  44. drivers/net/3c59x.c (+2 -2)
  45. drivers/net/caif/caif_serial.c (+2 -4)
  46. drivers/net/can/flexcan.c (+2 -3)
  47. drivers/net/can/slcan.c (+3 -6)
  48. drivers/net/davinci_emac.c (+6 -4)
  49. drivers/net/depca.c (+17 -18)
  50. drivers/net/dm9000.c (+3 -3)
  51. drivers/net/hamradio/6pack.c (+3 -5)
  52. drivers/net/hamradio/mkiss.c (+4 -7)
  53. drivers/net/hp100.c (+6 -6)
  54. drivers/net/ibmlana.c (+2 -2)
  55. drivers/net/irda/irtty-sir.c (+7 -9)
  56. drivers/net/irda/smsc-ircc2.c (+22 -22)
  57. drivers/net/ks8842.c (+1 -1)
  58. drivers/net/ne3210.c (+6 -9)
  59. drivers/net/ppp_async.c (+2 -4)
  60. drivers/net/ppp_synctty.c (+2 -4)
  61. drivers/net/slip.c (+4 -7)
  62. drivers/net/smc-mca.c (+3 -3)
  63. drivers/net/tg3.c (+1 -1)
  64. drivers/net/tokenring/madgemc.c (+1 -1)
  65. drivers/net/tulip/de4x5.c (+2 -2)
  66. drivers/net/usb/catc.c (+1 -1)
  67. drivers/net/usb/cdc_ncm.c (+2 -1)
  68. drivers/net/wan/x25_asy.c (+2 -5)
  69. drivers/net/wireless/ath/ath9k/Kconfig (+0 -1)
  70. drivers/net/wireless/ath/ath9k/ar9002_calib.c (+1 -1)
  71. drivers/net/wireless/ath/ath9k/ar9003_eeprom.c (+8 -2)
  72. drivers/net/wireless/ath/ath9k/ar9003_phy.c (+22 -0)
  73. drivers/net/wireless/ath/ath9k/eeprom_9287.c (+8 -2)
  74. drivers/net/wireless/ath/ath9k/hw.c (+4 -1)
  75. drivers/net/wireless/ath/ath9k/hw.h (+2 -0)
  76. drivers/net/wireless/ath/ath9k/main.c (+3 -1)
  77. drivers/net/wireless/ath/ath9k/rc.c (+2 -1)
  78. drivers/net/wireless/b43/phy_n.c (+1 -1)
  79. drivers/net/wireless/iwlegacy/iwl-4965-lib.c (+2 -2)
  80. drivers/net/wireless/iwlegacy/iwl-4965.c (+1 -1)
  81. drivers/net/wireless/iwlwifi/iwl-6000.c (+18 -10)
  82. drivers/net/wireless/iwlwifi/iwl-agn.c (+3 -3)
  83. drivers/net/wireless/iwlwifi/iwl-agn.h (+1 -0)
  84. drivers/net/wireless/libertas/cmd.c (+3 -3)
  85. drivers/net/wireless/mwifiex/sdio.h (+2 -2)
  86. drivers/net/wireless/rt2x00/Kconfig (+0 -1)
  87. drivers/net/wireless/rtlwifi/pci.c (+22 -13)
  88. drivers/net/wireless/wl12xx/conf.h (+3 -0)
  89. drivers/net/wireless/wl12xx/main.c (+1 -0)
  90. drivers/net/wireless/wl12xx/scan.c (+33 -16)
  91. drivers/net/wireless/wl12xx/scan.h (+3 -0)
  92. drivers/net/wireless/zd1211rw/zd_usb.c (+42 -11)
  93. drivers/rtc/rtc-m41t93.c (+1 -1)
  94. drivers/scsi/scsi_scan.c (+1 -1)
  95. drivers/scsi/scsi_sysfs.c (+1 -0)
  96. drivers/spi/spi-ep93xx.c (+291 -12)
  97. drivers/tty/n_gsm.c (+2 -4)
  98. drivers/tty/n_hdlc.c (+8 -10)
  99. drivers/tty/n_r3964.c (+4 -6)
  100. drivers/tty/n_tty.c (+47 -14)

+ 10 - 0
Documentation/spi/ep93xx_spi

@@ -88,6 +88,16 @@ static void __init ts72xx_init_machine(void)
 			    ARRAY_SIZE(ts72xx_spi_devices));
 }
 
+The driver can use DMA for the transfers also. In this case ts72xx_spi_info
+becomes:
+
+static struct ep93xx_spi_info ts72xx_spi_info = {
+	.num_chipselect	= ARRAY_SIZE(ts72xx_spi_devices),
+	.use_dma	= true;
+};
+
+Note that CONFIG_EP93XX_DMA should be enabled as well.
+
 Thanks to
 =========
 Martin Guy, H. Hartley Sweeten and others who helped me during development of
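
For reference, a compilable version of the board snippet added above might look
like the sketch below. The ts72xx_spi_devices array is taken from the surrounding
documentation; the registration call is assumed to be ep93xx_register_spi(), which
is not visible in the truncated hunk. Note that the struct field initializers are
separated by commas:

static struct ep93xx_spi_info ts72xx_spi_info = {
	.num_chipselect	= ARRAY_SIZE(ts72xx_spi_devices),
	.use_dma	= true,
};

static void __init ts72xx_init_machine(void)
{
	/* ... other board setup ... */
	ep93xx_register_spi(&ts72xx_spi_info, ts72xx_spi_devices,
			    ARRAY_SIZE(ts72xx_spi_devices));
}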

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Sneaky Weasel
 
 # *DOCUMENTATION*

+ 3 - 1
arch/arm/mach-ep93xx/Makefile

@@ -1,11 +1,13 @@
 #
 # Makefile for the linux kernel.
 #
-obj-y			:= core.o clock.o dma-m2p.o gpio.o
+obj-y			:= core.o clock.o gpio.o
 obj-m			:=
 obj-n			:=
 obj-			:=
 
+obj-$(CONFIG_EP93XX_DMA)	+= dma.o
+
 obj-$(CONFIG_MACH_ADSSPHERE)	+= adssphere.o
 obj-$(CONFIG_MACH_EDB93XX)	+= edb93xx.o
 obj-$(CONFIG_MACH_GESBC9312)	+= gesbc9312.o

+ 5 - 1
arch/arm/mach-ep93xx/core.c

@@ -488,11 +488,15 @@ static struct resource ep93xx_spi_resources[] = {
 	},
 };
 
+static u64 ep93xx_spi_dma_mask = DMA_BIT_MASK(32);
+
 static struct platform_device ep93xx_spi_device = {
 	.name		= "ep93xx-spi",
 	.id		= 0,
 	.dev		= {
-		.platform_data = &ep93xx_spi_master_data,
+		.platform_data		= &ep93xx_spi_master_data,
+		.coherent_dma_mask	= DMA_BIT_MASK(32),
+		.dma_mask		= &ep93xx_spi_dma_mask,
 	},
 	.num_resources	= ARRAY_SIZE(ep93xx_spi_resources),
 	.resource	= ep93xx_spi_resources,

+ 0 - 411
arch/arm/mach-ep93xx/dma-m2p.c

@@ -1,411 +0,0 @@
-/*
- * arch/arm/mach-ep93xx/dma-m2p.c
- * M2P DMA handling for Cirrus EP93xx chips.
- *
- * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
- * Copyright (C) 2006 Applied Data Systems
- *
- * Copyright (C) 2009 Ryan Mallon <ryan@bluewatersys.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- */
-
-/*
- * On the EP93xx chip the following peripherals my be allocated to the 10
- * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive).
- *
- *	I2S	contains 3 Tx and 3 Rx DMA Channels
- *	AAC	contains 3 Tx and 3 Rx DMA Channels
- *	UART1	contains 1 Tx and 1 Rx DMA Channels
- *	UART2	contains 1 Tx and 1 Rx DMA Channels
- *	UART3	contains 1 Tx and 1 Rx DMA Channels
- *	IrDA	contains 1 Tx and 1 Rx DMA Channels
- *
- * SSP and IDE use the Memory to Memory (M2M) channels and are not covered
- * with this implementation.
- */
-
-#define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/io.h>
-
-#include <mach/dma.h>
-#include <mach/hardware.h>
-
-#define M2P_CONTROL			0x00
-#define  M2P_CONTROL_STALL_IRQ_EN	(1 << 0)
-#define  M2P_CONTROL_NFB_IRQ_EN		(1 << 1)
-#define  M2P_CONTROL_ERROR_IRQ_EN	(1 << 3)
-#define  M2P_CONTROL_ENABLE		(1 << 4)
-#define M2P_INTERRUPT			0x04
-#define  M2P_INTERRUPT_STALL		(1 << 0)
-#define  M2P_INTERRUPT_NFB		(1 << 1)
-#define  M2P_INTERRUPT_ERROR		(1 << 3)
-#define M2P_PPALLOC			0x08
-#define M2P_STATUS			0x0c
-#define M2P_REMAIN			0x14
-#define M2P_MAXCNT0			0x20
-#define M2P_BASE0			0x24
-#define M2P_MAXCNT1			0x30
-#define M2P_BASE1			0x34
-
-#define STATE_IDLE	0	/* Channel is inactive.  */
-#define STATE_STALL	1	/* Channel is active, no buffers pending.  */
-#define STATE_ON	2	/* Channel is active, one buffer pending.  */
-#define STATE_NEXT	3	/* Channel is active, two buffers pending.  */
-
-struct m2p_channel {
-	char				*name;
-	void __iomem			*base;
-	int				irq;
-
-	struct clk			*clk;
-	spinlock_t			lock;
-
-	void				*client;
-	unsigned			next_slot:1;
-	struct ep93xx_dma_buffer	*buffer_xfer;
-	struct ep93xx_dma_buffer	*buffer_next;
-	struct list_head		buffers_pending;
-};
-
-static struct m2p_channel m2p_rx[] = {
-	{"m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1},
-	{"m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3},
-	{"m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5},
-	{"m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7},
-	{"m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9},
-	{NULL},
-};
-
-static struct m2p_channel m2p_tx[] = {
-	{"m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0},
-	{"m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2},
-	{"m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4},
-	{"m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6},
-	{"m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8},
-	{NULL},
-};
-
-static void feed_buf(struct m2p_channel *ch, struct ep93xx_dma_buffer *buf)
-{
-	if (ch->next_slot == 0) {
-		writel(buf->size, ch->base + M2P_MAXCNT0);
-		writel(buf->bus_addr, ch->base + M2P_BASE0);
-	} else {
-		writel(buf->size, ch->base + M2P_MAXCNT1);
-		writel(buf->bus_addr, ch->base + M2P_BASE1);
-	}
-	ch->next_slot ^= 1;
-}
-
-static void choose_buffer_xfer(struct m2p_channel *ch)
-{
-	struct ep93xx_dma_buffer *buf;
-
-	ch->buffer_xfer = NULL;
-	if (!list_empty(&ch->buffers_pending)) {
-		buf = list_entry(ch->buffers_pending.next,
-				 struct ep93xx_dma_buffer, list);
-		list_del(&buf->list);
-		feed_buf(ch, buf);
-		ch->buffer_xfer = buf;
-	}
-}
-
-static void choose_buffer_next(struct m2p_channel *ch)
-{
-	struct ep93xx_dma_buffer *buf;
-
-	ch->buffer_next = NULL;
-	if (!list_empty(&ch->buffers_pending)) {
-		buf = list_entry(ch->buffers_pending.next,
-				 struct ep93xx_dma_buffer, list);
-		list_del(&buf->list);
-		feed_buf(ch, buf);
-		ch->buffer_next = buf;
-	}
-}
-
-static inline void m2p_set_control(struct m2p_channel *ch, u32 v)
-{
-	/*
-	 * The control register must be read immediately after being written so
-	 * that the internal state machine is correctly updated. See the ep93xx
-	 * users' guide for details.
-	 */
-	writel(v, ch->base + M2P_CONTROL);
-	readl(ch->base + M2P_CONTROL);
-}
-
-static inline int m2p_channel_state(struct m2p_channel *ch)
-{
-	return (readl(ch->base + M2P_STATUS) >> 4) & 0x3;
-}
-
-static irqreturn_t m2p_irq(int irq, void *dev_id)
-{
-	struct m2p_channel *ch = dev_id;
-	struct ep93xx_dma_m2p_client *cl;
-	u32 irq_status, v;
-	int error = 0;
-
-	cl = ch->client;
-
-	spin_lock(&ch->lock);
-	irq_status = readl(ch->base + M2P_INTERRUPT);
-
-	if (irq_status & M2P_INTERRUPT_ERROR) {
-		writel(M2P_INTERRUPT_ERROR, ch->base + M2P_INTERRUPT);
-		error = 1;
-	}
-
-	if ((irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) == 0) {
-		spin_unlock(&ch->lock);
-		return IRQ_NONE;
-	}
-
-	switch (m2p_channel_state(ch)) {
-	case STATE_IDLE:
-		pr_crit("dma interrupt without a dma buffer\n");
-		BUG();
-		break;
-
-	case STATE_STALL:
-		cl->buffer_finished(cl->cookie, ch->buffer_xfer, 0, error);
-		if (ch->buffer_next != NULL) {
-			cl->buffer_finished(cl->cookie, ch->buffer_next,
-					    0, error);
-		}
-		choose_buffer_xfer(ch);
-		choose_buffer_next(ch);
-		if (ch->buffer_xfer != NULL)
-			cl->buffer_started(cl->cookie, ch->buffer_xfer);
-		break;
-
-	case STATE_ON:
-		cl->buffer_finished(cl->cookie, ch->buffer_xfer, 0, error);
-		ch->buffer_xfer = ch->buffer_next;
-		choose_buffer_next(ch);
-		cl->buffer_started(cl->cookie, ch->buffer_xfer);
-		break;
-
-	case STATE_NEXT:
-		pr_crit("dma interrupt while next\n");
-		BUG();
-		break;
-	}
-
-	v = readl(ch->base + M2P_CONTROL) & ~(M2P_CONTROL_STALL_IRQ_EN |
-					      M2P_CONTROL_NFB_IRQ_EN);
-	if (ch->buffer_xfer != NULL)
-		v |= M2P_CONTROL_STALL_IRQ_EN;
-	if (ch->buffer_next != NULL)
-		v |= M2P_CONTROL_NFB_IRQ_EN;
-	m2p_set_control(ch, v);
-
-	spin_unlock(&ch->lock);
-	return IRQ_HANDLED;
-}
-
-static struct m2p_channel *find_free_channel(struct ep93xx_dma_m2p_client *cl)
-{
-	struct m2p_channel *ch;
-	int i;
-
-	if (cl->flags & EP93XX_DMA_M2P_RX)
-		ch = m2p_rx;
-	else
-		ch = m2p_tx;
-
-	for (i = 0; ch[i].base; i++) {
-		struct ep93xx_dma_m2p_client *client;
-
-		client = ch[i].client;
-		if (client != NULL) {
-			int port;
-
-			port = cl->flags & EP93XX_DMA_M2P_PORT_MASK;
-			if (port == (client->flags &
-				     EP93XX_DMA_M2P_PORT_MASK)) {
-				pr_warning("DMA channel already used by %s\n",
-					   cl->name ? : "unknown client");
-				return ERR_PTR(-EBUSY);
-			}
-		}
-	}
-
-	for (i = 0; ch[i].base; i++) {
-		if (ch[i].client == NULL)
-			return ch + i;
-	}
-
-	pr_warning("No free DMA channel for %s\n",
-		   cl->name ? : "unknown client");
-	return ERR_PTR(-ENODEV);
-}
-
-static void channel_enable(struct m2p_channel *ch)
-{
-	struct ep93xx_dma_m2p_client *cl = ch->client;
-	u32 v;
-
-	clk_enable(ch->clk);
-
-	v = cl->flags & EP93XX_DMA_M2P_PORT_MASK;
-	writel(v, ch->base + M2P_PPALLOC);
-
-	v = cl->flags & EP93XX_DMA_M2P_ERROR_MASK;
-	v |= M2P_CONTROL_ENABLE | M2P_CONTROL_ERROR_IRQ_EN;
-	m2p_set_control(ch, v);
-}
-
-static void channel_disable(struct m2p_channel *ch)
-{
-	u32 v;
-
-	v = readl(ch->base + M2P_CONTROL);
-	v &= ~(M2P_CONTROL_STALL_IRQ_EN | M2P_CONTROL_NFB_IRQ_EN);
-	m2p_set_control(ch, v);
-
-	while (m2p_channel_state(ch) >= STATE_ON)
-		cpu_relax();
-
-	m2p_set_control(ch, 0x0);
-
-	while (m2p_channel_state(ch) == STATE_STALL)
-		cpu_relax();
-
-	clk_disable(ch->clk);
-}
-
-int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *cl)
-{
-	struct m2p_channel *ch;
-	int err;
-
-	ch = find_free_channel(cl);
-	if (IS_ERR(ch))
-		return PTR_ERR(ch);
-
-	err = request_irq(ch->irq, m2p_irq, 0, cl->name ? : "dma-m2p", ch);
-	if (err)
-		return err;
-
-	ch->client = cl;
-	ch->next_slot = 0;
-	ch->buffer_xfer = NULL;
-	ch->buffer_next = NULL;
-	INIT_LIST_HEAD(&ch->buffers_pending);
-
-	cl->channel = ch;
-
-	channel_enable(ch);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_register);
-
-void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *cl)
-{
-	struct m2p_channel *ch = cl->channel;
-
-	channel_disable(ch);
-	free_irq(ch->irq, ch);
-	ch->client = NULL;
-}
-EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_unregister);
-
-void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *cl,
-			   struct ep93xx_dma_buffer *buf)
-{
-	struct m2p_channel *ch = cl->channel;
-	unsigned long flags;
-	u32 v;
-
-	spin_lock_irqsave(&ch->lock, flags);
-	v = readl(ch->base + M2P_CONTROL);
-	if (ch->buffer_xfer == NULL) {
-		ch->buffer_xfer = buf;
-		feed_buf(ch, buf);
-		cl->buffer_started(cl->cookie, buf);
-
-		v |= M2P_CONTROL_STALL_IRQ_EN;
-		m2p_set_control(ch, v);
-
-	} else if (ch->buffer_next == NULL) {
-		ch->buffer_next = buf;
-		feed_buf(ch, buf);
-
-		v |= M2P_CONTROL_NFB_IRQ_EN;
-		m2p_set_control(ch, v);
-	} else {
-		list_add_tail(&buf->list, &ch->buffers_pending);
-	}
-	spin_unlock_irqrestore(&ch->lock, flags);
-}
-EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit);
-
-void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *cl,
-				     struct ep93xx_dma_buffer *buf)
-{
-	struct m2p_channel *ch = cl->channel;
-
-	list_add_tail(&buf->list, &ch->buffers_pending);
-}
-EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit_recursive);
-
-void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *cl)
-{
-	struct m2p_channel *ch = cl->channel;
-
-	channel_disable(ch);
-	ch->next_slot = 0;
-	ch->buffer_xfer = NULL;
-	ch->buffer_next = NULL;
-	INIT_LIST_HEAD(&ch->buffers_pending);
-	channel_enable(ch);
-}
-EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_flush);
-
-static int init_channel(struct m2p_channel *ch)
-{
-	ch->clk = clk_get(NULL, ch->name);
-	if (IS_ERR(ch->clk))
-		return PTR_ERR(ch->clk);
-
-	spin_lock_init(&ch->lock);
-	ch->client = NULL;
-
-	return 0;
-}
-
-static int __init ep93xx_dma_m2p_init(void)
-{
-	int i;
-	int ret;
-
-	for (i = 0; m2p_rx[i].base; i++) {
-		ret = init_channel(m2p_rx + i);
-		if (ret)
-			return ret;
-	}
-
-	for (i = 0; m2p_tx[i].base; i++) {
-		ret = init_channel(m2p_tx + i);
-		if (ret)
-			return ret;
-	}
-
-	pr_info("M2P DMA subsystem initialized\n");
-	return 0;
-}
-arch_initcall(ep93xx_dma_m2p_init);

+ 108 - 0
arch/arm/mach-ep93xx/dma.c

@@ -0,0 +1,108 @@
+/*
+ * arch/arm/mach-ep93xx/dma.c
+ *
+ * Platform support code for the EP93xx dmaengine driver.
+ *
+ * Copyright (C) 2011 Mika Westerberg
+ *
+ * This work is based on the original dma-m2p implementation with
+ * following copyrights:
+ *
+ *   Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ *   Copyright (C) 2006 Applied Data Systems
+ *   Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <mach/dma.h>
+#include <mach/hardware.h>
+
+#define DMA_CHANNEL(_name, _base, _irq) \
+	{ .name = (_name), .base = (_base), .irq = (_irq) }
+
+/*
+ * DMA M2P channels.
+ *
+ * On the EP93xx chip the following peripherals my be allocated to the 10
+ * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive).
+ *
+ *	I2S	contains 3 Tx and 3 Rx DMA Channels
+ *	AAC	contains 3 Tx and 3 Rx DMA Channels
+ *	UART1	contains 1 Tx and 1 Rx DMA Channels
+ *	UART2	contains 1 Tx and 1 Rx DMA Channels
+ *	UART3	contains 1 Tx and 1 Rx DMA Channels
+ *	IrDA	contains 1 Tx and 1 Rx DMA Channels
+ *
+ * Registers are mapped statically in ep93xx_map_io().
+ */
+static struct ep93xx_dma_chan_data ep93xx_dma_m2p_channels[] = {
+	DMA_CHANNEL("m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0),
+	DMA_CHANNEL("m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1),
+	DMA_CHANNEL("m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2),
+	DMA_CHANNEL("m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3),
+	DMA_CHANNEL("m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4),
+	DMA_CHANNEL("m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5),
+	DMA_CHANNEL("m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6),
+	DMA_CHANNEL("m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7),
+	DMA_CHANNEL("m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8),
+	DMA_CHANNEL("m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9),
+};
+
+static struct ep93xx_dma_platform_data ep93xx_dma_m2p_data = {
+	.channels		= ep93xx_dma_m2p_channels,
+	.num_channels		= ARRAY_SIZE(ep93xx_dma_m2p_channels),
+};
+
+static struct platform_device ep93xx_dma_m2p_device = {
+	.name			= "ep93xx-dma-m2p",
+	.id			= -1,
+	.dev			= {
+		.platform_data	= &ep93xx_dma_m2p_data,
+	},
+};
+
+/*
+ * DMA M2M channels.
+ *
+ * There are 2 M2M channels which support memcpy/memset and in addition simple
+ * hardware requests from/to SSP and IDE. We do not implement an external
+ * hardware requests.
+ *
+ * Registers are mapped statically in ep93xx_map_io().
+ */
+static struct ep93xx_dma_chan_data ep93xx_dma_m2m_channels[] = {
+	DMA_CHANNEL("m2m0", EP93XX_DMA_BASE + 0x0100, IRQ_EP93XX_DMAM2M0),
+	DMA_CHANNEL("m2m1", EP93XX_DMA_BASE + 0x0140, IRQ_EP93XX_DMAM2M1),
+};
+
+static struct ep93xx_dma_platform_data ep93xx_dma_m2m_data = {
+	.channels		= ep93xx_dma_m2m_channels,
+	.num_channels		= ARRAY_SIZE(ep93xx_dma_m2m_channels),
+};
+
+static struct platform_device ep93xx_dma_m2m_device = {
+	.name			= "ep93xx-dma-m2m",
+	.id			= -1,
+	.dev			= {
+		.platform_data	= &ep93xx_dma_m2m_data,
+	},
+};
+
+static int __init ep93xx_dma_init(void)
+{
+	platform_device_register(&ep93xx_dma_m2p_device);
+	platform_device_register(&ep93xx_dma_m2m_device);
+	return 0;
+}
+arch_initcall(ep93xx_dma_init);

+ 67 - 123
arch/arm/mach-ep93xx/include/mach/dma.h

@@ -1,149 +1,93 @@
-/**
- * DOC: EP93xx DMA M2P memory to peripheral and peripheral to memory engine
- *
- * The EP93xx DMA M2P subsystem handles DMA transfers between memory and
- * peripherals. DMA M2P channels are available for audio, UARTs and IrDA.
- * See chapter 10 of the EP93xx users guide for full details on the DMA M2P
- * engine.
- *
- * See sound/soc/ep93xx/ep93xx-pcm.c for an example use of the DMA M2P code.
- *
- */
-
 #ifndef __ASM_ARCH_DMA_H
 #define __ASM_ARCH_DMA_H
 
-#include <linux/list.h>
 #include <linux/types.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
 
-/**
- * struct ep93xx_dma_buffer - Information about a buffer to be transferred
- * using the DMA M2P engine
+/*
+ * M2P channels.
  *
- * @list: Entry in DMA buffer list
- * @bus_addr: Physical address of the buffer
- * @size: Size of the buffer in bytes
+ * Note that these values are also directly used for setting the PPALLOC
+ * register.
  */
-struct ep93xx_dma_buffer {
-	struct list_head	list;
-	u32			bus_addr;
-	u16			size;
-};
+#define EP93XX_DMA_I2S1		0
+#define EP93XX_DMA_I2S2		1
+#define EP93XX_DMA_AAC1		2
+#define EP93XX_DMA_AAC2		3
+#define EP93XX_DMA_AAC3		4
+#define EP93XX_DMA_I2S3		5
+#define EP93XX_DMA_UART1	6
+#define EP93XX_DMA_UART2	7
+#define EP93XX_DMA_UART3	8
+#define EP93XX_DMA_IRDA		9
+/* M2M channels */
+#define EP93XX_DMA_SSP		10
+#define EP93XX_DMA_IDE		11
 
 /**
- * struct ep93xx_dma_m2p_client - Information about a DMA M2P client
- *
- * @name: Unique name for this client
- * @flags: Client flags
- * @cookie: User data to pass to callback functions
- * @buffer_started: Non NULL function to call when a transfer is started.
- * 			The arguments are the user data cookie and the DMA
- *			buffer which is starting.
- * @buffer_finished: Non NULL function to call when a transfer is completed.
- *			The arguments are the user data cookie, the DMA buffer
- *			which has completed, and a boolean flag indicating if
- *			the transfer had an error.
+ * struct ep93xx_dma_data - configuration data for the EP93xx dmaengine
+ * @port: peripheral which is requesting the channel
+ * @direction: TX/RX channel
+ * @name: optional name for the channel, this is displayed in /proc/interrupts
+ *
+ * This information is passed as private channel parameter in a filter
+ * function. Note that this is only needed for slave/cyclic channels.  For
+ * memcpy channels %NULL data should be passed.
  */
-struct ep93xx_dma_m2p_client {
-	char			*name;
-	u8			flags;
-	void			*cookie;
-	void			(*buffer_started)(void *cookie,
-					struct ep93xx_dma_buffer *buf);
-	void			(*buffer_finished)(void *cookie,
-					struct ep93xx_dma_buffer *buf,
-					int bytes, int error);
-
-	/* private: Internal use only */
-	void			*channel;
+struct ep93xx_dma_data {
+	int				port;
+	enum dma_data_direction		direction;
+	const char			*name;
 };
 
-/* DMA M2P ports */
-#define EP93XX_DMA_M2P_PORT_I2S1	0x00
-#define EP93XX_DMA_M2P_PORT_I2S2	0x01
-#define EP93XX_DMA_M2P_PORT_AAC1	0x02
-#define EP93XX_DMA_M2P_PORT_AAC2	0x03
-#define EP93XX_DMA_M2P_PORT_AAC3	0x04
-#define EP93XX_DMA_M2P_PORT_I2S3	0x05
-#define EP93XX_DMA_M2P_PORT_UART1	0x06
-#define EP93XX_DMA_M2P_PORT_UART2	0x07
-#define EP93XX_DMA_M2P_PORT_UART3	0x08
-#define EP93XX_DMA_M2P_PORT_IRDA	0x09
-#define EP93XX_DMA_M2P_PORT_MASK	0x0f
-
-/* DMA M2P client flags */
-#define EP93XX_DMA_M2P_TX		0x00	/* Memory to peripheral */
-#define EP93XX_DMA_M2P_RX		0x10	/* Peripheral to memory */
-
-/*
- * DMA M2P client error handling flags. See the EP93xx users guide
- * documentation on the DMA M2P CONTROL register for more details
- */
-#define EP93XX_DMA_M2P_ABORT_ON_ERROR	0x20	/* Abort on peripheral error */
-#define EP93XX_DMA_M2P_IGNORE_ERROR	0x40	/* Ignore peripheral errors */
-#define EP93XX_DMA_M2P_ERROR_MASK	0x60	/* Mask of error bits */
-
 /**
- * ep93xx_dma_m2p_client_register - Register a client with the DMA M2P
- * subsystem
- *
- * @m2p: Client information to register
- * returns 0 on success
- *
- * The DMA M2P subsystem allocates a channel and an interrupt line for the DMA
- * client
+ * struct ep93xx_dma_chan_data - platform specific data for a DMA channel
+ * @name: name of the channel, used for getting the right clock for the channel
+ * @base: mapped registers
+ * @irq: interrupt number used by this channel
  */
-int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p);
+struct ep93xx_dma_chan_data {
+	const char			*name;
+	void __iomem			*base;
+	int				irq;
+};
 
 /**
- * ep93xx_dma_m2p_client_unregister - Unregister a client from the DMA M2P
- * subsystem
- *
- * @m2p: Client to unregister
+ * struct ep93xx_dma_platform_data - platform data for the dmaengine driver
+ * @channels: array of channels which are passed to the driver
+ * @num_channels: number of channels in the array
  *
- * Any transfers currently in progress will be completed in hardware, but
- * ignored in software.
+ * This structure is passed to the DMA engine driver via platform data. For
+ * M2P channels, contract is that even channels are for TX and odd for RX.
+ * There is no requirement for the M2M channels.
  */
-void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *m2p);
+struct ep93xx_dma_platform_data {
+	struct ep93xx_dma_chan_data	*channels;
+	size_t				num_channels;
+};
 
-/**
- * ep93xx_dma_m2p_submit - Submit a DMA M2P transfer
- *
- * @m2p: DMA Client to submit the transfer on
- * @buf: DMA Buffer to submit
- *
- * If the current or next transfer positions are free on the M2P client then
- * the transfer is started immediately. If not, the transfer is added to the
- * list of pending transfers. This function must not be called from the
- * buffer_finished callback for an M2P channel.
- *
- */
-void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *m2p,
-			   struct ep93xx_dma_buffer *buf);
+static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan)
+{
+	return !strcmp(dev_name(chan->device->dev), "ep93xx-dma-m2p");
+}
 
 /**
- * ep93xx_dma_m2p_submit_recursive - Put a DMA transfer on the pending list
- * for an M2P channel
+ * ep93xx_dma_chan_direction - returns direction the channel can be used
+ * @chan: channel
  *
- * @m2p: DMA Client to submit the transfer on
- * @buf: DMA Buffer to submit
- *
- * This function must only be called from the buffer_finished callback for an
- * M2P channel. It is commonly used to add the next transfer in a chained list
- * of DMA transfers.
+ * This function can be used in filter functions to find out whether the
+ * channel supports given DMA direction. Only M2P channels have such
+ * limitation, for M2M channels the direction is configurable.
  */
-void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *m2p,
-				     struct ep93xx_dma_buffer *buf);
+static inline enum dma_data_direction
+ep93xx_dma_chan_direction(struct dma_chan *chan)
+{
+	if (!ep93xx_dma_chan_is_m2p(chan))
+		return DMA_NONE;
 
-/**
- * ep93xx_dma_m2p_flush - Flush all pending transfers on a DMA M2P client
- *
- * @m2p: DMA client to flush transfers on
- *
- * Any transfers currently in progress will be completed in hardware, but
- * ignored in software.
- *
- */
-void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *m2p);
+	/* even channels are for TX, odd for RX */
+	return (chan->chan_id % 2 == 0) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+}
 
 #endif /* __ASM_ARCH_DMA_H */
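
A hypothetical dmaengine client of this header (not part of the patch) passes
&struct ep93xx_dma_data to a filter function and lets the filter attach it as the
channel's private parameter. A minimal sketch, assuming <linux/dmaengine.h> plus
this header, the standard dma_request_channel() flow, and illustrative names
(ep93xx_xxx_dma_filter, rx_data, ep93xx_xxx_request_rx_chan):

static struct ep93xx_dma_data rx_data = {
	.port		= EP93XX_DMA_SSP,
	.direction	= DMA_FROM_DEVICE,
	.name		= "xxx rx",
};

static bool ep93xx_xxx_dma_filter(struct dma_chan *chan, void *filter_param)
{
	struct ep93xx_dma_data *data = filter_param;

	/* Only M2P channels have a fixed direction; skip mismatches. */
	if (ep93xx_dma_chan_is_m2p(chan) &&
	    ep93xx_dma_chan_direction(chan) != data->direction)
		return false;

	/* Becomes the private channel parameter read by the driver. */
	chan->private = data;
	return true;
}

static struct dma_chan *ep93xx_xxx_request_rx_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, ep93xx_xxx_dma_filter, &rx_data);
}

For plain memcpy channels no ep93xx_dma_data is needed and chan->private stays
NULL, as the kerneldoc above notes.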

+ 2 - 0
arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h

@@ -7,9 +7,11 @@ struct spi_device;
  * struct ep93xx_spi_info - EP93xx specific SPI descriptor
  * @num_chipselect: number of chip selects on this board, must be
  *                  at least one
+ * @use_dma: use DMA for the transfers
  */
 struct ep93xx_spi_info {
 	int	num_chipselect;
+	bool	use_dma;
 };
 
 /**

+ 56 - 0
arch/arm/mach-shmobile/board-ap4evb.c

@@ -249,6 +249,29 @@ static int slot_cn7_get_cd(struct platform_device *pdev)
 {
 	return !gpio_get_value(GPIO_PORT41);
 }
+/* MERAM */
+static struct sh_mobile_meram_info meram_info = {
+	.addr_mode      = SH_MOBILE_MERAM_MODE1,
+};
+
+static struct resource meram_resources[] = {
+	[0] = {
+		.name   = "MERAM",
+		.start  = 0xe8000000,
+		.end    = 0xe81fffff,
+		.flags  = IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device meram_device = {
+	.name           = "sh_mobile_meram",
+	.id             = 0,
+	.num_resources  = ARRAY_SIZE(meram_resources),
+	.resource       = meram_resources,
+	.dev            = {
+		.platform_data = &meram_info,
+	},
+};
 
 /* SH_MMCIF */
 static struct resource sh_mmcif_resources[] = {
@@ -447,13 +470,29 @@ const static struct fb_videomode ap4evb_lcdc_modes[] = {
 #endif
 	},
 };
+static struct sh_mobile_meram_cfg lcd_meram_cfg = {
+	.icb[0] = {
+		.marker_icb     = 28,
+		.cache_icb      = 24,
+		.meram_offset   = 0x0,
+		.meram_size     = 0x40,
+	},
+	.icb[1] = {
+		.marker_icb     = 29,
+		.cache_icb      = 25,
+		.meram_offset   = 0x40,
+		.meram_size     = 0x40,
+	},
+};
 
 static struct sh_mobile_lcdc_info lcdc_info = {
+	.meram_dev = &meram_info,
 	.ch[0] = {
 		.chan = LCDC_CHAN_MAINLCD,
 		.bpp = 16,
 		.lcd_cfg = ap4evb_lcdc_modes,
 		.num_cfg = ARRAY_SIZE(ap4evb_lcdc_modes),
+		.meram_cfg = &lcd_meram_cfg,
 	}
 };
 
@@ -724,15 +763,31 @@ static struct platform_device fsi_device = {
 static struct platform_device fsi_ak4643_device = {
 	.name		= "sh_fsi2_a_ak4643",
 };
+static struct sh_mobile_meram_cfg hdmi_meram_cfg = {
+	.icb[0] = {
+		.marker_icb     = 30,
+		.cache_icb      = 26,
+		.meram_offset   = 0x80,
+		.meram_size     = 0x100,
+	},
+	.icb[1] = {
+		.marker_icb     = 31,
+		.cache_icb      = 27,
+		.meram_offset   = 0x180,
+		.meram_size     = 0x100,
+	},
+};
 
 static struct sh_mobile_lcdc_info sh_mobile_lcdc1_info = {
 	.clock_source = LCDC_CLK_EXTERNAL,
+	.meram_dev = &meram_info,
 	.ch[0] = {
 		.chan = LCDC_CHAN_MAINLCD,
 		.bpp = 16,
 		.interface_type = RGB24,
 		.clock_divider = 1,
 		.flags = LCDC_FLAGS_DWPOL,
+		.meram_cfg = &hdmi_meram_cfg,
 	}
 };
 
@@ -961,6 +1016,7 @@ static struct platform_device *ap4evb_devices[] __initdata = {
 	&csi2_device,
 	&ceu_device,
 	&ap4evb_camera,
+	&meram_device,
 };
 
 static void __init hdmi_init_pm_clock(void)

+ 78 - 0
arch/arm/mach-shmobile/board-mackerel.c

@@ -39,6 +39,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
+#include <linux/pm_runtime.h>
 #include <linux/smsc911x.h>
 #include <linux/sh_intc.h>
 #include <linux/tca6416_keypad.h>
@@ -314,6 +315,30 @@ static struct platform_device smc911x_device = {
 	},
 };
 
+/* MERAM */
+static struct sh_mobile_meram_info mackerel_meram_info = {
+	.addr_mode	= SH_MOBILE_MERAM_MODE1,
+};
+
+static struct resource meram_resources[] = {
+	[0] = {
+		.name	= "MERAM",
+		.start	= 0xe8000000,
+		.end	= 0xe81fffff,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device meram_device = {
+	.name		= "sh_mobile_meram",
+	.id		= 0,
+	.num_resources	= ARRAY_SIZE(meram_resources),
+	.resource	= meram_resources,
+	.dev		= {
+		.platform_data = &mackerel_meram_info,
+	},
+};
+
 /* LCDC */
 static struct fb_videomode mackerel_lcdc_modes[] = {
 	{
@@ -342,7 +367,23 @@ static int mackerel_get_brightness(void *board_data)
 	return gpio_get_value(GPIO_PORT31);
 }
 
+static struct sh_mobile_meram_cfg lcd_meram_cfg = {
+	.icb[0] = {
+		.marker_icb     = 28,
+		.cache_icb      = 24,
+		.meram_offset   = 0x0,
+		.meram_size     = 0x40,
+	},
+	.icb[1] = {
+		.marker_icb     = 29,
+		.cache_icb      = 25,
+		.meram_offset   = 0x40,
+		.meram_size     = 0x40,
+	},
+};
+
 static struct sh_mobile_lcdc_info lcdc_info = {
+	.meram_dev = &mackerel_meram_info,
 	.clock_source = LCDC_CLK_BUS,
 	.ch[0] = {
 		.chan = LCDC_CHAN_MAINLCD,
@@ -362,6 +403,7 @@ static struct sh_mobile_lcdc_info lcdc_info = {
 			.name = "sh_mobile_lcdc_bl",
 			.max_brightness = 1,
 		},
+		.meram_cfg = &lcd_meram_cfg,
 	}
 };
 
@@ -388,8 +430,23 @@ static struct platform_device lcdc_device = {
 	},
 };
 
+static struct sh_mobile_meram_cfg hdmi_meram_cfg = {
+	.icb[0] = {
+		.marker_icb     = 30,
+		.cache_icb      = 26,
+		.meram_offset   = 0x80,
+		.meram_size     = 0x100,
+	},
+	.icb[1] = {
+		.marker_icb     = 31,
+		.cache_icb      = 27,
+		.meram_offset   = 0x180,
+		.meram_size     = 0x100,
+	},
+};
 /* HDMI */
 static struct sh_mobile_lcdc_info hdmi_lcdc_info = {
+	.meram_dev = &mackerel_meram_info,
 	.clock_source = LCDC_CLK_EXTERNAL,
 	.ch[0] = {
 		.chan = LCDC_CHAN_MAINLCD,
@@ -397,6 +454,7 @@ static struct sh_mobile_lcdc_info hdmi_lcdc_info = {
 		.interface_type = RGB24,
 		.clock_divider = 1,
 		.flags = LCDC_FLAGS_DWPOL,
+		.meram_cfg = &hdmi_meram_cfg,
 	}
 };
 
@@ -856,6 +914,17 @@ static int slot_cn7_get_cd(struct platform_device *pdev)
 }
 
 /* SDHI0 */
+static irqreturn_t mackerel_sdhi0_gpio_cd(int irq, void *arg)
+{
+	struct device *dev = arg;
+	struct sh_mobile_sdhi_info *info = dev->platform_data;
+	struct tmio_mmc_data *pdata = info->pdata;
+
+	tmio_mmc_cd_wakeup(pdata);
+
+	return IRQ_HANDLED;
+}
+
 static struct sh_mobile_sdhi_info sdhi0_info = {
 	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
@@ -1150,6 +1219,7 @@ static struct platform_device *mackerel_devices[] __initdata = {
 	&mackerel_camera,
 	&hdmi_lcdc_device,
 	&hdmi_device,
+	&meram_device,
 };
 
 /* Keypad Initialization */
@@ -1238,6 +1308,7 @@ static void __init mackerel_init(void)
 {
 	u32 srcr4;
 	struct clk *clk;
+	int ret;
 
 	sh7372_pinmux_init();
 
@@ -1343,6 +1414,13 @@ static void __init mackerel_init(void)
 	gpio_request(GPIO_FN_SDHID0_1, NULL);
 	gpio_request(GPIO_FN_SDHID0_0, NULL);
 
+	ret = request_irq(evt2irq(0x3340), mackerel_sdhi0_gpio_cd,
+			  IRQF_TRIGGER_FALLING, "sdhi0 cd", &sdhi0_device.dev);
+	if (!ret)
+		sdhi0_info.tmio_flags |= TMIO_MMC_HAS_COLD_CD;
+	else
+		pr_err("Cannot get IRQ #%d: %d\n", evt2irq(0x3340), ret);
+
 #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
 	/* enable SDHI1 */
 	gpio_request(GPIO_FN_SDHICMD1, NULL);

+ 7 - 0
arch/arm/mach-shmobile/clock-sh7372.c

@@ -509,6 +509,7 @@ enum { MSTP001,
        MSTP118, MSTP117, MSTP116, MSTP113,
        MSTP106, MSTP101, MSTP100,
        MSTP223,
+       MSTP218, MSTP217, MSTP216,
        MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
        MSTP329, MSTP328, MSTP323, MSTP322, MSTP314, MSTP313, MSTP312,
        MSTP423, MSTP415, MSTP413, MSTP411, MSTP410, MSTP406, MSTP403,
@@ -534,6 +535,9 @@ static struct clk mstp_clks[MSTP_NR] = {
 	[MSTP101] = MSTP(&div4_clks[DIV4_M1], SMSTPCR1, 1, 0), /* VPU */
 	[MSTP100] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */
 	[MSTP223] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR2, 23, 0), /* SPU2 */
+	[MSTP218] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* DMAC1 */
+	[MSTP217] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 17, 0), /* DMAC2 */
+	[MSTP216] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 16, 0), /* DMAC3 */
 	[MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */
 	[MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */
 	[MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */
@@ -626,6 +630,9 @@ static struct clk_lookup lookups[] = {
 	CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
 	CLKDEV_DEV_ID("uio_pdrv_genirq.6", &mstp_clks[MSTP223]), /* SPU2DSP0 */
 	CLKDEV_DEV_ID("uio_pdrv_genirq.7", &mstp_clks[MSTP223]), /* SPU2DSP1 */
+	CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), /* DMAC1 */
+	CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[MSTP217]), /* DMAC2 */
+	CLKDEV_DEV_ID("sh-dma-engine.2", &mstp_clks[MSTP216]), /* DMAC3 */
 	CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
 	CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP206]), /* SCIFB */
 	CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */

+ 3 - 1
arch/arm/mach-tegra/board-harmony-power.c

@@ -24,6 +24,8 @@
 
 #include <mach/irqs.h>
 
+#include "board-harmony.h"
+
 #define PMC_CTRL		0x0
 #define PMC_CTRL_INTR_LOW	(1 << 17)
 
@@ -98,7 +100,7 @@ static struct tps6586x_platform_data tps_platform = {
 	.irq_base	= TEGRA_NR_IRQS,
 	.num_subdevs	= ARRAY_SIZE(tps_devs),
 	.subdevs	= tps_devs,
-	.gpio_base	= TEGRA_NR_GPIOS,
+	.gpio_base	= HARMONY_GPIO_TPS6586X(0),
 };
 
 static struct i2c_board_info __initdata harmony_regulators[] = {

+ 2 - 1
arch/arm/mach-tegra/board-harmony.h

@@ -17,7 +17,8 @@
 #ifndef _MACH_TEGRA_BOARD_HARMONY_H
 #define _MACH_TEGRA_BOARD_HARMONY_H
 
-#define HARMONY_GPIO_WM8903(_x_)	(TEGRA_NR_GPIOS + (_x_))
+#define HARMONY_GPIO_TPS6586X(_x_)	(TEGRA_NR_GPIOS + (_x_))
+#define HARMONY_GPIO_WM8903(_x_)	(HARMONY_GPIO_TPS6586X(4) + (_x_))
 
 #define TEGRA_GPIO_SD2_CD		TEGRA_GPIO_PI5
 #define TEGRA_GPIO_SD2_WP		TEGRA_GPIO_PH1

+ 1 - 1
arch/blackfin/lib/strncpy.S

@@ -25,7 +25,7 @@
 
 ENTRY(_strncpy)
 	CC = R2 == 0;
-	if CC JUMP 4f;
+	if CC JUMP 6f;
 
 	P2 = R2 ;       /* size */
 	P0 = R0 ;       /* dst*/

+ 1 - 1
arch/sh/Kconfig

@@ -161,7 +161,7 @@ config ARCH_HAS_CPU_IDLE_WAIT
 
 config NO_IOPORT
 	def_bool !PCI
-	depends on !SH_CAYMAN && !SH_SH4202_MICRODEV
+	depends on !SH_CAYMAN && !SH_SH4202_MICRODEV && !SH_SHMIN
 
 config IO_TRAPPED
 	bool

+ 13 - 19
arch/sh/boards/mach-ap325rxa/setup.c

@@ -359,37 +359,31 @@ static struct soc_camera_link camera_link = {
 	.priv		= &camera_info,
 };
 
-static void dummy_release(struct device *dev)
+static struct platform_device *camera_device;
+
+static void ap325rxa_camera_release(struct device *dev)
 {
+	soc_camera_platform_release(&camera_device);
 }
 
-static struct platform_device camera_device = {
-	.name		= "soc_camera_platform",
-	.dev		= {
-		.platform_data	= &camera_info,
-		.release	= dummy_release,
-	},
-};
-
 static int ap325rxa_camera_add(struct soc_camera_link *icl,
 			       struct device *dev)
 {
-	if (icl != &camera_link || camera_probe() <= 0)
-		return -ENODEV;
+	int ret = soc_camera_platform_add(icl, dev, &camera_device, &camera_link,
+					  ap325rxa_camera_release, 0);
+	if (ret < 0)
+		return ret;
 
-	camera_info.dev = dev;
+	ret = camera_probe();
+	if (ret < 0)
+		soc_camera_platform_del(icl, camera_device, &camera_link);
 
-	return platform_device_register(&camera_device);
+	return ret;
 }
 
 static void ap325rxa_camera_del(struct soc_camera_link *icl)
 {
-	if (icl != &camera_link)
-		return;
-
-	platform_device_unregister(&camera_device);
-	memset(&camera_device.dev.kobj, 0,
-	       sizeof(camera_device.dev.kobj));
+	soc_camera_platform_del(icl, camera_device, &camera_link);
 }
 #endif /* CONFIG_I2C */
 

+ 3 - 0
arch/sh/boards/mach-ecovec24/setup.c

@@ -885,6 +885,9 @@ static struct platform_device sh_mmcif_device = {
 	},
 	.num_resources	= ARRAY_SIZE(sh_mmcif_resources),
 	.resource	= sh_mmcif_resources,
+	.archdata = {
+		.hwblk_id = HWBLK_MMC,
+	},
 };
 #endif
 

+ 1 - 0
arch/sh/include/asm/pgtable.h

@@ -18,6 +18,7 @@
 #include <asm/pgtable-2level.h>
 #endif
 #include <asm/page.h>
+#include <asm/mmu.h>
 
 #ifndef __ASSEMBLY__
 #include <asm/addrspace.h>

+ 4 - 2
arch/sh/include/asm/ptrace.h

@@ -41,7 +41,9 @@
 
 #define user_mode(regs)			(((regs)->sr & 0x40000000)==0)
 #define kernel_stack_pointer(_regs)	((unsigned long)(_regs)->regs[15])
-#define GET_USP(regs) ((regs)->regs[15])
+
+#define GET_FP(regs)	((regs)->regs[14])
+#define GET_USP(regs)	((regs)->regs[15])
 
 extern void show_regs(struct pt_regs *);
 
@@ -131,7 +133,7 @@ extern void ptrace_triggered(struct perf_event *bp, int nmi,
 
 static inline unsigned long profile_pc(struct pt_regs *regs)
 {
-	unsigned long pc = instruction_pointer(regs);
+	unsigned long pc = regs->pc;
 
 	if (virt_addr_uncached(pc))
 		return CAC_ADDR(pc);

+ 1 - 0
arch/sh/include/asm/tlb.h

@@ -9,6 +9,7 @@
 #include <linux/pagemap.h>
 
 #ifdef CONFIG_MMU
+#include <linux/swap.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>

+ 1 - 0
arch/sh/include/cpu-sh4/cpu/sh7722.h

@@ -236,6 +236,7 @@ enum {
 };
 
 enum {
+	SHDMA_SLAVE_INVALID,
 	SHDMA_SLAVE_SCIF0_TX,
 	SHDMA_SLAVE_SCIF0_RX,
 	SHDMA_SLAVE_SCIF1_TX,

+ 1 - 0
arch/sh/include/cpu-sh4/cpu/sh7724.h

@@ -285,6 +285,7 @@ enum {
 };
 
 enum {
+	SHDMA_SLAVE_INVALID,
 	SHDMA_SLAVE_SCIF0_TX,
 	SHDMA_SLAVE_SCIF0_RX,
 	SHDMA_SLAVE_SCIF1_TX,

+ 1 - 0
arch/sh/include/cpu-sh4/cpu/sh7757.h

@@ -252,6 +252,7 @@ enum {
 };
 
 enum {
+	SHDMA_SLAVE_INVALID,
 	SHDMA_SLAVE_SDHI_TX,
 	SHDMA_SLAVE_SDHI_RX,
 	SHDMA_SLAVE_MMCIF_TX,

+ 1 - 0
arch/sh/kernel/process_32.c

@@ -21,6 +21,7 @@
 #include <linux/fs.h>
 #include <linux/ftrace.h>
 #include <linux/hw_breakpoint.h>
+#include <linux/prefetch.h>
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
 #include <asm/system.h>

+ 1 - 1
arch/sh/mm/consistent.c

@@ -82,7 +82,7 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	void *addr;
 
 	addr = __in_29bit_mode() ?
-	       (void *)P1SEGADDR((unsigned long)vaddr) : vaddr;
+	       (void *)CAC_ADDR((unsigned long)vaddr) : vaddr;
 
 	switch (direction) {
 	case DMA_FROM_DEVICE:		/* invalidate only */

+ 2 - 2
block/blk-ioc.c

@@ -21,7 +21,7 @@ static void cfq_dtor(struct io_context *ioc)
 	if (!hlist_empty(&ioc->cic_list)) {
 		struct cfq_io_context *cic;
 
-		cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
+		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
 								cic_list);
 		cic->dtor(ioc);
 	}
@@ -57,7 +57,7 @@ static void cfq_exit(struct io_context *ioc)
 	if (!hlist_empty(&ioc->cic_list)) {
 		struct cfq_io_context *cic;
 
-		cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
+		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
 								cic_list);
 		cic->exit(ioc);
 	}

+ 4 - 7
block/cfq-iosched.c

@@ -185,7 +185,7 @@ struct cfq_group {
 	int nr_cfqq;
 
 	/*
-	 * Per group busy queus average. Useful for workload slice calc. We
+	 * Per group busy queues average. Useful for workload slice calc. We
 	 * create the array for each prio class but at run time it is used
 	 * only for RT and BE class and slot for IDLE class remains unused.
 	 * This is primarily done to avoid confusion and a gcc warning.
@@ -369,16 +369,16 @@ CFQ_CFQQ_FNS(wait_busy);
 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
 	blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
 			cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
-			blkg_path(&(cfqq)->cfqg->blkg), ##args);
+			blkg_path(&(cfqq)->cfqg->blkg), ##args)
 
 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)				\
 	blk_add_trace_msg((cfqd)->queue, "%s " fmt,			\
-				blkg_path(&(cfqg)->blkg), ##args);      \
+				blkg_path(&(cfqg)->blkg), ##args)       \
 
 #else
 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
 	blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
-#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0);
+#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)
 #endif
 #define cfq_log(cfqd, fmt, args...)	\
 	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
@@ -3786,9 +3786,6 @@ new_queue:
 	return 0;
 
 queue_fail:
-	if (cic)
-		put_io_context(cic->ioc);
-
 	cfq_schedule_dispatch(cfqd);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 	cfq_log(cfqd, "set_request fail");

+ 20 - 2
drivers/block/nbd.c

@@ -192,7 +192,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
 			if (lo->xmit_timeout)
 				del_timer_sync(&ti);
 		} else
-			result = kernel_recvmsg(sock, &msg, &iov, 1, size, 0);
+			result = kernel_recvmsg(sock, &msg, &iov, 1, size,
+						msg.msg_flags);
 
 		if (signal_pending(current)) {
 			siginfo_t info;
@@ -753,9 +754,26 @@ static int __init nbd_init(void)
 		return -ENOMEM;
 
 	part_shift = 0;
-	if (max_part > 0)
+	if (max_part > 0) {
 		part_shift = fls(max_part);
 
+		/*
+		 * Adjust max_part according to part_shift as it is exported
+		 * to user space so that user can know the max number of
+		 * partition kernel should be able to manage.
+		 *
+		 * Note that -1 is required because partition 0 is reserved
+		 * for the whole disk.
+		 */
+		max_part = (1UL << part_shift) - 1;
+	}
+
+	if ((1UL << part_shift) > DISK_MAX_PARTS)
+		return -EINVAL;
+
+	if (nbds_max > 1UL << (MINORBITS - part_shift))
+		return -EINVAL;
+
 	for (i = 0; i < nbds_max; i++) {
 		struct gendisk *disk = alloc_disk(1 << part_shift);
 		if (!disk)
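
For illustration only (values not taken from the patch): loading nbd with
max_part=15 gives part_shift = fls(15) = 4, so max_part is re-exported as
(1 << 4) - 1 = 15, each device is allocated 1 << 4 = 16 minors (the whole disk
plus 15 partitions), the DISK_MAX_PARTS check passes (16 <= 256), and the
MINORBITS check caps nbds_max at 1 << (20 - 4) = 65536 devices.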

+ 6 - 4
drivers/block/xen-blkback/blkback.c

@@ -809,11 +809,13 @@ static int __init xen_blkif_init(void)
  failed_init:
 	kfree(blkbk->pending_reqs);
 	kfree(blkbk->pending_grant_handles);
-	for (i = 0; i < mmap_pages; i++) {
-		if (blkbk->pending_pages[i])
-			__free_page(blkbk->pending_pages[i]);
+	if (blkbk->pending_pages) {
+		for (i = 0; i < mmap_pages; i++) {
+			if (blkbk->pending_pages[i])
+				__free_page(blkbk->pending_pages[i]);
+		}
+		kfree(blkbk->pending_pages);
 	}
-	kfree(blkbk->pending_pages);
 	kfree(blkbk);
 	blkbk = NULL;
 	return rc;

+ 1 - 2
drivers/block/xen-blkback/xenbus.c

@@ -357,14 +357,13 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
 	}
 
 	vbd->bdev = bdev;
-	vbd->size = vbd_sz(vbd);
-
 	if (vbd->bdev->bd_disk == NULL) {
 		DPRINTK("xen_vbd_create: device %08x doesn't exist.\n",
 			vbd->pdevice);
 		xen_vbd_free(vbd);
 		return -ENOENT;
 	}
+	vbd->size = vbd_sz(vbd);
 
 	if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
 		vbd->type |= VDISK_CDROM;

+ 6 - 11
drivers/bluetooth/hci_ldisc.c

@@ -355,29 +355,24 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
  *             flags        pointer to flags for data
  *             count        count of received data in bytes
  *     
- * Return Value:    Number of bytes received
+ * Return Value:    None
  */
-static unsigned int hci_uart_tty_receive(struct tty_struct *tty,
-		const u8 *data, char *flags, int count)
+static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *flags, int count)
 {
 	struct hci_uart *hu = (void *)tty->disc_data;
-	int received;
 
 	if (!hu || tty != hu->tty)
-		return -ENODEV;
+		return;
 
 	if (!test_bit(HCI_UART_PROTO_SET, &hu->flags))
-		return -EINVAL;
+		return;
 
 	spin_lock(&hu->rx_lock);
-	received = hu->proto->recv(hu, (void *) data, count);
-	if (received > 0)
-		hu->hdev->stat.byte_rx += received;
+	hu->proto->recv(hu, (void *) data, count);
+	hu->hdev->stat.byte_rx += count;
 	spin_unlock(&hu->rx_lock);
 
 	tty_unthrottle(tty);
-
-	return received;
 }
 
 static int hci_uart_register_dev(struct hci_uart *hu)

+ 2 - 10
drivers/clocksource/sh_cmt.c

@@ -24,7 +24,6 @@
 #include <linux/ioport.h>
 #include <linux/io.h>
 #include <linux/clk.h>
-#include <linux/pm_runtime.h>
 #include <linux/irq.h>
 #include <linux/err.h>
 #include <linux/clocksource.h>
@@ -153,12 +152,10 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
 {
 	int ret;
 
-	/* wake up device and enable clock */
-	pm_runtime_get_sync(&p->pdev->dev);
+	/* enable clock */
 	ret = clk_enable(p->clk);
 	if (ret) {
 		dev_err(&p->pdev->dev, "cannot enable clock\n");
-		pm_runtime_put_sync(&p->pdev->dev);
 		return ret;
 	}
 
@@ -190,9 +187,8 @@ static void sh_cmt_disable(struct sh_cmt_priv *p)
 	/* disable interrupts in CMT block */
 	sh_cmt_write(p, CMCSR, 0);
 
-	/* stop clock and mark device as idle */
+	/* stop clock */
 	clk_disable(p->clk);
-	pm_runtime_put_sync(&p->pdev->dev);
 }
 
 /* private flags */
@@ -664,7 +660,6 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev)
 
 	if (p) {
 		dev_info(&pdev->dev, "kept as earlytimer\n");
-		pm_runtime_enable(&pdev->dev);
 		return 0;
 	}
 
@@ -679,9 +674,6 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev)
 		kfree(p);
 		platform_set_drvdata(pdev, NULL);
 	}
-
-	if (!is_early_platform_device(pdev))
-		pm_runtime_enable(&pdev->dev);
 	return ret;
 }
 

+ 2 - 10
drivers/clocksource/sh_tmu.c

@@ -25,7 +25,6 @@
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/clk.h>
-#include <linux/pm_runtime.h>
 #include <linux/irq.h>
 #include <linux/err.h>
 #include <linux/clocksource.h>
@@ -110,12 +109,10 @@ static int sh_tmu_enable(struct sh_tmu_priv *p)
 {
 	int ret;
 
-	/* wake up device and enable clock */
-	pm_runtime_get_sync(&p->pdev->dev);
+	/* enable clock */
 	ret = clk_enable(p->clk);
 	if (ret) {
 		dev_err(&p->pdev->dev, "cannot enable clock\n");
-		pm_runtime_put_sync(&p->pdev->dev);
 		return ret;
 	}
 
@@ -144,9 +141,8 @@ static void sh_tmu_disable(struct sh_tmu_priv *p)
 	/* disable interrupts in TMU block */
 	sh_tmu_write(p, TCR, 0x0000);
 
-	/* stop clock and mark device as idle */
+	/* stop clock */
 	clk_disable(p->clk);
-	pm_runtime_put_sync(&p->pdev->dev);
 }
 
 static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
@@ -415,7 +411,6 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev)
 
 	if (p) {
 		dev_info(&pdev->dev, "kept as earlytimer\n");
-		pm_runtime_enable(&pdev->dev);
 		return 0;
 	}
 
@@ -430,9 +425,6 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev)
 		kfree(p);
 		platform_set_drvdata(pdev, NULL);
 	}
-
-	if (!is_early_platform_device(pdev))
-		pm_runtime_enable(&pdev->dev);
 	return ret;
 }
 

+ 7 - 0
drivers/dma/Kconfig

@@ -237,6 +237,13 @@ config MXS_DMA
 	  Support the MXS DMA engine. This engine including APBH-DMA
 	  and APBX-DMA is integrated into Freescale i.MX23/28 chips.
 
+config EP93XX_DMA
+	bool "Cirrus Logic EP93xx DMA support"
+	depends on ARCH_EP93XX
+	select DMA_ENGINE
+	help
+	  Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller.
+
 config DMA_ENGINE
 	bool
 

+ 1 - 0
drivers/dma/Makefile

@@ -25,3 +25,4 @@ obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
 obj-$(CONFIG_PL330_DMA) += pl330.o
 obj-$(CONFIG_PCH_DMA) += pch_dma.o
 obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
+obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o

+ 1355 - 0
drivers/dma/ep93xx_dma.c

@@ -0,0 +1,1355 @@
+/*
+ * Driver for the Cirrus Logic EP93xx DMA Controller
+ *
+ * Copyright (C) 2011 Mika Westerberg
+ *
+ * DMA M2P implementation is based on the original
+ * arch/arm/mach-ep93xx/dma-m2p.c which has following copyrights:
+ *
+ *   Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ *   Copyright (C) 2006 Applied Data Systems
+ *   Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
+ *
+ * This driver is based on dw_dmac and amba-pl08x drivers.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <mach/dma.h>
+
+/* M2P registers */
+#define M2P_CONTROL			0x0000
+#define M2P_CONTROL_STALLINT		BIT(0)
+#define M2P_CONTROL_NFBINT		BIT(1)
+#define M2P_CONTROL_CH_ERROR_INT	BIT(3)
+#define M2P_CONTROL_ENABLE		BIT(4)
+#define M2P_CONTROL_ICE			BIT(6)
+
+#define M2P_INTERRUPT			0x0004
+#define M2P_INTERRUPT_STALL		BIT(0)
+#define M2P_INTERRUPT_NFB		BIT(1)
+#define M2P_INTERRUPT_ERROR		BIT(3)
+
+#define M2P_PPALLOC			0x0008
+#define M2P_STATUS			0x000c
+
+#define M2P_MAXCNT0			0x0020
+#define M2P_BASE0			0x0024
+#define M2P_MAXCNT1			0x0030
+#define M2P_BASE1			0x0034
+
+#define M2P_STATE_IDLE			0
+#define M2P_STATE_STALL			1
+#define M2P_STATE_ON			2
+#define M2P_STATE_NEXT			3
+
+/* M2M registers */
+#define M2M_CONTROL			0x0000
+#define M2M_CONTROL_DONEINT		BIT(2)
+#define M2M_CONTROL_ENABLE		BIT(3)
+#define M2M_CONTROL_START		BIT(4)
+#define M2M_CONTROL_DAH			BIT(11)
+#define M2M_CONTROL_SAH			BIT(12)
+#define M2M_CONTROL_PW_SHIFT		9
+#define M2M_CONTROL_PW_8		(0 << M2M_CONTROL_PW_SHIFT)
+#define M2M_CONTROL_PW_16		(1 << M2M_CONTROL_PW_SHIFT)
+#define M2M_CONTROL_PW_32		(2 << M2M_CONTROL_PW_SHIFT)
+#define M2M_CONTROL_PW_MASK		(3 << M2M_CONTROL_PW_SHIFT)
+#define M2M_CONTROL_TM_SHIFT		13
+#define M2M_CONTROL_TM_TX		(1 << M2M_CONTROL_TM_SHIFT)
+#define M2M_CONTROL_TM_RX		(2 << M2M_CONTROL_TM_SHIFT)
+#define M2M_CONTROL_RSS_SHIFT		22
+#define M2M_CONTROL_RSS_SSPRX		(1 << M2M_CONTROL_RSS_SHIFT)
+#define M2M_CONTROL_RSS_SSPTX		(2 << M2M_CONTROL_RSS_SHIFT)
+#define M2M_CONTROL_RSS_IDE		(3 << M2M_CONTROL_RSS_SHIFT)
+#define M2M_CONTROL_NO_HDSK		BIT(24)
+#define M2M_CONTROL_PWSC_SHIFT		25
+
+#define M2M_INTERRUPT			0x0004
+#define M2M_INTERRUPT_DONEINT		BIT(1)
+
+#define M2M_BCR0			0x0010
+#define M2M_BCR1			0x0014
+#define M2M_SAR_BASE0			0x0018
+#define M2M_SAR_BASE1			0x001c
+#define M2M_DAR_BASE0			0x002c
+#define M2M_DAR_BASE1			0x0030
+
+#define DMA_MAX_CHAN_BYTES		0xffff
+#define DMA_MAX_CHAN_DESCRIPTORS	32
+
+struct ep93xx_dma_engine;
+
+/**
+ * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
+ * @src_addr: source address of the transaction
+ * @dst_addr: destination address of the transaction
+ * @size: size of the transaction (in bytes)
+ * @complete: this descriptor is completed
+ * @txd: dmaengine API descriptor
+ * @tx_list: list of linked descriptors
+ * @node: link used for putting this into a channel queue
+ */
+struct ep93xx_dma_desc {
+	u32				src_addr;
+	u32				dst_addr;
+	size_t				size;
+	bool				complete;
+	struct dma_async_tx_descriptor	txd;
+	struct list_head		tx_list;
+	struct list_head		node;
+};
+
+/**
+ * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
+ * @chan: dmaengine API channel
+ * @edma: pointer to to the engine device
+ * @regs: memory mapped registers
+ * @irq: interrupt number of the channel
+ * @clk: clock used by this channel
+ * @tasklet: channel specific tasklet used for callbacks
+ * @lock: lock protecting the fields following
+ * @flags: flags for the channel
+ * @buffer: which buffer to use next (0/1)
+ * @last_completed: last completed cookie value
+ * @active: flattened chain of descriptors currently being processed
+ * @queue: pending descriptors which are handled next
+ * @free_list: list of free descriptors which can be used
+ * @runtime_addr: physical address currently used as dest/src (M2M only). This
+ *                is set via %DMA_SLAVE_CONFIG before slave operation is
+ *                prepared
+ * @runtime_ctrl: M2M runtime values for the control register.
+ *
+ * As EP93xx DMA controller doesn't support real chained DMA descriptors we
+ * will have slightly different scheme here: @active points to a head of
+ * flattened DMA descriptor chain.
+ *
+ * @queue holds pending transactions. These are linked through the first
+ * descriptor in the chain. When a descriptor is moved to the @active queue,
+ * the first and chained descriptors are flattened into a single list.
+ *
+ * @chan.private holds pointer to &struct ep93xx_dma_data which contains
+ * necessary channel configuration information. For memcpy channels this must
+ * be %NULL.
+ */
+struct ep93xx_dma_chan {
+	struct dma_chan			chan;
+	const struct ep93xx_dma_engine	*edma;
+	void __iomem			*regs;
+	int				irq;
+	struct clk			*clk;
+	struct tasklet_struct		tasklet;
+	/* protects the fields following */
+	spinlock_t			lock;
+	unsigned long			flags;
+/* Channel is configured for cyclic transfers */
+#define EP93XX_DMA_IS_CYCLIC		0
+
+	int				buffer;
+	dma_cookie_t			last_completed;
+	struct list_head		active;
+	struct list_head		queue;
+	struct list_head		free_list;
+	u32				runtime_addr;
+	u32				runtime_ctrl;
+};
+
+/**
+ * struct ep93xx_dma_engine - the EP93xx DMA engine instance
+ * @dma_dev: holds the dmaengine device
+ * @m2m: is this an M2M or M2P device
+ * @hw_setup: method which sets the channel up for operation
+ * @hw_shutdown: shuts the channel down and flushes whatever is left
+ * @hw_submit: pushes active descriptor(s) to the hardware
+ * @hw_interrupt: handle the interrupt
+ * @num_channels: number of channels for this instance
+ * @channels: array of channels
+ *
+ * There is one instance of this struct for the M2P channels and one for the
+ * M2M channels. hw_xxx() methods are used to perform operations which are
+ * different on M2M and M2P channels. These methods are called with channel
+ * lock held and interrupts disabled so they cannot sleep.
+ */
+struct ep93xx_dma_engine {
+	struct dma_device	dma_dev;
+	bool			m2m;
+	int			(*hw_setup)(struct ep93xx_dma_chan *);
+	void			(*hw_shutdown)(struct ep93xx_dma_chan *);
+	void			(*hw_submit)(struct ep93xx_dma_chan *);
+	int			(*hw_interrupt)(struct ep93xx_dma_chan *);
+#define INTERRUPT_UNKNOWN	0
+#define INTERRUPT_DONE		1
+#define INTERRUPT_NEXT_BUFFER	2
+
+	size_t			num_channels;
+	struct ep93xx_dma_chan	channels[];
+};
+
+static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
+{
+	return &edmac->chan.dev->device;
+}
+
+static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct ep93xx_dma_chan, chan);
+}
+
+/**
+ * ep93xx_dma_set_active - set new active descriptor chain
+ * @edmac: channel
+ * @desc: head of the new active descriptor chain
+ *
+ * Sets @desc to be the head of the new active descriptor chain. This is the
+ * chain which is processed next. The active list must be empty before calling
+ * this function.
+ *
+ * Called with @edmac->lock held and interrupts disabled.
+ */
+static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
+				  struct ep93xx_dma_desc *desc)
+{
+	BUG_ON(!list_empty(&edmac->active));
+
+	list_add_tail(&desc->node, &edmac->active);
+
+	/* Flatten the @desc->tx_list chain into @edmac->active list */
+	while (!list_empty(&desc->tx_list)) {
+		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
+			struct ep93xx_dma_desc, node);
+
+		/*
+		 * We copy the callback parameters from the first descriptor
+		 * to all the chained descriptors. This way we can call the
+		 * callback without having to look up the first descriptor in
+		 * the chain. Useful for cyclic transfers.
+		 */
+		d->txd.callback = desc->txd.callback;
+		d->txd.callback_param = desc->txd.callback_param;
+
+		list_move_tail(&d->node, &edmac->active);
+	}
+}
+
+/* Called with @edmac->lock held and interrupts disabled */
+static struct ep93xx_dma_desc *
+ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
+{
+	return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
+}
+
+/**
+ * ep93xx_dma_advance_active - advances to the next active descriptor
+ * @edmac: channel
+ *
+ * Advances the active descriptor to the next one in @edmac->active and
+ * returns %true if we still have descriptors in the chain to process.
+ * Otherwise returns %false.
+ *
+ * When the channel is in cyclic mode, this always returns %true.
+ *
+ * Called with @edmac->lock held and interrupts disabled.
+ */
+static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
+{
+	list_rotate_left(&edmac->active);
+
+	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
+		return true;
+
+	/*
+	 * If txd.cookie is set it means that we are back in the first
+	 * descriptor in the chain and hence done with it.
+	 */
+	return !ep93xx_dma_get_active(edmac)->txd.cookie;
+}
+
+/*
+ * M2P DMA implementation
+ */
+
+static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
+{
+	writel(control, edmac->regs + M2P_CONTROL);
+	/*
+	 * The EP93xx User's Guide states that we must perform a dummy read
+	 * after a write to the control register.
+	 */
+	readl(edmac->regs + M2P_CONTROL);
+}
+
+static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
+{
+	struct ep93xx_dma_data *data = edmac->chan.private;
+	u32 control;
+
+	writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);
+
+	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
+		| M2P_CONTROL_ENABLE;
+	m2p_set_control(edmac, control);
+
+	return 0;
+}
+
+static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
+{
+	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
+}
+
+static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
+{
+	u32 control;
+
+	control = readl(edmac->regs + M2P_CONTROL);
+	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
+	m2p_set_control(edmac, control);
+
+	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
+		cpu_relax();
+
+	m2p_set_control(edmac, 0);
+
+	while (m2p_channel_state(edmac) == M2P_STATE_STALL)
+		cpu_relax();
+}
+
+static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
+{
+	struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+	u32 bus_addr;
+
+	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_TO_DEVICE)
+		bus_addr = desc->src_addr;
+	else
+		bus_addr = desc->dst_addr;
+
+	if (edmac->buffer == 0) {
+		writel(desc->size, edmac->regs + M2P_MAXCNT0);
+		writel(bus_addr, edmac->regs + M2P_BASE0);
+	} else {
+		writel(desc->size, edmac->regs + M2P_MAXCNT1);
+		writel(bus_addr, edmac->regs + M2P_BASE1);
+	}
+
+	edmac->buffer ^= 1;
+}
+
+static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
+{
+	u32 control = readl(edmac->regs + M2P_CONTROL);
+
+	m2p_fill_desc(edmac);
+	control |= M2P_CONTROL_STALLINT;
+
+	if (ep93xx_dma_advance_active(edmac)) {
+		m2p_fill_desc(edmac);
+		control |= M2P_CONTROL_NFBINT;
+	}
+
+	m2p_set_control(edmac, control);
+}
+
+static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
+{
+	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
+	u32 control;
+
+	if (irq_status & M2P_INTERRUPT_ERROR) {
+		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+
+		/* Clear the error interrupt */
+		writel(1, edmac->regs + M2P_INTERRUPT);
+
+		/*
+		 * It seems that there is no easy way of reporting errors back
+		 * to the client, so we just report the error here and continue
+		 * as usual.
+		 *
+		 * Revisit this when there is a mechanism to report back the
+		 * errors.
+		 */
+		dev_err(chan2dev(edmac),
+			"DMA transfer failed! Details:\n"
+			"\tcookie	: %d\n"
+			"\tsrc_addr	: 0x%08x\n"
+			"\tdst_addr	: 0x%08x\n"
+			"\tsize		: %zu\n",
+			desc->txd.cookie, desc->src_addr, desc->dst_addr,
+			desc->size);
+	}
+
+	switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) {
+	case M2P_INTERRUPT_STALL:
+		/* Disable interrupts */
+		control = readl(edmac->regs + M2P_CONTROL);
+		control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
+		m2p_set_control(edmac, control);
+
+		return INTERRUPT_DONE;
+
+	case M2P_INTERRUPT_NFB:
+		if (ep93xx_dma_advance_active(edmac))
+			m2p_fill_desc(edmac);
+
+		return INTERRUPT_NEXT_BUFFER;
+	}
+
+	return INTERRUPT_UNKNOWN;
+}
+
+/*
+ * M2M DMA implementation
+ *
+ * For M2M transfers we don't use NFB at all. This is because it simply
+ * doesn't work well with memcpy transfers. When both buffers are submitted,
+ * it is extremely unlikely that we get an NFB interrupt; instead a DONE
+ * interrupt is reported with both buffers already transferred, which means
+ * that we were never able to update the next buffer.
+ *
+ * So for now we "simulate" NFB by just submitting buffer after buffer
+ * without double buffering.
+ */
+
+static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
+{
+	const struct ep93xx_dma_data *data = edmac->chan.private;
+	u32 control = 0;
+
+	if (!data) {
+		/* This is a memcpy channel, nothing to configure */
+		writel(control, edmac->regs + M2M_CONTROL);
+		return 0;
+	}
+
+	switch (data->port) {
+	case EP93XX_DMA_SSP:
+		/*
+		 * This was found by experimenting - anything less than 5
+		 * causes the channel to perform only a partial transfer, which
+		 * leads to problems since we then don't get a DONE interrupt.
+		 */
+		control = (5 << M2M_CONTROL_PWSC_SHIFT);
+		control |= M2M_CONTROL_NO_HDSK;
+
+		if (data->direction == DMA_TO_DEVICE) {
+			control |= M2M_CONTROL_DAH;
+			control |= M2M_CONTROL_TM_TX;
+			control |= M2M_CONTROL_RSS_SSPTX;
+		} else {
+			control |= M2M_CONTROL_SAH;
+			control |= M2M_CONTROL_TM_RX;
+			control |= M2M_CONTROL_RSS_SSPRX;
+		}
+		break;
+
+	case EP93XX_DMA_IDE:
+		/*
+		 * This IDE part is totally untested. Values below are taken
+		 * from the EP93xx User's Guide and might not be correct.
+		 */
+		control |= M2M_CONTROL_NO_HDSK;
+		control |= M2M_CONTROL_RSS_IDE;
+		control |= M2M_CONTROL_PW_16;
+
+		if (data->direction == DMA_TO_DEVICE) {
+			/* Worst case from the UG */
+			control = (3 << M2M_CONTROL_PWSC_SHIFT);
+			control |= M2M_CONTROL_DAH;
+			control |= M2M_CONTROL_TM_TX;
+		} else {
+			control = (2 << M2M_CONTROL_PWSC_SHIFT);
+			control |= M2M_CONTROL_SAH;
+			control |= M2M_CONTROL_TM_RX;
+		}
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	writel(control, edmac->regs + M2M_CONTROL);
+	return 0;
+}
+
+static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
+{
+	/* Just disable the channel */
+	writel(0, edmac->regs + M2M_CONTROL);
+}
+
+static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
+{
+	struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+
+	if (edmac->buffer == 0) {
+		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
+		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
+		writel(desc->size, edmac->regs + M2M_BCR0);
+	} else {
+		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
+		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
+		writel(desc->size, edmac->regs + M2M_BCR1);
+	}
+
+	edmac->buffer ^= 1;
+}
+
+static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
+{
+	struct ep93xx_dma_data *data = edmac->chan.private;
+	u32 control = readl(edmac->regs + M2M_CONTROL);
+
+	/*
+	 * Since we allow clients to configure PW (peripheral width) we always
+	 * clear the PW bits here and then set them according to what is given
+	 * in the runtime configuration.
+	 */
+	control &= ~M2M_CONTROL_PW_MASK;
+	control |= edmac->runtime_ctrl;
+
+	m2m_fill_desc(edmac);
+	control |= M2M_CONTROL_DONEINT;
+
+	/*
+	 * Now we can finally enable the channel. For M2M channels this must
+	 * be done _after_ the BCRx registers are programmed.
+	 */
+	control |= M2M_CONTROL_ENABLE;
+	writel(control, edmac->regs + M2M_CONTROL);
+
+	if (!data) {
+		/*
+		 * For memcpy channels the software trigger must be asserted
+		 * in order to start the memcpy operation.
+		 */
+		control |= M2M_CONTROL_START;
+		writel(control, edmac->regs + M2M_CONTROL);
+	}
+}
+
+static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
+{
+	u32 control;
+
+	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_DONEINT))
+		return INTERRUPT_UNKNOWN;
+
+	/* Clear the DONE bit */
+	writel(0, edmac->regs + M2M_INTERRUPT);
+
+	/* Disable interrupts and the channel */
+	control = readl(edmac->regs + M2M_CONTROL);
+	control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_ENABLE);
+	writel(control, edmac->regs + M2M_CONTROL);
+
+	/*
+	 * Since we only get a DONE interrupt we have to find out ourselves
+	 * whether there is still something to process. So we try to advance
+	 * the chain and see whether it succeeds.
+	 */
+	if (ep93xx_dma_advance_active(edmac)) {
+		edmac->edma->hw_submit(edmac);
+		return INTERRUPT_NEXT_BUFFER;
+	}
+
+	return INTERRUPT_DONE;
+}
+
+/*
+ * DMA engine API implementation
+ */
+
+static struct ep93xx_dma_desc *
+ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
+{
+	struct ep93xx_dma_desc *desc, *_desc;
+	struct ep93xx_dma_desc *ret = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&edmac->lock, flags);
+	list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
+		if (async_tx_test_ack(&desc->txd)) {
+			list_del_init(&desc->node);
+
+			/* Re-initialize the descriptor */
+			desc->src_addr = 0;
+			desc->dst_addr = 0;
+			desc->size = 0;
+			desc->complete = false;
+			desc->txd.cookie = 0;
+			desc->txd.callback = NULL;
+			desc->txd.callback_param = NULL;
+
+			ret = desc;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&edmac->lock, flags);
+	return ret;
+}
+
+static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
+				struct ep93xx_dma_desc *desc)
+{
+	if (desc) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&edmac->lock, flags);
+		list_splice_init(&desc->tx_list, &edmac->free_list);
+		list_add(&desc->node, &edmac->free_list);
+		spin_unlock_irqrestore(&edmac->lock, flags);
+	}
+}
+
+/**
+ * ep93xx_dma_advance_work - start processing the next pending transaction
+ * @edmac: channel
+ *
+ * If we have pending transactions queued and we are currently idling, this
+ * function takes the next queued transaction from the @edmac->queue and
+ * pushes it to the hardware for execution.
+ */
+static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
+{
+	struct ep93xx_dma_desc *new;
+	unsigned long flags;
+
+	spin_lock_irqsave(&edmac->lock, flags);
+	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
+		spin_unlock_irqrestore(&edmac->lock, flags);
+		return;
+	}
+
+	/* Take the next descriptor from the pending queue */
+	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
+	list_del_init(&new->node);
+
+	ep93xx_dma_set_active(edmac, new);
+
+	/* Push it to the hardware */
+	edmac->edma->hw_submit(edmac);
+	spin_unlock_irqrestore(&edmac->lock, flags);
+}
+
+static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc)
+{
+	struct device *dev = desc->txd.chan->device->dev;
+
+	if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+		if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+			dma_unmap_single(dev, desc->src_addr, desc->size,
+					 DMA_TO_DEVICE);
+		else
+			dma_unmap_page(dev, desc->src_addr, desc->size,
+				       DMA_TO_DEVICE);
+	}
+	if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+		if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+			dma_unmap_single(dev, desc->dst_addr, desc->size,
+					 DMA_FROM_DEVICE);
+		else
+			dma_unmap_page(dev, desc->dst_addr, desc->size,
+				       DMA_FROM_DEVICE);
+	}
+}
+
+static void ep93xx_dma_tasklet(unsigned long data)
+{
+	struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
+	struct ep93xx_dma_desc *desc, *d;
+	dma_async_tx_callback callback;
+	void *callback_param;
+	LIST_HEAD(list);
+
+	spin_lock_irq(&edmac->lock);
+	desc = ep93xx_dma_get_active(edmac);
+	if (desc->complete) {
+		edmac->last_completed = desc->txd.cookie;
+		list_splice_init(&edmac->active, &list);
+	}
+	spin_unlock_irq(&edmac->lock);
+
+	/* Pick up the next descriptor from the queue */
+	ep93xx_dma_advance_work(edmac);
+
+	callback = desc->txd.callback;
+	callback_param = desc->txd.callback_param;
+
+	/* Now we can release all the chained descriptors */
+	list_for_each_entry_safe(desc, d, &list, node) {
+		/*
+		 * For the memcpy channels the API requires us to unmap the
+		 * buffers unless requested otherwise.
+		 */
+		if (!edmac->chan.private)
+			ep93xx_dma_unmap_buffers(desc);
+
+		ep93xx_dma_desc_put(edmac, desc);
+	}
+
+	if (callback)
+		callback(callback_param);
+}
+
+static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
+{
+	struct ep93xx_dma_chan *edmac = dev_id;
+	irqreturn_t ret = IRQ_HANDLED;
+
+	spin_lock(&edmac->lock);
+
+	switch (edmac->edma->hw_interrupt(edmac)) {
+	case INTERRUPT_DONE:
+		ep93xx_dma_get_active(edmac)->complete = true;
+		tasklet_schedule(&edmac->tasklet);
+		break;
+
+	case INTERRUPT_NEXT_BUFFER:
+		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
+			tasklet_schedule(&edmac->tasklet);
+		break;
+
+	default:
+		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
+		ret = IRQ_NONE;
+		break;
+	}
+
+	spin_unlock(&edmac->lock);
+	return ret;
+}
+
+/**
+ * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
+ * @tx: descriptor to be executed
+ *
+ * Executes the given descriptor on the hardware or, if the hardware is busy,
+ * queues the descriptor to be executed later. Returns a cookie which can be
+ * used to poll the status of the descriptor.
+ */
+static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
+	struct ep93xx_dma_desc *desc;
+	dma_cookie_t cookie;
+	unsigned long flags;
+
+	spin_lock_irqsave(&edmac->lock, flags);
+
+	cookie = edmac->chan.cookie;
+
+	if (++cookie < 0)
+		cookie = 1;
+
+	desc = container_of(tx, struct ep93xx_dma_desc, txd);
+
+	edmac->chan.cookie = cookie;
+	desc->txd.cookie = cookie;
+
+	/*
+	 * If nothing is currently being processed, we push this descriptor
+	 * directly to the hardware. Otherwise we put the descriptor
+	 * on the pending queue.
+	 */
+	if (list_empty(&edmac->active)) {
+		ep93xx_dma_set_active(edmac, desc);
+		edmac->edma->hw_submit(edmac);
+	} else {
+		list_add_tail(&desc->node, &edmac->queue);
+	}
+
+	spin_unlock_irqrestore(&edmac->lock, flags);
+	return cookie;
+}
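+
+/*
+ * A minimal sketch of the client side of this: after a descriptor has been
+ * prepared, it is submitted and then issued. dmaengine_submit() ends up in
+ * ep93xx_dma_tx_submit() above; the function name below is made up.
+ */
+static dma_cookie_t
+ep93xx_dma_example_start(struct dma_async_tx_descriptor *txd)
+{
+	dma_cookie_t cookie = dmaengine_submit(txd);
+
+	/* Make sure anything left on the pending queue gets executed too */
+	dma_async_issue_pending(txd->chan);
+	return cookie;
+}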
+
+/**
+ * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
+ * @chan: channel to allocate resources
+ *
+ * Allocates the necessary resources for the given DMA channel and returns
+ * the number of allocated descriptors for the channel. A negative errno is
+ * returned in case of failure.
+ */
+static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+	struct ep93xx_dma_data *data = chan->private;
+	const char *name = dma_chan_name(chan);
+	int ret, i;
+
+	/* Sanity check the channel parameters */
+	if (!edmac->edma->m2m) {
+		if (!data)
+			return -EINVAL;
+		if (data->port < EP93XX_DMA_I2S1 ||
+		    data->port > EP93XX_DMA_IRDA)
+			return -EINVAL;
+		if (data->direction != ep93xx_dma_chan_direction(chan))
+			return -EINVAL;
+	} else {
+		if (data) {
+			switch (data->port) {
+			case EP93XX_DMA_SSP:
+			case EP93XX_DMA_IDE:
+				if (data->direction != DMA_TO_DEVICE &&
+				    data->direction != DMA_FROM_DEVICE)
+					return -EINVAL;
+				break;
+			default:
+				return -EINVAL;
+			}
+		}
+	}
+
+	if (data && data->name)
+		name = data->name;
+
+	ret = clk_enable(edmac->clk);
+	if (ret)
+		return ret;
+
+	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
+	if (ret)
+		goto fail_clk_disable;
+
+	spin_lock_irq(&edmac->lock);
+	edmac->last_completed = 1;
+	edmac->chan.cookie = 1;
+	ret = edmac->edma->hw_setup(edmac);
+	spin_unlock_irq(&edmac->lock);
+
+	if (ret)
+		goto fail_free_irq;
+
+	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
+		struct ep93xx_dma_desc *desc;
+
+		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+		if (!desc) {
+			dev_warn(chan2dev(edmac), "not enough descriptors\n");
+			break;
+		}
+
+		INIT_LIST_HEAD(&desc->tx_list);
+
+		dma_async_tx_descriptor_init(&desc->txd, chan);
+		desc->txd.flags = DMA_CTRL_ACK;
+		desc->txd.tx_submit = ep93xx_dma_tx_submit;
+
+		ep93xx_dma_desc_put(edmac, desc);
+	}
+
+	return i;
+
+fail_free_irq:
+	free_irq(edmac->irq, edmac);
+fail_clk_disable:
+	clk_disable(edmac->clk);
+
+	return ret;
+}
+
+/**
+ * ep93xx_dma_free_chan_resources - release resources for the channel
+ * @chan: channel
+ *
+ * Function releases all the resources allocated for the given channel.
+ * The channel must be idle when this is called.
+ */
+static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
+{
+	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+	struct ep93xx_dma_desc *desc, *d;
+	unsigned long flags;
+	LIST_HEAD(list);
+
+	BUG_ON(!list_empty(&edmac->active));
+	BUG_ON(!list_empty(&edmac->queue));
+
+	spin_lock_irqsave(&edmac->lock, flags);
+	edmac->edma->hw_shutdown(edmac);
+	edmac->runtime_addr = 0;
+	edmac->runtime_ctrl = 0;
+	edmac->buffer = 0;
+	list_splice_init(&edmac->free_list, &list);
+	spin_unlock_irqrestore(&edmac->lock, flags);
+
+	list_for_each_entry_safe(desc, d, &list, node)
+		kfree(desc);
+
+	clk_disable(edmac->clk);
+	free_irq(edmac->irq, edmac);
+}
+
+/**
+ * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
+ * @chan: channel
+ * @dest: destination bus address
+ * @src: source bus address
+ * @len: size of the transaction
+ * @flags: flags for the descriptor
+ *
+ * Returns a valid DMA descriptor or %NULL in case of failure.
+ */
+struct dma_async_tx_descriptor *
+ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
+			   dma_addr_t src, size_t len, unsigned long flags)
+{
+	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+	struct ep93xx_dma_desc *desc, *first;
+	size_t bytes, offset;
+
+	first = NULL;
+	for (offset = 0; offset < len; offset += bytes) {
+		desc = ep93xx_dma_desc_get(edmac);
+		if (!desc) {
+			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
+			goto fail;
+		}
+
+		bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);
+
+		desc->src_addr = src + offset;
+		desc->dst_addr = dest + offset;
+		desc->size = bytes;
+
+		if (!first)
+			first = desc;
+		else
+			list_add_tail(&desc->node, &first->tx_list);
+	}
+
+	first->txd.cookie = -EBUSY;
+	first->txd.flags = flags;
+
+	return &first->txd;
+fail:
+	ep93xx_dma_desc_put(edmac, first);
+	return NULL;
+}
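+
+/*
+ * A minimal sketch of how a memcpy client might obtain a channel: no filter
+ * and no @chan.private (memcpy channels must leave it %NULL), just any
+ * DMA_MEMCPY capable channel. The function name is made up for illustration.
+ */
+static struct dma_chan *ep93xx_dma_example_memcpy_chan(void)
+{
+	dma_cap_mask_t mask;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_MEMCPY, mask);
+
+	return dma_request_channel(mask, NULL, NULL);
+}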
+
+/**
+ * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
+ * @chan: channel
+ * @sgl: list of buffers to transfer
+ * @sg_len: number of entries in @sgl
+ * @dir: direction of the DMA transfer
+ * @flags: flags for the descriptor
+ *
+ * Returns a valid DMA descriptor or %NULL in case of failure.
+ */
+static struct dma_async_tx_descriptor *
+ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+			 unsigned int sg_len, enum dma_data_direction dir,
+			 unsigned long flags)
+{
+	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+	struct ep93xx_dma_desc *desc, *first;
+	struct scatterlist *sg;
+	int i;
+
+	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
+		dev_warn(chan2dev(edmac),
+			 "channel was configured with different direction\n");
+		return NULL;
+	}
+
+	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
+		dev_warn(chan2dev(edmac),
+			 "channel is already used for cyclic transfers\n");
+		return NULL;
+	}
+
+	first = NULL;
+	for_each_sg(sgl, sg, sg_len, i) {
+		size_t sg_len = sg_dma_len(sg);
+
+		if (sg_len > DMA_MAX_CHAN_BYTES) {
+			dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
+				 sg_len);
+			goto fail;
+		}
+
+		desc = ep93xx_dma_desc_get(edmac);
+		if (!desc) {
+			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
+			goto fail;
+		}
+
+		if (dir == DMA_TO_DEVICE) {
+			desc->src_addr = sg_dma_address(sg);
+			desc->dst_addr = edmac->runtime_addr;
+		} else {
+			desc->src_addr = edmac->runtime_addr;
+			desc->dst_addr = sg_dma_address(sg);
+		}
+		desc->size = sg_len;
+
+		if (!first)
+			first = desc;
+		else
+			list_add_tail(&desc->node, &first->tx_list);
+	}
+
+	first->txd.cookie = -EBUSY;
+	first->txd.flags = flags;
+
+	return &first->txd;
+
+fail:
+	ep93xx_dma_desc_put(edmac, first);
+	return NULL;
+}
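+
+/*
+ * A minimal sketch of calling the function above through the dmaengine hook.
+ * The scatterlist is assumed to be mapped with dma_map_sg() already; the
+ * function name is made up for illustration.
+ */
+static struct dma_async_tx_descriptor *
+ep93xx_dma_example_prep_tx(struct dma_chan *chan, struct scatterlist *sgl,
+			   unsigned int nents)
+{
+	return chan->device->device_prep_slave_sg(chan, sgl, nents,
+						  DMA_TO_DEVICE, DMA_CTRL_ACK);
+}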
+
+/**
+ * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
+ * @chan: channel
+ * @dma_addr: DMA mapped address of the buffer
+ * @buf_len: length of the buffer (in bytes)
+ * @period_len: length of a single period (in bytes)
+ * @dir: direction of the operation
+ *
+ * Prepares a descriptor for a cyclic DMA operation. This means that once the
+ * descriptor is submitted, we will keep submitting @period_len sized buffers
+ * and calling the callback once a period has elapsed. The transfer terminates
+ * only when the client calls dmaengine_terminate_all() for this channel.
+ *
+ * Returns a valid DMA descriptor or %NULL in case of failure.
+ */
+static struct dma_async_tx_descriptor *
+ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
+			   size_t buf_len, size_t period_len,
+			   enum dma_data_direction dir)
+{
+	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+	struct ep93xx_dma_desc *desc, *first;
+	size_t offset = 0;
+
+	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
+		dev_warn(chan2dev(edmac),
+			 "channel was configured with different direction\n");
+		return NULL;
+	}
+
+	if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
+		dev_warn(chan2dev(edmac),
+			 "channel is already used for cyclic transfers\n");
+		return NULL;
+	}
+
+	if (period_len > DMA_MAX_CHAN_BYTES) {
+		dev_warn(chan2dev(edmac), "too big period length %zu\n",
+			 period_len);
+		return NULL;
+	}
+
+	/* Split the buffer into period size chunks */
+	first = NULL;
+	for (offset = 0; offset < buf_len; offset += period_len) {
+		desc = ep93xx_dma_desc_get(edmac);
+		if (!desc) {
+			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
+			goto fail;
+		}
+
+		if (dir == DMA_TO_DEVICE) {
+			desc->src_addr = dma_addr + offset;
+			desc->dst_addr = edmac->runtime_addr;
+		} else {
+			desc->src_addr = edmac->runtime_addr;
+			desc->dst_addr = dma_addr + offset;
+		}
+
+		desc->size = period_len;
+
+		if (!first)
+			first = desc;
+		else
+			list_add_tail(&desc->node, &first->tx_list);
+	}
+
+	first->txd.cookie = -EBUSY;
+
+	return &first->txd;
+
+fail:
+	ep93xx_dma_desc_put(edmac, first);
+	return NULL;
+}
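+
+/*
+ * A minimal sketch of starting a cyclic transfer with the function above,
+ * e.g. for audio. The callback runs once per elapsed period and the transfer
+ * keeps going until dmaengine_terminate_all() is called. All names below are
+ * made up for illustration.
+ */
+static int ep93xx_dma_example_cyclic(struct dma_chan *chan, dma_addr_t buf,
+				     size_t buf_len, size_t period_len,
+				     dma_async_tx_callback period_done,
+				     void *param)
+{
+	struct dma_async_tx_descriptor *txd;
+
+	txd = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
+						   period_len, DMA_TO_DEVICE);
+	if (!txd)
+		return -ENOMEM;
+
+	/* The callback is invoked once per elapsed period */
+	txd->callback = period_done;
+	txd->callback_param = param;
+
+	dmaengine_submit(txd);
+	dma_async_issue_pending(chan);
+	return 0;
+}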
+
+/**
+ * ep93xx_dma_terminate_all - terminate all transactions
+ * @edmac: channel
+ *
+ * Stops all DMA transactions. All descriptors are put back to the
+ * @edmac->free_list and callbacks are _not_ called.
+ */
+static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac)
+{
+	struct ep93xx_dma_desc *desc, *_d;
+	unsigned long flags;
+	LIST_HEAD(list);
+
+	spin_lock_irqsave(&edmac->lock, flags);
+	/* First we disable and flush the DMA channel */
+	edmac->edma->hw_shutdown(edmac);
+	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
+	list_splice_init(&edmac->active, &list);
+	list_splice_init(&edmac->queue, &list);
+	/*
+	 * We then re-enable the channel. This way we can continue submitting
+	 * the descriptors by just calling ->hw_submit() again.
+	 */
+	edmac->edma->hw_setup(edmac);
+	spin_unlock_irqrestore(&edmac->lock, flags);
+
+	list_for_each_entry_safe(desc, _d, &list, node)
+		ep93xx_dma_desc_put(edmac, desc);
+
+	return 0;
+}
+
+static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
+				   struct dma_slave_config *config)
+{
+	enum dma_slave_buswidth width;
+	unsigned long flags;
+	u32 addr, ctrl;
+
+	if (!edmac->edma->m2m)
+		return -EINVAL;
+
+	switch (config->direction) {
+	case DMA_FROM_DEVICE:
+		width = config->src_addr_width;
+		addr = config->src_addr;
+		break;
+
+	case DMA_TO_DEVICE:
+		width = config->dst_addr_width;
+		addr = config->dst_addr;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	switch (width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		ctrl = 0;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		ctrl = M2M_CONTROL_PW_16;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		ctrl = M2M_CONTROL_PW_32;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&edmac->lock, flags);
+	edmac->runtime_addr = addr;
+	edmac->runtime_ctrl = ctrl;
+	spin_unlock_irqrestore(&edmac->lock, flags);
+
+	return 0;
+}
+
+/**
+ * ep93xx_dma_control - manipulate all pending operations on a channel
+ * @chan: channel
+ * @cmd: control command to perform
+ * @arg: optional argument
+ *
+ * Controls the channel. Returns %0 in case of success or a negative error
+ * in case of failure.
+ */
+static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			      unsigned long arg)
+{
+	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+	struct dma_slave_config *config;
+
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		return ep93xx_dma_terminate_all(edmac);
+
+	case DMA_SLAVE_CONFIG:
+		config = (struct dma_slave_config *)arg;
+		return ep93xx_dma_slave_config(edmac, config);
+
+	default:
+		break;
+	}
+
+	return -ENOSYS;
+}
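+
+/*
+ * A minimal sketch of configuring an M2M slave channel (for example the SSP)
+ * before any transfers are prepared. The device address and bus width below
+ * are made-up values; dmaengine_slave_config() ends up in ep93xx_dma_control()
+ * above.
+ */
+static int ep93xx_dma_example_config(struct dma_chan *chan,
+				     dma_addr_t dev_addr)
+{
+	struct dma_slave_config config = {
+		.direction	= DMA_TO_DEVICE,
+		.dst_addr	= dev_addr,
+		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
+	};
+
+	return dmaengine_slave_config(chan, &config);
+}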
+
+/**
+ * ep93xx_dma_tx_status - check if a transaction is completed
+ * @chan: channel
+ * @cookie: transaction specific cookie
+ * @state: state of the transaction is stored here if given
+ *
+ * This function can be used to query the state of a given transaction.
+ */
+static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
+					    dma_cookie_t cookie,
+					    struct dma_tx_state *state)
+{
+	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+	dma_cookie_t last_used, last_completed;
+	enum dma_status ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&edmac->lock, flags);
+	last_used = chan->cookie;
+	last_completed = edmac->last_completed;
+	spin_unlock_irqrestore(&edmac->lock, flags);
+
+	ret = dma_async_is_complete(cookie, last_completed, last_used);
+	dma_set_tx_state(state, last_completed, last_used, 0);
+
+	return ret;
+}
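+
+/*
+ * A minimal sketch of polling for completion of a submitted cookie.
+ * dma_async_is_tx_complete() ends up in ep93xx_dma_tx_status() above; the
+ * function name is made up for illustration.
+ */
+static bool ep93xx_dma_example_done(struct dma_chan *chan, dma_cookie_t cookie)
+{
+	enum dma_status status;
+
+	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
+	return status == DMA_SUCCESS;
+}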
+
+/**
+ * ep93xx_dma_issue_pending - push pending transactions to the hardware
+ * @chan: channel
+ *
+ * When this function is called, all pending transactions are pushed to the
+ * hardware and executed.
+ */
+static void ep93xx_dma_issue_pending(struct dma_chan *chan)
+{
+	ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
+}
+
+static int __init ep93xx_dma_probe(struct platform_device *pdev)
+{
+	struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
+	struct ep93xx_dma_engine *edma;
+	struct dma_device *dma_dev;
+	size_t edma_size;
+	int ret, i;
+
+	edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
+	edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
+	if (!edma)
+		return -ENOMEM;
+
+	dma_dev = &edma->dma_dev;
+	edma->m2m = platform_get_device_id(pdev)->driver_data;
+	edma->num_channels = pdata->num_channels;
+
+	INIT_LIST_HEAD(&dma_dev->channels);
+	for (i = 0; i < pdata->num_channels; i++) {
+		const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
+		struct ep93xx_dma_chan *edmac = &edma->channels[i];
+
+		edmac->chan.device = dma_dev;
+		edmac->regs = cdata->base;
+		edmac->irq = cdata->irq;
+		edmac->edma = edma;
+
+		edmac->clk = clk_get(NULL, cdata->name);
+		if (IS_ERR(edmac->clk)) {
+			dev_warn(&pdev->dev, "failed to get clock for %s\n",
+				 cdata->name);
+			continue;
+		}
+
+		spin_lock_init(&edmac->lock);
+		INIT_LIST_HEAD(&edmac->active);
+		INIT_LIST_HEAD(&edmac->queue);
+		INIT_LIST_HEAD(&edmac->free_list);
+		tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
+			     (unsigned long)edmac);
+
+		list_add_tail(&edmac->chan.device_node,
+			      &dma_dev->channels);
+	}
+
+	dma_cap_zero(dma_dev->cap_mask);
+	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
+
+	dma_dev->dev = &pdev->dev;
+	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
+	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
+	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
+	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
+	dma_dev->device_control = ep93xx_dma_control;
+	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
+	dma_dev->device_tx_status = ep93xx_dma_tx_status;
+
+	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);
+
+	if (edma->m2m) {
+		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;
+
+		edma->hw_setup = m2m_hw_setup;
+		edma->hw_shutdown = m2m_hw_shutdown;
+		edma->hw_submit = m2m_hw_submit;
+		edma->hw_interrupt = m2m_hw_interrupt;
+	} else {
+		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
+
+		edma->hw_setup = m2p_hw_setup;
+		edma->hw_shutdown = m2p_hw_shutdown;
+		edma->hw_submit = m2p_hw_submit;
+		edma->hw_interrupt = m2p_hw_interrupt;
+	}
+
+	ret = dma_async_device_register(dma_dev);
+	if (unlikely(ret)) {
+		for (i = 0; i < edma->num_channels; i++) {
+			struct ep93xx_dma_chan *edmac = &edma->channels[i];
+			if (!IS_ERR_OR_NULL(edmac->clk))
+				clk_put(edmac->clk);
+		}
+		kfree(edma);
+	} else {
+		dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
+			 edma->m2m ? "M" : "P");
+	}
+
+	return ret;
+}
+
+static struct platform_device_id ep93xx_dma_driver_ids[] = {
+	{ "ep93xx-dma-m2p", 0 },
+	{ "ep93xx-dma-m2m", 1 },
+	{ },
+};
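+
+/*
+ * A minimal sketch of the platform data the probe function above expects for
+ * an "ep93xx-dma-m2p" device. The channel name, register base and IRQ number
+ * are made-up values; the real tables live in the ep93xx platform code.
+ */
+static struct ep93xx_dma_chan_data example_m2p_channels[] = {
+	{
+		.name	= "i2s1",
+		/* made-up, statically mapped channel register base */
+		.base	= (void __iomem *)0x80000000,
+		.irq	= 17,
+	},
+};
+
+static struct ep93xx_dma_platform_data example_m2p_data = {
+	.channels	= example_m2p_channels,
+	.num_channels	= ARRAY_SIZE(example_m2p_channels),
+};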
+
+static struct platform_driver ep93xx_dma_driver = {
+	.driver		= {
+		.name	= "ep93xx-dma",
+	},
+	.id_table	= ep93xx_dma_driver_ids,
+};
+
+static int __init ep93xx_dma_module_init(void)
+{
+	return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
+}
+subsys_initcall(ep93xx_dma_module_init);
+
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
+MODULE_DESCRIPTION("EP93xx DMA driver");
+MODULE_LICENSE("GPL");

+ 7 - 2
drivers/dma/shdma.c

@@ -343,7 +343,7 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
 
 		dmae_set_dmars(sh_chan, cfg->mid_rid);
 		dmae_set_chcr(sh_chan, cfg->chcr);
-	} else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) {
+	} else {
 		dmae_init(sh_chan);
 	}
 
@@ -1144,6 +1144,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 	/* platform data */
 	shdev->pdata = pdata;
 
+	platform_set_drvdata(pdev, shdev);
+
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_get_sync(&pdev->dev);
 
@@ -1256,7 +1258,6 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 
 	pm_runtime_put(&pdev->dev);
 
-	platform_set_drvdata(pdev, shdev);
 	dma_async_device_register(&shdev->common);
 
 	return err;
@@ -1278,6 +1279,8 @@ rst_err:
 
 	if (dmars)
 		iounmap(shdev->dmars);
+
+	platform_set_drvdata(pdev, NULL);
 emapdmars:
 	iounmap(shdev->chan_reg);
 	synchronize_rcu();
@@ -1316,6 +1319,8 @@ static int __exit sh_dmae_remove(struct platform_device *pdev)
 		iounmap(shdev->dmars);
 	iounmap(shdev->chan_reg);
 
+	platform_set_drvdata(pdev, NULL);
+
 	synchronize_rcu();
 	kfree(shdev);
 

+ 4 - 19
drivers/hwmon/coretemp.c

@@ -296,7 +296,7 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
 		 * If the TjMax is not plausible, an assumption
 		 * will be used
 		 */
-		if (val > 80 && val < 120) {
+		if (val) {
 			dev_info(dev, "TjMax is %d C.\n", val);
 			return val * 1000;
 		}
@@ -304,24 +304,9 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
 
 	/*
 	 * An assumption is made for early CPUs and unreadable MSR.
-	 * NOTE: the given value may not be correct.
+	 * NOTE: the calculated value may not be correct.
 	 */
-
-	switch (c->x86_model) {
-	case 0xe:
-	case 0xf:
-	case 0x16:
-	case 0x1a:
-		dev_warn(dev, "TjMax is assumed as 100 C!\n");
-		return 100000;
-	case 0x17:
-	case 0x1c:		/* Atom CPUs */
-		return adjust_tjmax(c, id, dev);
-	default:
-		dev_warn(dev, "CPU (model=0x%x) is not supported yet,"
-			" using default TjMax of 100C.\n", c->x86_model);
-		return 100000;
-	}
+	return adjust_tjmax(c, id, dev);
 }
 
 static void __devinit get_ucode_rev_on_cpu(void *edx)
@@ -341,7 +326,7 @@ static int get_pkg_tjmax(unsigned int cpu, struct device *dev)
 	err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
 	if (!err) {
 		val = (eax >> 16) & 0xff;
-		if (val > 80 && val < 120)
+		if (val)
 			return val * 1000;
 	}
 	dev_warn(dev, "Unable to read Pkg-TjMax from CPU:%u\n", cpu);

+ 18 - 4
drivers/hwmon/max6642.c

@@ -136,15 +136,29 @@ static int max6642_detect(struct i2c_client *client,
 	if (man_id != 0x4D)
 		return -ENODEV;
 
+	/* sanity check */
+	if (i2c_smbus_read_byte_data(client, 0x04) != 0x4D
+	    || i2c_smbus_read_byte_data(client, 0x06) != 0x4D
+	    || i2c_smbus_read_byte_data(client, 0xff) != 0x4D)
+		return -ENODEV;
+
 	/*
 	 * We read the config and status register, the 4 lower bits in the
 	 * config register should be zero and bit 5, 3, 1 and 0 should be
 	 * zero in the status register.
 	 */
 	reg_config = i2c_smbus_read_byte_data(client, MAX6642_REG_R_CONFIG);
+	if ((reg_config & 0x0f) != 0x00)
+		return -ENODEV;
+
+	/* in between, another round of sanity checks */
+	if (i2c_smbus_read_byte_data(client, 0x04) != reg_config
+	    || i2c_smbus_read_byte_data(client, 0x06) != reg_config
+	    || i2c_smbus_read_byte_data(client, 0xff) != reg_config)
+		return -ENODEV;
+
 	reg_status = i2c_smbus_read_byte_data(client, MAX6642_REG_R_STATUS);
-	if (((reg_config & 0x0f) != 0x00) ||
-	    ((reg_status & 0x2b) != 0x00))
+	if ((reg_status & 0x2b) != 0x00)
 		return -ENODEV;
 
 	strlcpy(info->type, "max6642", I2C_NAME_SIZE);
@@ -246,7 +260,7 @@ static SENSOR_DEVICE_ATTR_2(temp1_max, S_IWUSR | S_IRUGO, show_temp_max,
 			    set_temp_max, 0, MAX6642_REG_W_LOCAL_HIGH);
 static SENSOR_DEVICE_ATTR_2(temp2_max, S_IWUSR | S_IRUGO, show_temp_max,
 			    set_temp_max, 1, MAX6642_REG_W_REMOTE_HIGH);
-static SENSOR_DEVICE_ATTR(temp_fault, S_IRUGO, show_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2);
 static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
 static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 4);
 
@@ -256,7 +270,7 @@ static struct attribute *max6642_attributes[] = {
 	&sensor_dev_attr_temp1_max.dev_attr.attr,
 	&sensor_dev_attr_temp2_max.dev_attr.attr,
 
-	&sensor_dev_attr_temp_fault.dev_attr.attr,
+	&sensor_dev_attr_temp2_fault.dev_attr.attr,
 	&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
 	&sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
 	NULL

+ 2 - 8
drivers/input/serio/serport.c

@@ -120,21 +120,17 @@ static void serport_ldisc_close(struct tty_struct *tty)
  * 'interrupt' routine.
  */
 
-static unsigned int serport_ldisc_receive(struct tty_struct *tty,
-		const unsigned char *cp, char *fp, int count)
+static void serport_ldisc_receive(struct tty_struct *tty, const unsigned char *cp, char *fp, int count)
 {
 	struct serport *serport = (struct serport*) tty->disc_data;
 	unsigned long flags;
 	unsigned int ch_flags;
-	int ret = 0;
 	int i;
 
 	spin_lock_irqsave(&serport->lock, flags);
 
-	if (!test_bit(SERPORT_ACTIVE, &serport->flags)) {
-		ret = -EINVAL;
+	if (!test_bit(SERPORT_ACTIVE, &serport->flags))
 		goto out;
-	}
 
 	for (i = 0; i < count; i++) {
 		switch (fp[i]) {
@@ -156,8 +152,6 @@ static unsigned int serport_ldisc_receive(struct tty_struct *tty,
 
 out:
 	spin_unlock_irqrestore(&serport->lock, flags);
-
-	return ret == 0 ? count : ret;
 }
 
 /*

+ 3 - 5
drivers/isdn/gigaset/ser-gigaset.c

@@ -674,7 +674,7 @@ gigaset_tty_ioctl(struct tty_struct *tty, struct file *file,
  *	cflags	buffer containing error flags for received characters (ignored)
  *	count	number of received characters
  */
-static unsigned int
+static void
 gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
 		    char *cflags, int count)
 {
@@ -683,12 +683,12 @@ gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
 	struct inbuf_t *inbuf;
 
 	if (!cs)
-		return -ENODEV;
+		return;
 	inbuf = cs->inbuf;
 	if (!inbuf) {
 		dev_err(cs->dev, "%s: no inbuf\n", __func__);
 		cs_put(cs);
-		return -EINVAL;
+		return;
 	}
 
 	tail = inbuf->tail;
@@ -725,8 +725,6 @@ gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
 	gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
 	gigaset_schedule_event(cs);
 	cs_put(cs);
-
-	return count;
 }
 
 /*

+ 2 - 4
drivers/misc/ti-st/st_core.c

@@ -747,8 +747,8 @@ static void st_tty_close(struct tty_struct *tty)
 	pr_debug("%s: done ", __func__);
 }
 
-static unsigned int st_tty_receive(struct tty_struct *tty,
-		const unsigned char *data, char *tty_flags, int count)
+static void st_tty_receive(struct tty_struct *tty, const unsigned char *data,
+			   char *tty_flags, int count)
 {
 #ifdef VERBOSE
 	print_hex_dump(KERN_DEBUG, ">in>", DUMP_PREFIX_NONE,
@@ -761,8 +761,6 @@ static unsigned int st_tty_receive(struct tty_struct *tty,
 	 */
 	st_recv(tty->disc_data, data, count);
 	pr_debug("done %s", __func__);
-
-	return count;
 }
 
 /* wake-up function called in from the TTY layer

+ 7 - 7
drivers/net/3c509.c

@@ -185,7 +185,7 @@ static int max_interrupt_work = 10;
 static int nopnp;
 #endif
 
-static int el3_common_init(struct net_device *dev);
+static int __devinit el3_common_init(struct net_device *dev);
 static void el3_common_remove(struct net_device *dev);
 static ushort id_read_eeprom(int index);
 static ushort read_eeprom(int ioaddr, int index);
@@ -395,7 +395,7 @@ static struct isa_driver el3_isa_driver = {
 static int isa_registered;
 
 #ifdef CONFIG_PNP
-static const struct pnp_device_id el3_pnp_ids[] __devinitconst = {
+static struct pnp_device_id el3_pnp_ids[] = {
 	{ .id = "TCM5090" }, /* 3Com Etherlink III (TP) */
 	{ .id = "TCM5091" }, /* 3Com Etherlink III */
 	{ .id = "TCM5094" }, /* 3Com Etherlink III (combo) */
@@ -478,7 +478,7 @@ static int pnp_registered;
 #endif /* CONFIG_PNP */
 
 #ifdef CONFIG_EISA
-static const struct eisa_device_id el3_eisa_ids[] __devinitconst = {
+static struct eisa_device_id el3_eisa_ids[] = {
 		{ "TCM5090" },
 		{ "TCM5091" },
 		{ "TCM5092" },
@@ -508,7 +508,7 @@ static int eisa_registered;
 #ifdef CONFIG_MCA
 static int el3_mca_probe(struct device *dev);
 
-static const short el3_mca_adapter_ids[] __devinitconst = {
+static short el3_mca_adapter_ids[] __initdata = {
 		0x627c,
 		0x627d,
 		0x62db,
@@ -517,7 +517,7 @@ static const short el3_mca_adapter_ids[] __devinitconst = {
 		0x0000
 };
 
-static const char *const el3_mca_adapter_names[] __devinitconst = {
+static char *el3_mca_adapter_names[] __initdata = {
 		"3Com 3c529 EtherLink III (10base2)",
 		"3Com 3c529 EtherLink III (10baseT)",
 		"3Com 3c529 EtherLink III (test mode)",
@@ -601,7 +601,7 @@ static void el3_common_remove (struct net_device *dev)
 }
 
 #ifdef CONFIG_MCA
-static int __devinit el3_mca_probe(struct device *device)
+static int __init el3_mca_probe(struct device *device)
 {
 	/* Based on Erik Nygren's (nygren@mit.edu) 3c529 patch,
 	 * heavily modified by Chris Beauregard
@@ -671,7 +671,7 @@ static int __devinit el3_mca_probe(struct device *device)
 #endif /* CONFIG_MCA */
 
 #ifdef CONFIG_EISA
-static int __devinit el3_eisa_probe (struct device *device)
+static int __init el3_eisa_probe (struct device *device)
 {
 	short i;
 	int ioaddr, irq, if_port;

+ 2 - 2
drivers/net/3c59x.c

@@ -901,14 +901,14 @@ static const struct dev_pm_ops vortex_pm_ops = {
 #endif /* !CONFIG_PM */
 
 #ifdef CONFIG_EISA
-static const struct eisa_device_id vortex_eisa_ids[] __devinitconst = {
+static struct eisa_device_id vortex_eisa_ids[] = {
 	{ "TCM5920", CH_3C592 },
 	{ "TCM5970", CH_3C597 },
 	{ "" }
 };
 MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids);
 
-static int __devinit vortex_eisa_probe(struct device *device)
+static int __init vortex_eisa_probe(struct device *device)
 {
 	void __iomem *ioaddr;
 	struct eisa_device *edev;

+ 2 - 4
drivers/net/caif/caif_serial.c

@@ -167,8 +167,8 @@ static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
 
 #endif
 
-static unsigned int ldisc_receive(struct tty_struct *tty,
-		const u8 *data, char *flags, int count)
+static void ldisc_receive(struct tty_struct *tty, const u8 *data,
+			char *flags, int count)
 {
 	struct sk_buff *skb = NULL;
 	struct ser_device *ser;
@@ -215,8 +215,6 @@ static unsigned int ldisc_receive(struct tty_struct *tty,
 	} else
 		++ser->dev->stats.rx_dropped;
 	update_tty_status(ser);
-
-	return count;
 }
 
 static int handle_tx(struct ser_device *ser)

+ 2 - 3
drivers/net/can/flexcan.c

@@ -923,7 +923,7 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
 	mem_size = resource_size(mem);
 	if (!request_mem_region(mem->start, mem_size, pdev->name)) {
 		err = -EBUSY;
-		goto failed_req;
+		goto failed_get;
 	}
 
 	base = ioremap(mem->start, mem_size);
@@ -977,9 +977,8 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
 	iounmap(base);
  failed_map:
 	release_mem_region(mem->start, mem_size);
- failed_req:
-	clk_put(clk);
  failed_get:
+	clk_put(clk);
  failed_clock:
 	return err;
 }

+ 3 - 6
drivers/net/can/slcan.c

@@ -425,17 +425,16 @@ static void slc_setup(struct net_device *dev)
  * in parallel
  */
 
-static unsigned int slcan_receive_buf(struct tty_struct *tty,
+static void slcan_receive_buf(struct tty_struct *tty,
 			      const unsigned char *cp, char *fp, int count)
 {
 	struct slcan *sl = (struct slcan *) tty->disc_data;
-	int bytes = count;
 
 	if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
-		return -ENODEV;
+		return;
 
 	/* Read the characters out of the buffer */
-	while (bytes--) {
+	while (count--) {
 		if (fp && *fp++) {
 			if (!test_and_set_bit(SLF_ERROR, &sl->flags))
 				sl->dev->stats.rx_errors++;
@@ -444,8 +443,6 @@ static unsigned int slcan_receive_buf(struct tty_struct *tty,
 		}
 		slcan_unesc(sl, *cp++);
 	}
-
-	return count;
 }
 
 /************************************

+ 6 - 4
drivers/net/davinci_emac.c

@@ -1781,8 +1781,8 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
 	ndev = alloc_etherdev(sizeof(struct emac_priv));
 	if (!ndev) {
 		dev_err(&pdev->dev, "error allocating net_device\n");
-		clk_put(emac_clk);
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto free_clk;
 	}
 
 	platform_set_drvdata(pdev, ndev);
@@ -1796,7 +1796,8 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
 	pdata = pdev->dev.platform_data;
 	if (!pdata) {
 		dev_err(&pdev->dev, "no platform data\n");
-		return -ENODEV;
+		rc = -ENODEV;
+		goto probe_quit;
 	}
 
 	/* MAC addr and PHY mask , RMII enable info from platform_data */
@@ -1929,8 +1930,9 @@ no_dma:
 	iounmap(priv->remap_addr);
 
 probe_quit:
-	clk_put(emac_clk);
 	free_netdev(ndev);
+free_clk:
+	clk_put(emac_clk);
 	return rc;
 }
 

+ 17 - 18
drivers/net/depca.c

@@ -331,18 +331,18 @@ static struct {
                          "DE422",\
                          ""}
 
-static const char* const depca_signature[] __devinitconst = DEPCA_SIGNATURE;
+static char* __initdata depca_signature[] = DEPCA_SIGNATURE;
 
 enum depca_type {
 	DEPCA, de100, de101, de200, de201, de202, de210, de212, de422, unknown
 };
 
-static const char depca_string[] = "depca";
+static char depca_string[] = "depca";
 
 static int depca_device_remove (struct device *device);
 
 #ifdef CONFIG_EISA
-static const struct eisa_device_id depca_eisa_ids[] __devinitconst = {
+static struct eisa_device_id depca_eisa_ids[] = {
 	{ "DEC4220", de422 },
 	{ "" }
 };
@@ -367,19 +367,19 @@ static struct eisa_driver depca_eisa_driver = {
 #define DE210_ID 0x628d
 #define DE212_ID 0x6def
 
-static const short depca_mca_adapter_ids[] __devinitconst = {
+static short depca_mca_adapter_ids[] = {
 	DE210_ID,
 	DE212_ID,
 	0x0000
 };
 
-static const char *depca_mca_adapter_name[] = {
+static char *depca_mca_adapter_name[] = {
 	"DEC EtherWORKS MC Adapter (DE210)",
 	"DEC EtherWORKS MC Adapter (DE212)",
 	NULL
 };
 
-static const enum depca_type depca_mca_adapter_type[] = {
+static enum depca_type depca_mca_adapter_type[] = {
 	de210,
 	de212,
 	0
@@ -541,9 +541,10 @@ static void SetMulticastFilter(struct net_device *dev);
 static int load_packet(struct net_device *dev, struct sk_buff *skb);
 static void depca_dbg_open(struct net_device *dev);
 
-static const u_char de1xx_irq[] __devinitconst = { 2, 3, 4, 5, 7, 9, 0 };
-static const u_char de2xx_irq[] __devinitconst = { 5, 9, 10, 11, 15, 0 };
-static const u_char de422_irq[] __devinitconst = { 5, 9, 10, 11, 0 };
+static u_char de1xx_irq[] __initdata = { 2, 3, 4, 5, 7, 9, 0 };
+static u_char de2xx_irq[] __initdata = { 5, 9, 10, 11, 15, 0 };
+static u_char de422_irq[] __initdata = { 5, 9, 10, 11, 0 };
+static u_char *depca_irq;
 
 static int irq;
 static int io;
@@ -579,7 +580,7 @@ static const struct net_device_ops depca_netdev_ops = {
 	.ndo_validate_addr	= eth_validate_addr,
 };
 
-static int __devinit depca_hw_init (struct net_device *dev, struct device *device)
+static int __init depca_hw_init (struct net_device *dev, struct device *device)
 {
 	struct depca_private *lp;
 	int i, j, offset, netRAM, mem_len, status = 0;
@@ -747,7 +748,6 @@ static int __devinit depca_hw_init (struct net_device *dev, struct device *devic
 	if (dev->irq < 2) {
 		unsigned char irqnum;
 		unsigned long irq_mask, delay;
-		const u_char *depca_irq;
 
 		irq_mask = probe_irq_on();
 
@@ -770,7 +770,6 @@ static int __devinit depca_hw_init (struct net_device *dev, struct device *devic
 			break;
 
 		default:
-			depca_irq = NULL;
 			break;	/* Not reached */
 		}
 
@@ -1303,7 +1302,7 @@ static void SetMulticastFilter(struct net_device *dev)
 	}
 }
 
-static int __devinit depca_common_init (u_long ioaddr, struct net_device **devp)
+static int __init depca_common_init (u_long ioaddr, struct net_device **devp)
 {
 	int status = 0;
 
@@ -1334,7 +1333,7 @@ static int __devinit depca_common_init (u_long ioaddr, struct net_device **devp)
 /*
 ** Microchannel bus I/O device probe
 */
-static int __devinit depca_mca_probe(struct device *device)
+static int __init depca_mca_probe(struct device *device)
 {
 	unsigned char pos[2];
 	unsigned char where;
@@ -1458,7 +1457,7 @@ static int __devinit depca_mca_probe(struct device *device)
 ** ISA bus I/O device probe
 */
 
-static void __devinit depca_platform_probe (void)
+static void __init depca_platform_probe (void)
 {
 	int i;
 	struct platform_device *pldev;
@@ -1498,7 +1497,7 @@ static void __devinit depca_platform_probe (void)
 	}
 }
 
-static enum depca_type __devinit depca_shmem_probe (ulong *mem_start)
+static enum depca_type __init depca_shmem_probe (ulong *mem_start)
 {
 	u_long mem_base[] = DEPCA_RAM_BASE_ADDRESSES;
 	enum depca_type adapter = unknown;
@@ -1559,7 +1558,7 @@ static int __devinit depca_isa_probe (struct platform_device *device)
 */
 
 #ifdef CONFIG_EISA
-static int __devinit depca_eisa_probe (struct device *device)
+static int __init depca_eisa_probe (struct device *device)
 {
 	enum depca_type adapter = unknown;
 	struct eisa_device *edev;
@@ -1630,7 +1629,7 @@ static int __devexit depca_device_remove (struct device *device)
 ** and Boot (readb) ROM. This will also give us a clue to the network RAM
 ** base address.
 */
-static int __devinit DepcaSignature(char *name, u_long base_addr)
+static int __init DepcaSignature(char *name, u_long base_addr)
 {
 	u_int i, j, k;
 	void __iomem *ptr;

+ 3 - 3
drivers/net/dm9000.c

@@ -1157,9 +1157,6 @@ dm9000_open(struct net_device *dev)
 
 	irqflags |= IRQF_SHARED;
 
-	if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
-		return -EAGAIN;
-
 	/* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
 	iow(db, DM9000_GPR, 0);	/* REG_1F bit0 activate phyxcer */
 	mdelay(1); /* delay needs by DM9000B */
@@ -1168,6 +1165,9 @@ dm9000_open(struct net_device *dev)
 	dm9000_reset(db);
 	dm9000_init_dm9000(dev);
 
+	if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
+		return -EAGAIN;
+
 	/* Init driver variable */
 	db->dbug_cnt = 0;
 

+ 3 - 5
drivers/net/hamradio/6pack.c

@@ -456,7 +456,7 @@ out:
  * a block of 6pack data has been received, which can now be decapsulated
  * and sent on to some IP layer for further processing.
  */
-static unsigned int sixpack_receive_buf(struct tty_struct *tty,
+static void sixpack_receive_buf(struct tty_struct *tty,
 	const unsigned char *cp, char *fp, int count)
 {
 	struct sixpack *sp;
@@ -464,11 +464,11 @@ static unsigned int sixpack_receive_buf(struct tty_struct *tty,
 	int count1;
 
 	if (!count)
-		return 0;
+		return;
 
 	sp = sp_get(tty);
 	if (!sp)
-		return -ENODEV;
+		return;
 
 	memcpy(buf, cp, count < sizeof(buf) ? count : sizeof(buf));
 
@@ -487,8 +487,6 @@ static unsigned int sixpack_receive_buf(struct tty_struct *tty,
 
 	sp_put(sp);
 	tty_unthrottle(tty);
-
-	return count1;
 }
 
 /*

+ 4 - 7
drivers/net/hamradio/mkiss.c

@@ -923,14 +923,13 @@ static long mkiss_compat_ioctl(struct tty_struct *tty, struct file *file,
  * a block of data has been received, which can now be decapsulated
  * and sent on to the AX.25 layer for further processing.
  */
-static unsigned int mkiss_receive_buf(struct tty_struct *tty,
-		const unsigned char *cp, char *fp, int count)
+static void mkiss_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+	char *fp, int count)
 {
 	struct mkiss *ax = mkiss_get(tty);
-	int bytes = count;
 
 	if (!ax)
-		return -ENODEV;
+		return;
 
 	/*
 	 * Argh! mtu change time! - costs us the packet part received
@@ -940,7 +939,7 @@ static unsigned int mkiss_receive_buf(struct tty_struct *tty,
 		ax_changedmtu(ax);
 
 	/* Read the characters out of the buffer */
-	while (bytes--) {
+	while (count--) {
 		if (fp != NULL && *fp++) {
 			if (!test_and_set_bit(AXF_ERROR, &ax->flags))
 				ax->dev->stats.rx_errors++;
@@ -953,8 +952,6 @@ static unsigned int mkiss_receive_buf(struct tty_struct *tty,
 
 	mkiss_put(ax);
 	tty_unthrottle(tty);
-
-	return count;
 }
 
 /*

+ 6 - 6
drivers/net/hp100.c

@@ -188,14 +188,14 @@ struct hp100_private {
  *  variables
  */
 #ifdef CONFIG_ISA
-static const char *const hp100_isa_tbl[] __devinitconst = {
+static const char *hp100_isa_tbl[] = {
 	"HWPF150", /* HP J2573 rev A */
 	"HWP1950", /* HP J2573 */
 };
 #endif
 
 #ifdef CONFIG_EISA
-static const struct eisa_device_id hp100_eisa_tbl[] __devinitconst = {
+static struct eisa_device_id hp100_eisa_tbl[] = {
 	{ "HWPF180" }, /* HP J2577 rev A */
 	{ "HWP1920" }, /* HP 27248B */
 	{ "HWP1940" }, /* HP J2577 */
@@ -336,7 +336,7 @@ static __devinit const char *hp100_read_id(int ioaddr)
 }
 
 #ifdef CONFIG_ISA
-static __devinit int hp100_isa_probe1(struct net_device *dev, int ioaddr)
+static __init int hp100_isa_probe1(struct net_device *dev, int ioaddr)
 {
 	const char *sig;
 	int i;
@@ -372,7 +372,7 @@ static __devinit int hp100_isa_probe1(struct net_device *dev, int ioaddr)
  * EISA and PCI are handled by device infrastructure.
  */
 
-static int  __devinit hp100_isa_probe(struct net_device *dev, int addr)
+static int  __init hp100_isa_probe(struct net_device *dev, int addr)
 {
 	int err = -ENODEV;
 
@@ -396,7 +396,7 @@ static int  __devinit hp100_isa_probe(struct net_device *dev, int addr)
 #endif /* CONFIG_ISA */
 
 #if !defined(MODULE) && defined(CONFIG_ISA)
-struct net_device * __devinit hp100_probe(int unit)
+struct net_device * __init hp100_probe(int unit)
 {
 	struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
 	int err;
@@ -2843,7 +2843,7 @@ static void cleanup_dev(struct net_device *d)
 }
 
 #ifdef CONFIG_EISA
-static int __devinit hp100_eisa_probe (struct device *gendev)
+static int __init hp100_eisa_probe (struct device *gendev)
 {
 	struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
 	struct eisa_device *edev = to_eisa_device(gendev);

+ 2 - 2
drivers/net/ibmlana.c

@@ -895,12 +895,12 @@ static int ibmlana_irq;
 static int ibmlana_io;
 static int startslot;		/* counts through slots when probing multiple devices */
 
-static const short ibmlana_adapter_ids[] __devinitconst = {
+static short ibmlana_adapter_ids[] __initdata = {
 	IBM_LANA_ID,
 	0x0000
 };
 
-static const char *const ibmlana_adapter_names[] __devinitconst = {
+static char *ibmlana_adapter_names[] __devinitdata = {
 	"IBM LAN Adapter/A",
 	NULL
 };

+ 7 - 9
drivers/net/irda/irtty-sir.c

@@ -216,23 +216,23 @@ static int irtty_do_write(struct sir_dev *dev, const unsigned char *ptr, size_t
  * usbserial:	urb-complete-interrupt / softint
  */
 
-static unsigned int irtty_receive_buf(struct tty_struct *tty,
-		const unsigned char *cp, char *fp, int count)
+static void irtty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+			      char *fp, int count) 
 {
 	struct sir_dev *dev;
 	struct sirtty_cb *priv = tty->disc_data;
 	int	i;
 
-	IRDA_ASSERT(priv != NULL, return -ENODEV;);
-	IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -EINVAL;);
+	IRDA_ASSERT(priv != NULL, return;);
+	IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;);
 
 	if (unlikely(count==0))		/* yes, this happens */
-		return 0;
+		return;
 
 	dev = priv->dev;
 	if (!dev) {
 		IRDA_WARNING("%s(), not ready yet!\n", __func__);
-		return -ENODEV;
+		return;
 	}
 
 	for (i = 0; i < count; i++) {
@@ -242,13 +242,11 @@ static unsigned int irtty_receive_buf(struct tty_struct *tty,
  		if (fp && *fp++) { 
 			IRDA_DEBUG(0, "Framing or parity error!\n");
 			sirdev_receive(dev, NULL, 0);	/* notify sir_dev (updating stats) */
-			return -EINVAL;
+			return;
  		}
 	}
 
 	sirdev_receive(dev, cp, count);
-
-	return count;
 }
 
 /*

+ 22 - 22
drivers/net/irda/smsc-ircc2.c

@@ -222,19 +222,19 @@ static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 s
 static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self);
 
 /* Probing */
-static int smsc_ircc_look_for_chips(void);
-static const struct smsc_chip * smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type);
-static int smsc_superio_flat(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
-static int smsc_superio_paged(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
-static int smsc_superio_fdc(unsigned short cfg_base);
-static int smsc_superio_lpc(unsigned short cfg_base);
+static int __init smsc_ircc_look_for_chips(void);
+static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type);
+static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
+static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
+static int __init smsc_superio_fdc(unsigned short cfg_base);
+static int __init smsc_superio_lpc(unsigned short cfg_base);
 #ifdef CONFIG_PCI
-static int preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuration *conf);
-static int preconfigure_through_82801(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
-static void preconfigure_ali_port(struct pci_dev *dev,
+static int __init preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuration *conf);
+static int __init preconfigure_through_82801(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
+static void __init preconfigure_ali_port(struct pci_dev *dev,
 					 unsigned short port);
-static int preconfigure_through_ali(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
-static int smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
+static int __init preconfigure_through_ali(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
+static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
 						    unsigned short ircc_fir,
 						    unsigned short ircc_sir,
 						    unsigned char ircc_dma,
@@ -366,7 +366,7 @@ static inline void register_bank(int iobase, int bank)
 }
 
 /* PNP hotplug support */
-static const struct pnp_device_id smsc_ircc_pnp_table[] __devinitconst = {
+static const struct pnp_device_id smsc_ircc_pnp_table[] = {
 	{ .id = "SMCf010", .driver_data = 0 },
 	/* and presumably others */
 	{ }
@@ -515,7 +515,7 @@ static const struct net_device_ops smsc_ircc_netdev_ops = {
  *    Try to open driver instance
  *
  */
-static int __devinit smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq)
+static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq)
 {
 	struct smsc_ircc_cb *self;
 	struct net_device *dev;
@@ -2273,7 +2273,7 @@ static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned sho
 }
 
 
-static int __devinit smsc_access(unsigned short cfg_base, unsigned char reg)
+static int __init smsc_access(unsigned short cfg_base, unsigned char reg)
 {
 	IRDA_DEBUG(1, "%s\n", __func__);
 
@@ -2281,7 +2281,7 @@ static int __devinit smsc_access(unsigned short cfg_base, unsigned char reg)
 	return inb(cfg_base) != reg ? -1 : 0;
 }
 
-static const struct smsc_chip * __devinit smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type)
+static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type)
 {
 	u8 devid, xdevid, rev;
 
@@ -2406,7 +2406,7 @@ static int __init smsc_superio_lpc(unsigned short cfg_base)
 #ifdef CONFIG_PCI
 #define PCIID_VENDOR_INTEL 0x8086
 #define PCIID_VENDOR_ALI 0x10b9
-static const struct smsc_ircc_subsystem_configuration subsystem_configurations[] __devinitconst = {
+static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __initdata = {
 	/*
 	 * Subsystems needing entries:
 	 * 0x10b9:0x1533 0x103c:0x0850 HP nx9010 family
@@ -2532,7 +2532,7 @@ static const struct smsc_ircc_subsystem_configuration subsystem_configurations[]
  * (FIR port, SIR port, FIR DMA, FIR IRQ)
  * through the chip configuration port.
  */
-static int __devinit preconfigure_smsc_chip(struct
+static int __init preconfigure_smsc_chip(struct
 					 smsc_ircc_subsystem_configuration
 					 *conf)
 {
@@ -2633,7 +2633,7 @@ static int __devinit preconfigure_smsc_chip(struct
  * or Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge.
  * They all work the same way!
  */
-static int __devinit preconfigure_through_82801(struct pci_dev *dev,
+static int __init preconfigure_through_82801(struct pci_dev *dev,
 					     struct
 					     smsc_ircc_subsystem_configuration
 					     *conf)
@@ -2786,7 +2786,7 @@ static int __devinit preconfigure_through_82801(struct pci_dev *dev,
  * This is based on reverse-engineering since ALi does not
  * provide any data sheet for the 1533 chip.
  */
-static void __devinit preconfigure_ali_port(struct pci_dev *dev,
+static void __init preconfigure_ali_port(struct pci_dev *dev,
 					 unsigned short port)
 {
 	unsigned char reg;
@@ -2824,7 +2824,7 @@ static void __devinit preconfigure_ali_port(struct pci_dev *dev,
 	IRDA_MESSAGE("Activated ALi 1533 ISA bridge port 0x%04x.\n", port);
 }
 
-static int __devinit preconfigure_through_ali(struct pci_dev *dev,
+static int __init preconfigure_through_ali(struct pci_dev *dev,
 					   struct
 					   smsc_ircc_subsystem_configuration
 					   *conf)
@@ -2837,7 +2837,7 @@ static int __devinit preconfigure_through_ali(struct pci_dev *dev,
 	return preconfigure_smsc_chip(conf);
 }
 
-static int __devinit smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
+static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
 						    unsigned short ircc_fir,
 						    unsigned short ircc_sir,
 						    unsigned char ircc_dma,
@@ -2849,7 +2849,7 @@ static int __devinit smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
 	int ret = 0;
 
 	for_each_pci_dev(dev) {
-		const struct smsc_ircc_subsystem_configuration *conf;
+		struct smsc_ircc_subsystem_configuration *conf;
 
 		/*
 		 * Cache the subsystem vendor/device:

+ 1 - 1
drivers/net/ks8842.c

@@ -661,7 +661,7 @@ static void ks8842_rx_frame(struct net_device *netdev,
 
 	/* check the status */
 	if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
-		struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len);
+		struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len + 3);
 
 		if (skb) {
 

+ 6 - 9
drivers/net/ne3210.c

@@ -80,20 +80,17 @@ static void ne3210_block_output(struct net_device *dev, int count, const unsigne
 
 #define NE3210_DEBUG	0x0
 
-static const unsigned char irq_map[] __devinitconst =
-	{ 15, 12, 11, 10, 9, 7, 5, 3 };
-static const unsigned int shmem_map[] __devinitconst =
-	{ 0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0 };
-static const char *const ifmap[] __devinitconst =
-	{ "UTP", "?", "BNC", "AUI" };
-static const int ifmap_val[] __devinitconst = {
+static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3};
+static unsigned int shmem_map[] __initdata = {0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0};
+static const char *ifmap[] __initdata = {"UTP", "?", "BNC", "AUI"};
+static int ifmap_val[] __initdata = {
 		IF_PORT_10BASET,
 		IF_PORT_UNKNOWN,
 		IF_PORT_10BASE2,
 		IF_PORT_AUI,
 };
 
-static int __devinit ne3210_eisa_probe (struct device *device)
+static int __init ne3210_eisa_probe (struct device *device)
 {
 	unsigned long ioaddr, phys_mem;
 	int i, retval, port_index;
@@ -316,7 +313,7 @@ static void ne3210_block_output(struct net_device *dev, int count,
 	memcpy_toio(shmem, buf, count);
 }
 
-static const struct eisa_device_id ne3210_ids[] __devinitconst = {
+static struct eisa_device_id ne3210_ids[] = {
 	{ "EGL0101" },
 	{ "NVL1801" },
 	{ "" },

+ 2 - 4
drivers/net/ppp_async.c

@@ -340,7 +340,7 @@ ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
 }
 
 /* May sleep, don't call from interrupt level or with interrupts disabled */
-static unsigned int
+static void
 ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
 		  char *cflags, int count)
 {
@@ -348,7 +348,7 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
 	unsigned long flags;
 
 	if (!ap)
-		return -ENODEV;
+		return;
 	spin_lock_irqsave(&ap->recv_lock, flags);
 	ppp_async_input(ap, buf, cflags, count);
 	spin_unlock_irqrestore(&ap->recv_lock, flags);
@@ -356,8 +356,6 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
 		tasklet_schedule(&ap->tsk);
 	ap_put(ap);
 	tty_unthrottle(tty);
-
-	return count;
 }
 
 static void

+ 2 - 4
drivers/net/ppp_synctty.c

@@ -381,7 +381,7 @@ ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
 }
 
 /* May sleep, don't call from interrupt level or with interrupts disabled */
-static unsigned int
+static void
 ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
 		  char *cflags, int count)
 {
@@ -389,7 +389,7 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
 	unsigned long flags;
 
 	if (!ap)
-		return -ENODEV;
+		return;
 	spin_lock_irqsave(&ap->recv_lock, flags);
 	ppp_sync_input(ap, buf, cflags, count);
 	spin_unlock_irqrestore(&ap->recv_lock, flags);
@@ -397,8 +397,6 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
 		tasklet_schedule(&ap->tsk);
 	sp_put(ap);
 	tty_unthrottle(tty);
-
-	return count;
 }
 
 static void

+ 4 - 7
drivers/net/slip.c

@@ -670,17 +670,16 @@ static void sl_setup(struct net_device *dev)
  * in parallel
  */
 
-static unsigned int slip_receive_buf(struct tty_struct *tty,
-		const unsigned char *cp, char *fp, int count)
+static void slip_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+							char *fp, int count)
 {
 	struct slip *sl = tty->disc_data;
-	int bytes = count;
 
 	if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
-		return -ENODEV;
+		return;
 
 	/* Read the characters out of the buffer */
-	while (bytes--) {
+	while (count--) {
 		if (fp && *fp++) {
 			if (!test_and_set_bit(SLF_ERROR, &sl->flags))
 				sl->dev->stats.rx_errors++;
@@ -694,8 +693,6 @@ static unsigned int slip_receive_buf(struct tty_struct *tty,
 #endif
 			slip_unesc(sl, *cp++);
 	}
-
-	return count;
 }
 
 /************************************

+ 3 - 3
drivers/net/smc-mca.c

@@ -156,7 +156,7 @@ static const struct {
    { 14, 15 }
 };
 
-static const short smc_mca_adapter_ids[] __devinitconst = {
+static short smc_mca_adapter_ids[] __initdata = {
 	0x61c8,
 	0x61c9,
 	0x6fc0,
@@ -168,7 +168,7 @@ static const short smc_mca_adapter_ids[] __devinitconst = {
 	0x0000
 };
 
-static const char *const smc_mca_adapter_names[] __devinitconst = {
+static char *smc_mca_adapter_names[] __initdata = {
 	"SMC Ethercard PLUS Elite/A BNC/AUI (WD8013EP/A)",
 	"SMC Ethercard PLUS Elite/A UTP/AUI (WD8013WP/A)",
 	"WD Ethercard PLUS/A (WD8003E/A or WD8003ET/A)",
@@ -199,7 +199,7 @@ static const struct net_device_ops ultramca_netdev_ops = {
 #endif
 };
 
-static int __devinit ultramca_probe(struct device *gen_dev)
+static int __init ultramca_probe(struct device *gen_dev)
 {
 	unsigned short ioaddr;
 	struct net_device *dev;

+ 1 - 1
drivers/net/tg3.c

@@ -5774,7 +5774,7 @@ static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
 			 dma_unmap_addr(txb, mapping),
 			 skb_headlen(skb),
 			 PCI_DMA_TODEVICE);
-	for (i = 0; i <= last; i++) {
+	for (i = 0; i < last; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 		entry = NEXT_TX(entry);

+ 1 - 1
drivers/net/tokenring/madgemc.c

@@ -727,7 +727,7 @@ static int __devexit madgemc_remove(struct device *device)
 	return 0;
 }
 
-static const short madgemc_adapter_ids[] __devinitconst = {
+static short madgemc_adapter_ids[] __initdata = {
 	0x002d,
 	0x0000
 };

+ 2 - 2
drivers/net/tulip/de4x5.c

@@ -1995,7 +1995,7 @@ SetMulticastFilter(struct net_device *dev)
 
 static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST;
 
-static int __devinit de4x5_eisa_probe (struct device *gendev)
+static int __init de4x5_eisa_probe (struct device *gendev)
 {
 	struct eisa_device *edev;
 	u_long iobase;
@@ -2097,7 +2097,7 @@ static int __devexit de4x5_eisa_remove (struct device *device)
 	return 0;
 }
 
-static const struct eisa_device_id de4x5_eisa_ids[] __devinitconst = {
+static struct eisa_device_id de4x5_eisa_ids[] = {
         { "DEC4250", 0 },	/* 0 is the board name index... */
         { "" }
 };

+ 1 - 1
drivers/net/usb/catc.c

@@ -495,7 +495,7 @@ static void catc_ctrl_run(struct catc *catc)
 	if (!q->dir && q->buf && q->len)
 		memcpy(catc->ctrl_buf, q->buf, q->len);
 
-	if ((status = usb_submit_urb(catc->ctrl_urb, GFP_KERNEL)))
+	if ((status = usb_submit_urb(catc->ctrl_urb, GFP_ATOMIC)))
 		err("submit(ctrl_urb) status %d", status);
 }
 

+ 2 - 1
drivers/net/usb/cdc_ncm.c

@@ -54,7 +54,7 @@
 #include <linux/usb/usbnet.h>
 #include <linux/usb/cdc.h>
 
-#define	DRIVER_VERSION				"24-May-2011"
+#define	DRIVER_VERSION				"01-June-2011"
 
 /* CDC NCM subclass 3.2.1 */
 #define USB_CDC_NCM_NDP16_LENGTH_MIN		0x10
@@ -1234,6 +1234,7 @@ static struct usb_driver cdc_ncm_driver = {
 	.disconnect = cdc_ncm_disconnect,
 	.suspend = usbnet_suspend,
 	.resume = usbnet_resume,
+	.reset_resume =	usbnet_resume,
 	.supports_autosuspend = 1,
 };
 

+ 2 - 5
drivers/net/wan/x25_asy.c

@@ -517,18 +517,17 @@ static int x25_asy_close(struct net_device *dev)
  * and sent on to some IP layer for further processing.
  */
 
-static unsigned int x25_asy_receive_buf(struct tty_struct *tty,
+static void x25_asy_receive_buf(struct tty_struct *tty,
 				const unsigned char *cp, char *fp, int count)
 {
 	struct x25_asy *sl = tty->disc_data;
-	int bytes = count;
 
 	if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev))
 		return;
 
 
 	/* Read the characters out of the buffer */
-	while (bytes--) {
+	while (count--) {
 		if (fp && *fp++) {
 			if (!test_and_set_bit(SLF_ERROR, &sl->flags))
 				sl->dev->stats.rx_errors++;
@@ -537,8 +536,6 @@ static unsigned int x25_asy_receive_buf(struct tty_struct *tty,
 		}
 		x25_asy_unesc(sl, *cp++);
 	}
-
-	return count;
 }
 
 /*

+ 0 - 1
drivers/net/wireless/ath/ath9k/Kconfig

@@ -26,7 +26,6 @@ config ATH9K
 config ATH9K_PCI
 	bool "Atheros ath9k PCI/PCIe bus support"
 	depends on ATH9K && PCI
-	default PCI
 	---help---
 	  This option enables the PCI bus support in ath9k.
 

+ 1 - 1
drivers/net/wireless/ath/ath9k/ar9002_calib.c

@@ -829,7 +829,7 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
 	if (AR_SREV_9271(ah)) {
 		if (!ar9285_hw_cl_cal(ah, chan))
 			return false;
-	} else if (AR_SREV_9285_12_OR_LATER(ah)) {
+	} else if (AR_SREV_9285(ah) && AR_SREV_9285_12_OR_LATER(ah)) {
 		if (!ar9285_hw_clc(ah, chan))
 			return false;
 	} else {

+ 8 - 2
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c

@@ -4645,10 +4645,16 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
 	case 1:
 		break;
 	case 2:
-		scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
+		if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN)
+			scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
+		else
+			scaledPower = 0;
 		break;
 	case 3:
-		scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
+		if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN)
+			scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
+		else
+			scaledPower = 0;
 		break;
 	}
 

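scaledPower here is an unsigned 16-bit value (the matching eeprom_9287.c hunk below still clamps it with max((u16)0, ...)), so subtracting the per-chain reduction unconditionally can wrap around to a huge power value whenever the reduction exceeds what is left; the added guard clamps to zero instead. A tiny standalone illustration of the wraparound being avoided, with a made-up reduction constant:

    #include <stdio.h>

    #define DEMO_REDUCE_TWO_CHAIN 6    /* hypothetical per-chain reduction */

    int main(void)
    {
        unsigned short scaled_power = 4;

        /* unguarded: 4 - 6 wraps to 65534 when stored back into 16 bits */
        unsigned short wrapped = scaled_power - DEMO_REDUCE_TWO_CHAIN;

        /* guarded, as in the patch: clamp to zero instead of wrapping */
        unsigned short clamped = scaled_power > DEMO_REDUCE_TWO_CHAIN ?
                                 scaled_power - DEMO_REDUCE_TWO_CHAIN : 0;

        printf("wrapped=%u clamped=%u\n", wrapped, clamped);
        return 0;
    }
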
+ 22 - 0
drivers/net/wireless/ath/ath9k/ar9003_phy.c

@@ -1381,3 +1381,25 @@ void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah)
 		"==== BB update: done ====\n\n");
 }
 EXPORT_SYMBOL(ar9003_hw_bb_watchdog_dbg_info);
+
+void ar9003_hw_disable_phy_restart(struct ath_hw *ah)
+{
+	u32 val;
+
+	/* While receiving unsupported rate frame rx state machine
+	 * gets into a state 0xb and if phy_restart happens in that
+	 * state, BB would go hang. If RXSM is in 0xb state after
+	 * first bb panic, ensure to disable the phy_restart.
+	 */
+	if (!((MS(ah->bb_watchdog_last_status,
+		  AR_PHY_WATCHDOG_RX_OFDM_SM) == 0xb) ||
+	    ah->bb_hang_rx_ofdm))
+		return;
+
+	ah->bb_hang_rx_ofdm = true;
+	val = REG_READ(ah, AR_PHY_RESTART);
+	val &= ~AR_PHY_RESTART_ENA;
+
+	REG_WRITE(ah, AR_PHY_RESTART, val);
+}
+EXPORT_SYMBOL(ar9003_hw_disable_phy_restart);

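The new helper latches ah->bb_hang_rx_ofdm the first time the OFDM RX state machine is seen stuck in state 0xb after a baseband panic, then clears AR_PHY_RESTART_ENA with a read-modify-write so the hardware no longer restarts the PHY on its own; the hw.c and main.c hunks below instead route recovery through the BB-watchdog reset path. A generic latch-and-disable sketch of that pattern, with invented register and flag names and a plain variable standing in for the hardware register:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define DEMO_RESTART_ENA  (1u << 0)    /* invented enable bit */
    #define DEMO_BAD_RX_STATE 0xb

    static uint32_t demo_restart_reg = DEMO_RESTART_ENA;    /* stand-in register */
    static bool demo_hang_latched;

    static void demo_disable_restart(unsigned int rx_state)
    {
        /* do nothing until the bad state has been seen once; stay latched after */
        if (rx_state != DEMO_BAD_RX_STATE && !demo_hang_latched)
            return;

        demo_hang_latched = true;
        demo_restart_reg &= ~DEMO_RESTART_ENA;    /* read-modify-write clear */
    }

    int main(void)
    {
        demo_disable_restart(0x3);
        printf("reg=0x%x latched=%d\n", (unsigned)demo_restart_reg, demo_hang_latched);
        demo_disable_restart(DEMO_BAD_RX_STATE);
        printf("reg=0x%x latched=%d\n", (unsigned)demo_restart_reg, demo_hang_latched);
        return 0;
    }
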
+ 8 - 2
drivers/net/wireless/ath/ath9k/eeprom_9287.c

@@ -524,10 +524,16 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
 	case 1:
 		break;
 	case 2:
-		scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
+		if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN)
+			scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
+		else
+			scaledPower = 0;
 		break;
 	case 3:
-		scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
+		if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN)
+			scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
+		else
+			scaledPower = 0;
 		break;
 	}
 	scaledPower = max((u16)0, scaledPower);

+ 4 - 1
drivers/net/wireless/ath/ath9k/hw.c

@@ -1555,9 +1555,12 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 	if (ah->btcoex_hw.enabled)
 		ath9k_hw_btcoex_enable(ah);
 
-	if (AR_SREV_9300_20_OR_LATER(ah))
+	if (AR_SREV_9300_20_OR_LATER(ah)) {
 		ar9003_hw_bb_watchdog_config(ah);
 
+		ar9003_hw_disable_phy_restart(ah);
+	}
+
 	ath9k_hw_apply_gpio_override(ah);
 
 	return 0;

+ 2 - 0
drivers/net/wireless/ath/ath9k/hw.h

@@ -842,6 +842,7 @@ struct ath_hw {
 
 	u32 bb_watchdog_last_status;
 	u32 bb_watchdog_timeout_ms; /* in ms, 0 to disable */
+	u8 bb_hang_rx_ofdm; /* true if bb hang due to rx_ofdm */
 
 	unsigned int paprd_target_power;
 	unsigned int paprd_training_power;
@@ -990,6 +991,7 @@ void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah);
 void ar9003_hw_bb_watchdog_config(struct ath_hw *ah);
 void ar9003_hw_bb_watchdog_read(struct ath_hw *ah);
 void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah);
+void ar9003_hw_disable_phy_restart(struct ath_hw *ah);
 void ar9003_paprd_enable(struct ath_hw *ah, bool val);
 void ar9003_paprd_populate_single_table(struct ath_hw *ah,
 					struct ath9k_hw_cal_data *caldata,

+ 3 - 1
drivers/net/wireless/ath/ath9k/main.c

@@ -670,7 +670,8 @@ void ath9k_tasklet(unsigned long data)
 	u32 status = sc->intrstatus;
 	u32 rxmask;
 
-	if (status & ATH9K_INT_FATAL) {
+	if ((status & ATH9K_INT_FATAL) ||
+	    (status & ATH9K_INT_BB_WATCHDOG)) {
 		ath_reset(sc, true);
 		return;
 	}
@@ -737,6 +738,7 @@ irqreturn_t ath_isr(int irq, void *dev)
 {
 #define SCHED_INTR (				\
 		ATH9K_INT_FATAL |		\
+		ATH9K_INT_BB_WATCHDOG |		\
 		ATH9K_INT_RXORN |		\
 		ATH9K_INT_RXEOL |		\
 		ATH9K_INT_RX |			\

+ 2 - 1
drivers/net/wireless/ath/ath9k/rc.c

@@ -689,7 +689,8 @@ static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table,
 
 	if (WLAN_RC_PHY_HT(rate_table->info[rix].phy)) {
 		rate->flags |= IEEE80211_TX_RC_MCS;
-		if (WLAN_RC_PHY_40(rate_table->info[rix].phy))
+		if (WLAN_RC_PHY_40(rate_table->info[rix].phy) &&
+		    conf_is_ht40(&txrc->hw->conf))
 			rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
 		if (WLAN_RC_PHY_SGI(rate_table->info[rix].phy))
 			rate->flags |= IEEE80211_TX_RC_SHORT_GI;

+ 1 - 1
drivers/net/wireless/b43/phy_n.c

@@ -3093,7 +3093,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
 	int freq;
 	bool avoid = false;
 	u8 length;
-	u16 tmp, core, type, count, max, numb, last, cmd;
+	u16 tmp, core, type, count, max, numb, last = 0, cmd;
 	const u16 *table;
 	bool phy6or5x;
 

+ 2 - 2
drivers/net/wireless/iwlegacy/iwl-4965-lib.c

@@ -628,11 +628,11 @@ void iwl4965_rx_reply_rx(struct iwl_priv *priv,
 
 	/* rx_status carries information about the packet to mac80211 */
 	rx_status.mactime = le64_to_cpu(phy_res->timestamp);
+	rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
+				IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
 	rx_status.freq =
 		ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
 							rx_status.band);
-	rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
-				IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
 	rx_status.rate_idx =
 		iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
 	rx_status.flag = 0;

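This is purely an ordering fix: rx_status.band is an input to ieee80211_channel_to_frequency() on the following line, so it must be filled in before the frequency is derived rather than after. A trivial standalone sketch of the same use-before-assignment hazard, with invented types and a hypothetical frequency helper:

    #include <stdio.h>

    enum demo_band { DEMO_BAND_2GHZ, DEMO_BAND_5GHZ };

    struct demo_rx_status {
        enum demo_band band;
        int freq;
    };

    /* hypothetical helper: the result depends on which band was chosen */
    static int demo_channel_to_freq(int channel, enum demo_band band)
    {
        return band == DEMO_BAND_2GHZ ? 2407 + 5 * channel : 5000 + 5 * channel;
    }

    int main(void)
    {
        struct demo_rx_status rx = { DEMO_BAND_2GHZ, 0 };

        rx.band = DEMO_BAND_5GHZ;                     /* must be set first... */
        rx.freq = demo_channel_to_freq(36, rx.band);  /* ...before it is used */
        printf("band=%d freq=%d MHz\n", (int)rx.band, rx.freq);
        return 0;
    }
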
+ 1 - 1
drivers/net/wireless/iwlegacy/iwl-4965.c

@@ -1543,7 +1543,7 @@ static void iwl4965_temperature_calib(struct iwl_priv *priv)
 	s32 temp;
 
 	temp = iwl4965_hw_get_temperature(priv);
-	if (temp < 0)
+	if (IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp))
 		return;
 
 	if (priv->temperature != temp) {

+ 18 - 10
drivers/net/wireless/iwlwifi/iwl-6000.c

@@ -603,19 +603,27 @@ struct iwl_cfg iwl6050_2abg_cfg = {
 	IWL_DEVICE_6050,
 };
 
+#define IWL_DEVICE_6150						\
+	.fw_name_pre = IWL6050_FW_PRE,				\
+	.ucode_api_max = IWL6050_UCODE_API_MAX,			\
+	.ucode_api_min = IWL6050_UCODE_API_MIN,			\
+	.ops = &iwl6150_ops,					\
+	.eeprom_ver = EEPROM_6150_EEPROM_VERSION,		\
+	.eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION,	\
+	.base_params = &iwl6050_base_params,			\
+	.need_dc_calib = true,					\
+	.led_mode = IWL_LED_BLINK,				\
+	.internal_wimax_coex = true
+
 struct iwl_cfg iwl6150_bgn_cfg = {
 	.name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
-	.fw_name_pre = IWL6050_FW_PRE,
-	.ucode_api_max = IWL6050_UCODE_API_MAX,
-	.ucode_api_min = IWL6050_UCODE_API_MIN,
-	.eeprom_ver = EEPROM_6150_EEPROM_VERSION,
-	.eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION,
-	.ops = &iwl6150_ops,
-	.base_params = &iwl6050_base_params,
+	IWL_DEVICE_6150,
 	.ht_params = &iwl6000_ht_params,
-	.need_dc_calib = true,
-	.led_mode = IWL_LED_RF_STATE,
-	.internal_wimax_coex = true,
+};
+
+struct iwl_cfg iwl6150_bg_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BG",
+	IWL_DEVICE_6150,
 };
 
 struct iwl_cfg iwl6000_3agn_cfg = {

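The common 6150 fields are hoisted into an IWL_DEVICE_6150 macro so the new iwl6150_bg_cfg entry can share them with iwl6150_bgn_cfg instead of duplicating the whole initializer; the BGN variant only adds .ht_params on top, and note that the shared block also switches led_mode from IWL_LED_RF_STATE to IWL_LED_BLINK. A minimal standalone sketch of sharing designated-initializer fields through a macro, with made-up config fields:

    #include <stdio.h>

    struct demo_cfg {
        const char *name;
        const char *fw_prefix;
        int api_max;
        int ht_enabled;
    };

    /* shared fields, expanded inside every initializer that names the macro */
    #define DEMO_DEVICE_BASE        \
        .fw_prefix = "demo-fw",     \
        .api_max = 5

    static const struct demo_cfg demo_bgn_cfg = {
        .name = "Demo BGN",
        DEMO_DEVICE_BASE,
        .ht_enabled = 1,            /* only the BGN variant enables HT */
    };

    static const struct demo_cfg demo_bg_cfg = {
        .name = "Demo BG",
        DEMO_DEVICE_BASE,
    };

    int main(void)
    {
        printf("%s: fw=%s api=%d ht=%d\n", demo_bgn_cfg.name,
               demo_bgn_cfg.fw_prefix, demo_bgn_cfg.api_max, demo_bgn_cfg.ht_enabled);
        printf("%s: fw=%s api=%d ht=%d\n", demo_bg_cfg.name,
               demo_bg_cfg.fw_prefix, demo_bg_cfg.api_max, demo_bg_cfg.ht_enabled);
        return 0;
    }
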
+ 3 - 3
drivers/net/wireless/iwlwifi/iwl-agn.c

@@ -3831,11 +3831,11 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
 
 /* 6150 WiFi/WiMax Series */
 	{IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)},
-	{IWL_PCI_DEVICE(0x0885, 0x1306, iwl6150_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_bg_cfg)},
 	{IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)},
-	{IWL_PCI_DEVICE(0x0885, 0x1326, iwl6150_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_bg_cfg)},
 	{IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)},
-	{IWL_PCI_DEVICE(0x0886, 0x1316, iwl6150_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_bg_cfg)},
 
 /* 1000 Series WiFi */
 	{IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)},

+ 1 - 0
drivers/net/wireless/iwlwifi/iwl-agn.h

@@ -89,6 +89,7 @@ extern struct iwl_cfg iwl6000_3agn_cfg;
 extern struct iwl_cfg iwl6050_2agn_cfg;
 extern struct iwl_cfg iwl6050_2abg_cfg;
 extern struct iwl_cfg iwl6150_bgn_cfg;
+extern struct iwl_cfg iwl6150_bg_cfg;
 extern struct iwl_cfg iwl1000_bgn_cfg;
 extern struct iwl_cfg iwl1000_bg_cfg;
 extern struct iwl_cfg iwl100_bgn_cfg;

+ 3 - 3
drivers/net/wireless/libertas/cmd.c

@@ -994,6 +994,8 @@ static void lbs_submit_command(struct lbs_private *priv,
 	cmd = cmdnode->cmdbuf;
 
 	spin_lock_irqsave(&priv->driver_lock, flags);
+	priv->seqnum++;
+	cmd->seqnum = cpu_to_le16(priv->seqnum);
 	priv->cur_cmd = cmdnode;
 	spin_unlock_irqrestore(&priv->driver_lock, flags);
 
@@ -1621,11 +1623,9 @@ struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
 	/* Copy the incoming command to the buffer */
 	memcpy(cmdnode->cmdbuf, in_cmd, in_cmd_size);
 
-	/* Set sequence number, clean result, move to buffer */
-	priv->seqnum++;
+	/* Set command, clean result, move to buffer */
 	cmdnode->cmdbuf->command = cpu_to_le16(command);
 	cmdnode->cmdbuf->size    = cpu_to_le16(in_cmd_size);
-	cmdnode->cmdbuf->seqnum  = cpu_to_le16(priv->seqnum);
 	cmdnode->cmdbuf->result  = 0;
 
 	lbs_deb_host("PREP_CMD: command 0x%04x\n", command);

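The sequence number is now assigned in lbs_submit_command(), under priv->driver_lock and right next to the cur_cmd assignment, instead of in __lbs_cmd_async() at queue time; that way the number reflects the order commands are actually handed to the firmware, and the counter update is serialized with the submit path. A small standalone sketch of stamping a sequence number under the same lock that publishes the command, using POSIX threads and invented types:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdint.h>

    struct demo_cmd {
        uint16_t seqnum;
        int opcode;
    };

    static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint16_t demo_seqnum;
    static struct demo_cmd *demo_cur_cmd;

    /* stamp the sequence number at submit time, under the same lock that
     * publishes the command as the current one */
    static void demo_submit(struct demo_cmd *cmd)
    {
        pthread_mutex_lock(&demo_lock);
        demo_seqnum++;
        cmd->seqnum = demo_seqnum;
        demo_cur_cmd = cmd;
        pthread_mutex_unlock(&demo_lock);
    }

    int main(void)
    {
        struct demo_cmd a = { .opcode = 1 }, b = { .opcode = 2 };

        demo_submit(&a);
        demo_submit(&b);
        printf("a.seq=%u b.seq=%u current opcode=%d\n",
               a.seqnum, b.seqnum, demo_cur_cmd->opcode);
        return 0;
    }
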
+ 2 - 2
drivers/net/wireless/mwifiex/sdio.h

@@ -167,8 +167,8 @@
 /* Rx unit register */
 #define CARD_RX_UNIT_REG		0x63
 
-/* Event header Len*/
-#define MWIFIEX_EVENT_HEADER_LEN           8
+/* Event header len w/o 4 bytes of interface header */
+#define MWIFIEX_EVENT_HEADER_LEN           4
 
 /* Max retry number of CMD53 write */
 #define MAX_WRITE_IOMEM_RETRY		2

+ 0 - 1
drivers/net/wireless/rt2x00/Kconfig

@@ -166,7 +166,6 @@ config RT2800USB_RT35XX
 config RT2800USB_RT53XX
        bool "rt2800usb - Include support for rt53xx devices (EXPERIMENTAL)"
        depends on EXPERIMENTAL
-       default y
        ---help---
          This adds support for rt53xx wireless chipset family to the
          rt2800pci driver.

+ 22 - 13
drivers/net/wireless/rtlwifi/pci.c

@@ -669,11 +669,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
 							 &rx_status,
 							 (u8 *) pdesc, skb);
 
-			pci_unmap_single(rtlpci->pdev,
-					 *((dma_addr_t *) skb->cb),
-					 rtlpci->rxbuffersize,
-					 PCI_DMA_FROMDEVICE);
-
 			skb_put(skb, rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
 							 false,
 							 HW_DESC_RXPKT_LEN));
@@ -690,6 +685,21 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
 			hdr = rtl_get_hdr(skb);
 			fc = rtl_get_fc(skb);
 
+			/* try for new buffer - if allocation fails, drop
+			 * frame and reuse old buffer
+			 */
+			new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
+			if (unlikely(!new_skb)) {
+				RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
+					 DBG_DMESG,
+					 ("can't alloc skb for rx\n"));
+				goto done;
+			}
+			pci_unmap_single(rtlpci->pdev,
+					 *((dma_addr_t *) skb->cb),
+					 rtlpci->rxbuffersize,
+					 PCI_DMA_FROMDEVICE);
+
 			if (!stats.crc || !stats.hwerror) {
 				memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
 				       sizeof(rx_status));
@@ -758,15 +768,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
 				rtl_lps_leave(hw);
 			}
 
-			new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
-			if (unlikely(!new_skb)) {
-				RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
-					 DBG_DMESG,
-					 ("can't alloc skb for rx\n"));
-				goto done;
-			}
 			skb = new_skb;
-			/*skb->dev = dev; */
 
 			rtlpci->rx_ring[rx_queue_idx].rx_buf[rtlpci->
 							     rx_ring
@@ -1113,6 +1115,13 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
 
 		rtlpci->rx_ring[rx_queue_idx].idx = 0;
 
+		/* If amsdu_8k is disabled, set buffersize to 4096. This
+		 * change will reduce memory fragmentation.
+		 */
+		if (rtlpci->rxbuffersize > 4096 &&
+		    rtlpriv->rtlhal.disable_amsdu_8k)
+			rtlpci->rxbuffersize = 4096;
+
 		for (i = 0; i < rtlpci->rxringcount; i++) {
 			struct sk_buff *skb =
 			    dev_alloc_skb(rtlpci->rxbuffersize);

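The RX loop now allocates the replacement skb before unmapping and consuming the buffer sitting in the ring: if the allocation fails, the frame is dropped and the old, still-mapped buffer stays in its slot, instead of the slot being left without a buffer. (The second hunk independently caps rxbuffersize at 4096 when 8k A-MSDUs are disabled, to ease memory fragmentation.) A standalone sketch of the allocate-replacement-first pattern, with plain malloc/free standing in for skb and DMA handling:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define DEMO_BUF_SIZE 256

    /* the ring slot always owns exactly one buffer */
    static char *demo_rx_slot;

    static void demo_handle_rx(void)
    {
        char *new_buf = malloc(DEMO_BUF_SIZE);

        if (!new_buf)
            return;    /* drop the frame; the slot keeps its old buffer */

        /* ...here the old buffer's contents would be handed up the stack... */
        free(demo_rx_slot);
        demo_rx_slot = new_buf;    /* refill the slot with the replacement */
    }

    int main(void)
    {
        demo_rx_slot = malloc(DEMO_BUF_SIZE);
        if (!demo_rx_slot)
            return 1;
        strcpy(demo_rx_slot, "frame");
        demo_handle_rx();
        printf("slot still holds a buffer at %p\n", (void *)demo_rx_slot);
        free(demo_rx_slot);
        return 0;
    }
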
+ 3 - 0
drivers/net/wireless/wl12xx/conf.h

@@ -1157,6 +1157,9 @@ struct conf_sched_scan_settings {
 	/* time to wait on the channel for passive scans (in TUs) */
 	u32 dwell_time_passive;
 
+	/* time to wait on the channel for DFS scans (in TUs) */
+	u32 dwell_time_dfs;
+
 	/* number of probe requests to send on each channel in active scans */
 	u8 num_probe_reqs;
 

+ 1 - 0
drivers/net/wireless/wl12xx/main.c

@@ -311,6 +311,7 @@ static struct conf_drv_settings default_conf = {
 		.min_dwell_time_active = 8,
 		.max_dwell_time_active = 30,
 		.dwell_time_passive    = 100,
+		.dwell_time_dfs        = 150,
 		.num_probe_reqs        = 2,
 		.rssi_threshold        = -90,
 		.snr_threshold         = 0,

+ 33 - 16
drivers/net/wireless/wl12xx/scan.c

@@ -331,16 +331,22 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
 	struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
 	int i, j;
 	u32 flags;
+	bool force_passive = !req->n_ssids;
 
 	for (i = 0, j = start;
 	     i < req->n_channels && j < MAX_CHANNELS_ALL_BANDS;
 	     i++) {
 		flags = req->channels[i]->flags;
 
-		if (!(flags & IEEE80211_CHAN_DISABLED) &&
-		    ((flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive) &&
-		    ((flags & IEEE80211_CHAN_RADAR) == radar) &&
-		    (req->channels[i]->band == band)) {
+		if (force_passive)
+			flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+
+		if ((req->channels[i]->band == band) &&
+		    !(flags & IEEE80211_CHAN_DISABLED) &&
+		    (!!(flags & IEEE80211_CHAN_RADAR) == radar) &&
+		    /* if radar is set, we ignore the passive flag */
+		    (radar ||
+		     !!(flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive)) {
 			wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
 				     req->channels[i]->band,
 				     req->channels[i]->center_freq);
@@ -350,7 +356,12 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
 			wl1271_debug(DEBUG_SCAN, "max_power %d",
 				     req->channels[i]->max_power);
 
-			if (flags & IEEE80211_CHAN_PASSIVE_SCAN) {
+			if (flags & IEEE80211_CHAN_RADAR) {
+				channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS;
+				channels[j].passive_duration =
+					cpu_to_le16(c->dwell_time_dfs);
+			}
+			else if (flags & IEEE80211_CHAN_PASSIVE_SCAN) {
 				channels[j].passive_duration =
 					cpu_to_le16(c->dwell_time_passive);
 			} else {
@@ -359,7 +370,7 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
 				channels[j].max_duration =
 					cpu_to_le16(c->max_dwell_time_active);
 			}
-			channels[j].tx_power_att = req->channels[j]->max_power;
+			channels[j].tx_power_att = req->channels[i]->max_power;
 			channels[j].channel = req->channels[i]->hw_value;
 
 			j++;
@@ -386,7 +397,11 @@ wl1271_scan_sched_scan_channels(struct wl1271 *wl,
 		wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
 						    IEEE80211_BAND_2GHZ,
 						    false, false, idx);
-	idx += cfg->active[0];
+	/*
+	 * 5GHz channels always start at position 14, not immediately
+	 * after the last 2.4GHz channel
+	 */
+	idx = 14;
 
 	cfg->passive[1] =
 		wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
@@ -394,22 +409,23 @@ wl1271_scan_sched_scan_channels(struct wl1271 *wl,
 						    false, true, idx);
 	idx += cfg->passive[1];
 
-	cfg->active[1] =
+	cfg->dfs =
 		wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
 						    IEEE80211_BAND_5GHZ,
-						    false, false, 14);
-	idx += cfg->active[1];
+						    true, true, idx);
+	idx += cfg->dfs;
 
-	cfg->dfs =
+	cfg->active[1] =
 		wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
 						    IEEE80211_BAND_5GHZ,
-						    true, false, idx);
-	idx += cfg->dfs;
+						    false, false, idx);
+	idx += cfg->active[1];
 
 	wl1271_debug(DEBUG_SCAN, "    2.4GHz: active %d passive %d",
 		     cfg->active[0], cfg->passive[0]);
 	wl1271_debug(DEBUG_SCAN, "    5GHz: active %d passive %d",
 		     cfg->active[1], cfg->passive[1]);
+	wl1271_debug(DEBUG_SCAN, "    DFS: %d", cfg->dfs);
 
 	return idx;
 }
@@ -421,6 +437,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
 	struct wl1271_cmd_sched_scan_config *cfg = NULL;
 	struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
 	int i, total_channels, ret;
+	bool force_passive = !req->n_ssids;
 
 	wl1271_debug(DEBUG_CMD, "cmd sched_scan scan config");
 
@@ -444,7 +461,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
 	for (i = 0; i < SCAN_MAX_CYCLE_INTERVALS; i++)
 		cfg->intervals[i] = cpu_to_le32(req->interval);
 
-	if (req->ssids[0].ssid_len && req->ssids[0].ssid) {
+	if (!force_passive && req->ssids[0].ssid_len && req->ssids[0].ssid) {
 		cfg->filter_type = SCAN_SSID_FILTER_SPECIFIC;
 		cfg->ssid_len = req->ssids[0].ssid_len;
 		memcpy(cfg->ssid, req->ssids[0].ssid,
@@ -461,7 +478,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
 		goto out;
 	}
 
-	if (cfg->active[0]) {
+	if (!force_passive && cfg->active[0]) {
 		ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid,
 						 req->ssids[0].ssid_len,
 						 ies->ie[IEEE80211_BAND_2GHZ],
@@ -473,7 +490,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
 		}
 	}
 
-	if (cfg->active[1]) {
+	if (!force_passive && cfg->active[1]) {
 		ret = wl1271_cmd_build_probe_req(wl,  req->ssids[0].ssid,
 						 req->ssids[0].ssid_len,
 						 ies->ie[IEEE80211_BAND_5GHZ],

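Three behavioural changes hide in this scheduled-scan churn: a request without SSIDs forces every channel to be scanned passively (force_passive), radar channels are now tagged SCAN_CHANNEL_FLAGS_DFS and given their own dwell time instead of being folded into the passive set, and the 5 GHz list starts at the fixed offset 14 with DFS channels placed before the active ones; the tx_power_att indexing bug (req->channels[j] vs [i]) is fixed along the way. A small standalone sketch of the force-passive classification, with invented flag bits:

    #include <stdio.h>

    #define DEMO_CHAN_PASSIVE (1u << 0)    /* invented flag bits */
    #define DEMO_CHAN_RADAR   (1u << 1)

    struct demo_channel {
        int hw_value;
        unsigned int flags;
    };

    /* classify one channel of a scheduled-scan request carrying n_ssids SSIDs */
    static const char *demo_classify(const struct demo_channel *ch, int n_ssids)
    {
        unsigned int flags = ch->flags;

        if (!n_ssids)                      /* nothing to probe for, so */
            flags |= DEMO_CHAN_PASSIVE;    /* force a passive scan */

        if (flags & DEMO_CHAN_RADAR)
            return "dfs";
        return (flags & DEMO_CHAN_PASSIVE) ? "passive" : "active";
    }

    int main(void)
    {
        struct demo_channel ch = { .hw_value = 36, .flags = 0 };

        printf("with ssids: %s\n", demo_classify(&ch, 1));
        printf("without ssids: %s\n", demo_classify(&ch, 0));
        return 0;
    }
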
+ 3 - 0
drivers/net/wireless/wl12xx/scan.h

@@ -137,6 +137,9 @@ enum {
 	SCAN_BSS_TYPE_ANY,
 };
 
+#define SCAN_CHANNEL_FLAGS_DFS		BIT(0)
+#define SCAN_CHANNEL_FLAGS_DFS_ENABLED	BIT(1)
+
 struct conn_scan_ch_params {
 	__le16 min_duration;
 	__le16 max_duration;

+ 42 - 11
drivers/net/wireless/zd1211rw/zd_usb.c

@@ -1533,6 +1533,31 @@ static void __exit usb_exit(void)
 module_init(usb_init);
 module_exit(usb_exit);
 
+static int zd_ep_regs_out_msg(struct usb_device *udev, void *data, int len,
+			      int *actual_length, int timeout)
+{
+	/* In USB 2.0 mode EP_REGS_OUT endpoint is interrupt type. However in
+	 * USB 1.1 mode endpoint is bulk. Select correct type URB by endpoint
+	 * descriptor.
+	 */
+	struct usb_host_endpoint *ep;
+	unsigned int pipe;
+
+	pipe = usb_sndintpipe(udev, EP_REGS_OUT);
+	ep = usb_pipe_endpoint(udev, pipe);
+	if (!ep)
+		return -EINVAL;
+
+	if (usb_endpoint_xfer_int(&ep->desc)) {
+		return usb_interrupt_msg(udev, pipe, data, len,
+					 actual_length, timeout);
+	} else {
+		pipe = usb_sndbulkpipe(udev, EP_REGS_OUT);
+		return usb_bulk_msg(udev, pipe, data, len, actual_length,
+				    timeout);
+	}
+}
+
 static int usb_int_regs_length(unsigned int count)
 {
 	return sizeof(struct usb_int_regs) + count * sizeof(struct reg_data);
@@ -1648,15 +1673,14 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
 
 	udev = zd_usb_to_usbdev(usb);
 	prepare_read_regs_int(usb);
-	r = usb_interrupt_msg(udev, usb_sndintpipe(udev, EP_REGS_OUT),
-			      req, req_len, &actual_req_len, 50 /* ms */);
+	r = zd_ep_regs_out_msg(udev, req, req_len, &actual_req_len, 50 /*ms*/);
 	if (r) {
 		dev_dbg_f(zd_usb_dev(usb),
-			"error in usb_interrupt_msg(). Error number %d\n", r);
+			"error in zd_ep_regs_out_msg(). Error number %d\n", r);
 		goto error;
 	}
 	if (req_len != actual_req_len) {
-		dev_dbg_f(zd_usb_dev(usb), "error in usb_interrupt_msg()\n"
+		dev_dbg_f(zd_usb_dev(usb), "error in zd_ep_regs_out_msg()\n"
 			" req_len %d != actual_req_len %d\n",
 			req_len, actual_req_len);
 		r = -EIO;
@@ -1818,9 +1842,17 @@ int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
 		rw->value = cpu_to_le16(ioreqs[i].value);
 	}
 
-	usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT),
-			 req, req_len, iowrite16v_urb_complete, usb,
-			 ep->desc.bInterval);
+	/* In USB 2.0 mode endpoint is interrupt type. However in USB 1.1 mode
+	 * endpoint is bulk. Select correct type URB by endpoint descriptor.
+	 */
+	if (usb_endpoint_xfer_int(&ep->desc))
+		usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT),
+				 req, req_len, iowrite16v_urb_complete, usb,
+				 ep->desc.bInterval);
+	else
+		usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_REGS_OUT),
+				  req, req_len, iowrite16v_urb_complete, usb);
+
 	urb->transfer_flags |= URB_FREE_BUFFER;
 
 	/* Submit previous URB */
@@ -1924,15 +1956,14 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
 	}
 
 	udev = zd_usb_to_usbdev(usb);
-	r = usb_interrupt_msg(udev, usb_sndintpipe(udev, EP_REGS_OUT),
-			      req, req_len, &actual_req_len, 50 /* ms */);
+	r = zd_ep_regs_out_msg(udev, req, req_len, &actual_req_len, 50 /*ms*/);
 	if (r) {
 		dev_dbg_f(zd_usb_dev(usb),
-			"error in usb_interrupt_msg(). Error number %d\n", r);
+			"error in zd_ep_regs_out_msg(). Error number %d\n", r);
 		goto out;
 	}
 	if (req_len != actual_req_len) {
-		dev_dbg_f(zd_usb_dev(usb), "error in usb_interrupt_msg()"
+		dev_dbg_f(zd_usb_dev(usb), "error in zd_ep_regs_out_msg()"
 			" req_len %d != actual_req_len %d\n",
 			req_len, actual_req_len);
 		r = -EIO;

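zd_ep_regs_out_msg() exists because the same EP_REGS_OUT endpoint is an interrupt endpoint when the device runs in USB 2.0 mode but a bulk endpoint in USB 1.1 mode, so the transfer type has to be chosen from the endpoint descriptor at run time; the asynchronous iowrite16v path gets the same treatment via usb_fill_int_urb()/usb_fill_bulk_urb(). A standalone sketch of picking the transfer type from a descriptor, assuming the standard bmAttributes encoding (bits 1:0, where 2 is bulk and 3 is interrupt) and an invented descriptor struct:

    #include <stdio.h>
    #include <stdint.h>

    #define DEMO_XFERTYPE_MASK 0x03    /* transfer type lives in bits 1:0 */
    #define DEMO_XFERTYPE_BULK 0x02
    #define DEMO_XFERTYPE_INT  0x03

    struct demo_ep_desc {
        uint8_t bEndpointAddress;
        uint8_t bmAttributes;
    };

    static const char *demo_pick_transfer(const struct demo_ep_desc *desc)
    {
        if ((desc->bmAttributes & DEMO_XFERTYPE_MASK) == DEMO_XFERTYPE_INT)
            return "interrupt";
        return "bulk";
    }

    int main(void)
    {
        struct demo_ep_desc hs = { .bEndpointAddress = 0x01,
                                   .bmAttributes = DEMO_XFERTYPE_INT };
        struct demo_ep_desc fs = { .bEndpointAddress = 0x01,
                                   .bmAttributes = DEMO_XFERTYPE_BULK };

        printf("USB 2.0 mode: %s, USB 1.1 mode: %s\n",
               demo_pick_transfer(&hs), demo_pick_transfer(&fs));
        return 0;
    }
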
+ 1 - 1
drivers/rtc/rtc-m41t93.c

@@ -189,7 +189,7 @@ static int __devinit m41t93_probe(struct spi_device *spi)
 
 static int __devexit m41t93_remove(struct spi_device *spi)
 {
-	struct rtc_device *rtc = platform_get_drvdata(spi);
+	struct rtc_device *rtc = spi_get_drvdata(spi);
 
 	if (rtc)
 		rtc_device_unregister(rtc);

+ 1 - 1
drivers/scsi/scsi_scan.c

@@ -297,7 +297,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
 		kfree(sdev);
 		goto out;
 	}
-
+	blk_get_queue(sdev->request_queue);
 	sdev->request_queue->queuedata = sdev;
 	scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
 

+ 1 - 0
drivers/scsi/scsi_sysfs.c

@@ -322,6 +322,7 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
 		kfree(evt);
 	}
 
+	blk_put_queue(sdev->request_queue);
 	/* NULL queue means the device can't be used */
 	sdev->request_queue = NULL;
 

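These two one-liners pair up: scsi_alloc_sdev() now takes a reference on the request queue it stores in sdev->request_queue, and the sdev release path drops it with blk_put_queue() just before clearing the pointer, so the queue cannot go away while the scsi_device still points at it. A minimal standalone sketch of the pattern (take a reference when storing the pointer, drop it where the pointer is cleared), with an invented refcounted object:

    #include <stdlib.h>

    struct demo_queue {
        int refcount;
    };

    struct demo_device {
        struct demo_queue *queue;
    };

    static void demo_get_queue(struct demo_queue *q)
    {
        q->refcount++;
    }

    static void demo_put_queue(struct demo_queue *q)
    {
        if (--q->refcount == 0)
            free(q);
    }

    static void demo_attach(struct demo_device *dev, struct demo_queue *q)
    {
        demo_get_queue(q);    /* pin the queue for as long as we point at it */
        dev->queue = q;
    }

    static void demo_release(struct demo_device *dev)
    {
        demo_put_queue(dev->queue);
        dev->queue = NULL;    /* a NULL queue means the device can't be used */
    }

    int main(void)
    {
        struct demo_queue *q = calloc(1, sizeof(*q));
        struct demo_device dev = { NULL };

        if (!q)
            return 1;
        q->refcount = 1;      /* the creator's own reference */
        demo_attach(&dev, q);
        demo_release(&dev);   /* drops the device's reference */
        demo_put_queue(q);    /* drops the creator's reference and frees q */
        return 0;
    }
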
+ 291 - 12
drivers/spi/spi-ep93xx.c

@@ -1,7 +1,7 @@
 /*
  * Driver for Cirrus Logic EP93xx SPI controller.
  *
- * Copyright (c) 2010 Mika Westerberg
+ * Copyright (C) 2010-2011 Mika Westerberg
  *
  * Explicit FIFO handling code was inspired by amba-pl022 driver.
  *
@@ -21,13 +21,16 @@
 #include <linux/err.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/dmaengine.h>
 #include <linux/bitops.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/workqueue.h>
 #include <linux/sched.h>
+#include <linux/scatterlist.h>
 #include <linux/spi/spi.h>
 
+#include <mach/dma.h>
 #include <mach/ep93xx_spi.h>
 
 #define SSPCR0			0x0000
@@ -71,6 +74,7 @@
  * @pdev: pointer to platform device
  * @clk: clock for the controller
  * @regs_base: pointer to ioremap()'d registers
+ * @sspdr_phys: physical address of the SSPDR register
  * @irq: IRQ number used by the driver
  * @min_rate: minimum clock rate (in Hz) supported by the controller
  * @max_rate: maximum clock rate (in Hz) supported by the controller
@@ -84,6 +88,14 @@
  * @rx: current byte in transfer to receive
  * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
  *              frame decreases this level and sending one frame increases it.
+ * @dma_rx: RX DMA channel
+ * @dma_tx: TX DMA channel
+ * @dma_rx_data: RX parameters passed to the DMA engine
+ * @dma_tx_data: TX parameters passed to the DMA engine
+ * @rx_sgt: sg table for RX transfers
+ * @tx_sgt: sg table for TX transfers
+ * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
+ *            the client
  *
  * This structure holds EP93xx SPI controller specific information. When
  * @running is %true, driver accepts transfer requests from protocol drivers.
@@ -100,6 +112,7 @@ struct ep93xx_spi {
 	const struct platform_device	*pdev;
 	struct clk			*clk;
 	void __iomem			*regs_base;
+	unsigned long			sspdr_phys;
 	int				irq;
 	unsigned long			min_rate;
 	unsigned long			max_rate;
@@ -112,6 +125,13 @@ struct ep93xx_spi {
 	size_t				tx;
 	size_t				rx;
 	size_t				fifo_level;
+	struct dma_chan			*dma_rx;
+	struct dma_chan			*dma_tx;
+	struct ep93xx_dma_data		dma_rx_data;
+	struct ep93xx_dma_data		dma_tx_data;
+	struct sg_table			rx_sgt;
+	struct sg_table			tx_sgt;
+	void				*zeropage;
 };
 
 /**
@@ -496,14 +516,195 @@ static int ep93xx_spi_read_write(struct ep93xx_spi *espi)
 		espi->fifo_level++;
 	}
 
-	if (espi->rx == t->len) {
-		msg->actual_length += t->len;
+	if (espi->rx == t->len)
 		return 0;
-	}
 
 	return -EINPROGRESS;
 }
 
+static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi)
+{
+	/*
+	 * Now everything is set up for the current transfer. We prime the TX
+	 * FIFO, enable interrupts, and wait for the transfer to complete.
+	 */
+	if (ep93xx_spi_read_write(espi)) {
+		ep93xx_spi_enable_interrupts(espi);
+		wait_for_completion(&espi->wait);
+	}
+}
+
+/**
+ * ep93xx_spi_dma_prepare() - prepares a DMA transfer
+ * @espi: ep93xx SPI controller struct
+ * @dir: DMA transfer direction
+ *
+ * Function configures the DMA, maps the buffer and prepares the DMA
+ * descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR
+ * in case of failure.
+ */
+static struct dma_async_tx_descriptor *
+ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
+{
+	struct spi_transfer *t = espi->current_msg->state;
+	struct dma_async_tx_descriptor *txd;
+	enum dma_slave_buswidth buswidth;
+	struct dma_slave_config conf;
+	struct scatterlist *sg;
+	struct sg_table *sgt;
+	struct dma_chan *chan;
+	const void *buf, *pbuf;
+	size_t len = t->len;
+	int i, ret, nents;
+
+	if (bits_per_word(espi) > 8)
+		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+	else
+		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+
+	memset(&conf, 0, sizeof(conf));
+	conf.direction = dir;
+
+	if (dir == DMA_FROM_DEVICE) {
+		chan = espi->dma_rx;
+		buf = t->rx_buf;
+		sgt = &espi->rx_sgt;
+
+		conf.src_addr = espi->sspdr_phys;
+		conf.src_addr_width = buswidth;
+	} else {
+		chan = espi->dma_tx;
+		buf = t->tx_buf;
+		sgt = &espi->tx_sgt;
+
+		conf.dst_addr = espi->sspdr_phys;
+		conf.dst_addr_width = buswidth;
+	}
+
+	ret = dmaengine_slave_config(chan, &conf);
+	if (ret)
+		return ERR_PTR(ret);
+
+	/*
+	 * We need to split the transfer into PAGE_SIZE'd chunks. This is
+	 * because we are using @espi->zeropage to provide a zero RX buffer
+	 * for the TX transfers and we have only allocated one page for that.
+	 *
+	 * For performance reasons we allocate a new sg_table only when
+	 * needed. Otherwise we will re-use the current one. Eventually the
+	 * last sg_table is released in ep93xx_spi_release_dma().
+	 */
+
+	nents = DIV_ROUND_UP(len, PAGE_SIZE);
+	if (nents != sgt->nents) {
+		sg_free_table(sgt);
+
+		ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
+		if (ret)
+			return ERR_PTR(ret);
+	}
+
+	pbuf = buf;
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		size_t bytes = min_t(size_t, len, PAGE_SIZE);
+
+		if (buf) {
+			sg_set_page(sg, virt_to_page(pbuf), bytes,
+				    offset_in_page(pbuf));
+		} else {
+			sg_set_page(sg, virt_to_page(espi->zeropage),
+				    bytes, 0);
+		}
+
+		pbuf += bytes;
+		len -= bytes;
+	}
+
+	if (WARN_ON(len)) {
+		dev_warn(&espi->pdev->dev, "len = %d expected 0!", len);
+		return ERR_PTR(-EINVAL);
+	}
+
+	nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
+	if (!nents)
+		return ERR_PTR(-ENOMEM);
+
+	txd = chan->device->device_prep_slave_sg(chan, sgt->sgl, nents,
+						 dir, DMA_CTRL_ACK);
+	if (!txd) {
+		dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
+		return ERR_PTR(-ENOMEM);
+	}
+	return txd;
+}
+
+/**
+ * ep93xx_spi_dma_finish() - finishes with a DMA transfer
+ * @espi: ep93xx SPI controller struct
+ * @dir: DMA transfer direction
+ *
+ * Function finishes with the DMA transfer. After this, the DMA buffer is
+ * unmapped.
+ */
+static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi,
+				  enum dma_data_direction dir)
+{
+	struct dma_chan *chan;
+	struct sg_table *sgt;
+
+	if (dir == DMA_FROM_DEVICE) {
+		chan = espi->dma_rx;
+		sgt = &espi->rx_sgt;
+	} else {
+		chan = espi->dma_tx;
+		sgt = &espi->tx_sgt;
+	}
+
+	dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
+}
+
+static void ep93xx_spi_dma_callback(void *callback_param)
+{
+	complete(callback_param);
+}
+
+static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
+{
+	struct spi_message *msg = espi->current_msg;
+	struct dma_async_tx_descriptor *rxd, *txd;
+
+	rxd = ep93xx_spi_dma_prepare(espi, DMA_FROM_DEVICE);
+	if (IS_ERR(rxd)) {
+		dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
+		msg->status = PTR_ERR(rxd);
+		return;
+	}
+
+	txd = ep93xx_spi_dma_prepare(espi, DMA_TO_DEVICE);
+	if (IS_ERR(txd)) {
+		ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE);
+		dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(rxd));
+		msg->status = PTR_ERR(txd);
+		return;
+	}
+
+	/* We are ready when RX is done */
+	rxd->callback = ep93xx_spi_dma_callback;
+	rxd->callback_param = &espi->wait;
+
+	/* Now submit both descriptors and wait while they finish */
+	dmaengine_submit(rxd);
+	dmaengine_submit(txd);
+
+	dma_async_issue_pending(espi->dma_rx);
+	dma_async_issue_pending(espi->dma_tx);
+
+	wait_for_completion(&espi->wait);
+
+	ep93xx_spi_dma_finish(espi, DMA_TO_DEVICE);
+	ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE);
+}
+
 /**
  * ep93xx_spi_process_transfer() - processes one SPI transfer
  * @espi: ep93xx SPI controller struct
@@ -556,13 +757,14 @@ static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
 	espi->tx = 0;
 
 	/*
-	 * Now everything is set up for the current transfer. We prime the TX
-	 * FIFO, enable interrupts, and wait for the transfer to complete.
+	 * There is no point of setting up DMA for the transfers which will
+	 * fit into the FIFO and can be transferred with a single interrupt.
+	 * So in these cases we will be using PIO and don't bother for DMA.
 	 */
-	if (ep93xx_spi_read_write(espi)) {
-		ep93xx_spi_enable_interrupts(espi);
-		wait_for_completion(&espi->wait);
-	}
+	if (espi->dma_rx && t->len > SPI_FIFO_SIZE)
+		ep93xx_spi_dma_transfer(espi);
+	else
+		ep93xx_spi_pio_transfer(espi);
 
 	/*
 	 * In case of error during transmit, we bail out from processing
@@ -571,6 +773,8 @@ static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
 	if (msg->status)
 		return;
 
+	msg->actual_length += t->len;
+
 	/*
 	 * After this transfer is finished, perform any possible
 	 * post-transfer actions requested by the protocol driver.
@@ -752,6 +956,75 @@ static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
+{
+	if (ep93xx_dma_chan_is_m2p(chan))
+		return false;
+
+	chan->private = filter_param;
+	return true;
+}
+
+static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
+{
+	dma_cap_mask_t mask;
+	int ret;
+
+	espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
+	if (!espi->zeropage)
+		return -ENOMEM;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	espi->dma_rx_data.port = EP93XX_DMA_SSP;
+	espi->dma_rx_data.direction = DMA_FROM_DEVICE;
+	espi->dma_rx_data.name = "ep93xx-spi-rx";
+
+	espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
+					   &espi->dma_rx_data);
+	if (!espi->dma_rx) {
+		ret = -ENODEV;
+		goto fail_free_page;
+	}
+
+	espi->dma_tx_data.port = EP93XX_DMA_SSP;
+	espi->dma_tx_data.direction = DMA_TO_DEVICE;
+	espi->dma_tx_data.name = "ep93xx-spi-tx";
+
+	espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
+					   &espi->dma_tx_data);
+	if (!espi->dma_tx) {
+		ret = -ENODEV;
+		goto fail_release_rx;
+	}
+
+	return 0;
+
+fail_release_rx:
+	dma_release_channel(espi->dma_rx);
+	espi->dma_rx = NULL;
+fail_free_page:
+	free_page((unsigned long)espi->zeropage);
+
+	return ret;
+}
+
+static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
+{
+	if (espi->dma_rx) {
+		dma_release_channel(espi->dma_rx);
+		sg_free_table(&espi->rx_sgt);
+	}
+	if (espi->dma_tx) {
+		dma_release_channel(espi->dma_tx);
+		sg_free_table(&espi->tx_sgt);
+	}
+
+	if (espi->zeropage)
+		free_page((unsigned long)espi->zeropage);
+}
+
 static int __init ep93xx_spi_probe(struct platform_device *pdev)
 {
 	struct spi_master *master;
@@ -818,6 +1091,7 @@ static int __init ep93xx_spi_probe(struct platform_device *pdev)
 		goto fail_put_clock;
 	}
 
+	espi->sspdr_phys = res->start + SSPDR;
 	espi->regs_base = ioremap(res->start, resource_size(res));
 	if (!espi->regs_base) {
 		dev_err(&pdev->dev, "failed to map resources\n");
@@ -832,10 +1106,13 @@ static int __init ep93xx_spi_probe(struct platform_device *pdev)
 		goto fail_unmap_regs;
 	}
 
+	if (info->use_dma && ep93xx_spi_setup_dma(espi))
+		dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");
+
 	espi->wq = create_singlethread_workqueue("ep93xx_spid");
 	if (!espi->wq) {
 		dev_err(&pdev->dev, "unable to create workqueue\n");
-		goto fail_free_irq;
+		goto fail_free_dma;
 	}
 	INIT_WORK(&espi->msg_work, ep93xx_spi_work);
 	INIT_LIST_HEAD(&espi->msg_queue);
@@ -857,7 +1134,8 @@ static int __init ep93xx_spi_probe(struct platform_device *pdev)
 
 fail_free_queue:
 	destroy_workqueue(espi->wq);
-fail_free_irq:
+fail_free_dma:
+	ep93xx_spi_release_dma(espi);
 	free_irq(espi->irq, espi);
 fail_unmap_regs:
 	iounmap(espi->regs_base);
@@ -901,6 +1179,7 @@ static int __exit ep93xx_spi_remove(struct platform_device *pdev)
 	}
 	spin_unlock_irq(&espi->lock);
 
+	ep93xx_spi_release_dma(espi);
 	free_irq(espi->irq, espi);
 	iounmap(espi->regs_base);
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

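Two details of the new ep93xx DMA path are easy to miss: transfers that fit in the FIFO (t->len <= SPI_FIFO_SIZE) still use PIO, since programming the DMA engine would cost more than a single interrupt-driven burst, and ep93xx_spi_dma_prepare() slices every buffer into PAGE_SIZE chunks because the TX-only case substitutes the single zeroed page in espi->zeropage for the missing RX buffer. A standalone sketch of that page-sized scatter walk, with the kernel helpers replaced by plain C:

    #include <stdio.h>
    #include <stddef.h>

    #define DEMO_PAGE_SIZE 4096u

    /* round-up division, like the kernel's DIV_ROUND_UP() */
    #define DEMO_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* walk a buffer in page-sized chunks; a NULL buf models the TX-only case,
     * where one shared zero page is substituted for every chunk */
    static void demo_fill_sg(const char *buf, size_t len)
    {
        size_t nents = DEMO_DIV_ROUND_UP(len, DEMO_PAGE_SIZE);
        const char *p = buf;
        size_t i;

        printf("need %zu entries for %zu bytes\n", nents, len);
        for (i = 0; i < nents; i++) {
            size_t bytes = len < DEMO_PAGE_SIZE ? len : DEMO_PAGE_SIZE;

            printf("  entry %zu: %zu bytes from %s\n", i, bytes,
                   p ? "the caller's buffer" : "the shared zero page");
            if (p)
                p += bytes;
            len -= bytes;
        }
    }

    int main(void)
    {
        demo_fill_sg(NULL, 10000);    /* TX-only style transfer of 10000 bytes */
        return 0;
    }
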
+ 2 - 4
drivers/tty/n_gsm.c

@@ -2128,8 +2128,8 @@ static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
 	gsm->tty = NULL;
 }
 
-static unsigned int gsmld_receive_buf(struct tty_struct *tty,
-		const unsigned char *cp, char *fp, int count)
+static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+			      char *fp, int count)
 {
 	struct gsm_mux *gsm = tty->disc_data;
 	const unsigned char *dp;
@@ -2162,8 +2162,6 @@ static unsigned int gsmld_receive_buf(struct tty_struct *tty,
 	}
 	/* FASYNC if needed ? */
 	/* If clogged call tty_throttle(tty); */
-
-	return count;
 }
 
 /**

+ 8 - 10
drivers/tty/n_hdlc.c

@@ -188,8 +188,8 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
 				    poll_table *wait);
 static int n_hdlc_tty_open(struct tty_struct *tty);
 static void n_hdlc_tty_close(struct tty_struct *tty);
-static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
-		const __u8 *cp, char *fp, int count);
+static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *cp,
+			       char *fp, int count);
 static void n_hdlc_tty_wakeup(struct tty_struct *tty);
 
 #define bset(p,b)	((p)[(b) >> 5] |= (1 << ((b) & 0x1f)))
@@ -509,8 +509,8 @@ static void n_hdlc_tty_wakeup(struct tty_struct *tty)
  * Called by tty low level driver when receive data is available. Data is
  * interpreted as one HDLC frame.
  */
-static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
-		const __u8 *data, char *flags, int count)
+static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
+			       char *flags, int count)
 {
 	register struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
 	register struct n_hdlc_buf *buf;
@@ -521,20 +521,20 @@ static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
 		
 	/* This can happen if stuff comes in on the backup tty */
 	if (!n_hdlc || tty != n_hdlc->tty)
-		return -ENODEV;
+		return;
 		
 	/* verify line is using HDLC discipline */
 	if (n_hdlc->magic != HDLC_MAGIC) {
 		printk("%s(%d) line not using HDLC discipline\n",
 			__FILE__,__LINE__);
-		return -EINVAL;
+		return;
 	}
 	
 	if ( count>maxframe ) {
 		if (debuglevel >= DEBUG_LEVEL_INFO)	
 			printk("%s(%d) rx count>maxframesize, data discarded\n",
 			       __FILE__,__LINE__);
-		return -EINVAL;
+		return;
 	}
 
 	/* get a free HDLC buffer */	
@@ -550,7 +550,7 @@ static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
 		if (debuglevel >= DEBUG_LEVEL_INFO)	
 			printk("%s(%d) no more rx buffers, data discarded\n",
 			       __FILE__,__LINE__);
-		return -EINVAL;
+		return;
 	}
 		
 	/* copy received data to HDLC buffer */
@@ -565,8 +565,6 @@ static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
 	if (n_hdlc->tty->fasync != NULL)
 		kill_fasync (&n_hdlc->tty->fasync, SIGIO, POLL_IN);
 
-	return count;
-
 }	/* end of n_hdlc_tty_receive() */
 
 /**

+ 4 - 6
drivers/tty/n_r3964.c

@@ -139,8 +139,8 @@ static int r3964_ioctl(struct tty_struct *tty, struct file *file,
 static void r3964_set_termios(struct tty_struct *tty, struct ktermios *old);
 static unsigned int r3964_poll(struct tty_struct *tty, struct file *file,
 		struct poll_table_struct *wait);
-static unsigned int r3964_receive_buf(struct tty_struct *tty,
-		const unsigned char *cp, char *fp, int count);
+static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+		char *fp, int count);
 
 static struct tty_ldisc_ops tty_ldisc_N_R3964 = {
 	.owner = THIS_MODULE,
@@ -1239,8 +1239,8 @@ static unsigned int r3964_poll(struct tty_struct *tty, struct file *file,
 	return result;
 }
 
-static unsigned int r3964_receive_buf(struct tty_struct *tty,
-		const unsigned char *cp, char *fp, int count)
+static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+			char *fp, int count)
 {
 	struct r3964_info *pInfo = tty->disc_data;
 	const unsigned char *p;
@@ -1257,8 +1257,6 @@ static unsigned int r3964_receive_buf(struct tty_struct *tty,
 		}
 
 	}
-
-	return count;
 }
 
 MODULE_LICENSE("GPL");

+ 47 - 14
drivers/tty/n_tty.c

@@ -81,6 +81,38 @@ static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
 	return put_user(x, ptr);
 }
 
+/**
+ *	n_tty_set__room	-	receive space
+ *	@tty: terminal
+ *
+ *	Called by the driver to find out how much data it is
+ *	permitted to feed to the line discipline without any being lost
+ *	and thus to manage flow control. Not serialized. Answers for the
+ *	"instant".
+ */
+
+static void n_tty_set_room(struct tty_struct *tty)
+{
+	/* tty->read_cnt is not read locked ? */
+	int	left = N_TTY_BUF_SIZE - tty->read_cnt - 1;
+	int old_left;
+
+	/*
+	 * If we are doing input canonicalization, and there are no
+	 * pending newlines, let characters through without limit, so
+	 * that erase characters will be handled.  Other excess
+	 * characters will be beeped.
+	 */
+	if (left <= 0)
+		left = tty->icanon && !tty->canon_data;
+	old_left = tty->receive_room;
+	tty->receive_room = left;
+
+	/* Did this open up the receive buffer? We may need to flip */
+	if (left && !old_left)
+		schedule_work(&tty->buf.work);
+}
+
 static void put_tty_queue_nolock(unsigned char c, struct tty_struct *tty)
 {
 	if (tty->read_cnt < N_TTY_BUF_SIZE) {
@@ -152,6 +184,7 @@ static void reset_buffer_flags(struct tty_struct *tty)
 
 	tty->canon_head = tty->canon_data = tty->erasing = 0;
 	memset(&tty->read_flags, 0, sizeof tty->read_flags);
+	n_tty_set_room(tty);
 	check_unthrottle(tty);
 }
 
@@ -1327,19 +1360,17 @@ static void n_tty_write_wakeup(struct tty_struct *tty)
  *	calls one at a time and in order (or using flush_to_ldisc)
  */
 
-static unsigned int n_tty_receive_buf(struct tty_struct *tty,
-		const unsigned char *cp, char *fp, int count)
+static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+			      char *fp, int count)
 {
 	const unsigned char *p;
 	char *f, flags = TTY_NORMAL;
 	int	i;
 	char	buf[64];
 	unsigned long cpuflags;
-	int left;
-	int ret = 0;
 
 	if (!tty->read_buf)
-		return 0;
+		return;
 
 	if (tty->real_raw) {
 		spin_lock_irqsave(&tty->read_lock, cpuflags);
@@ -1349,7 +1380,6 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
 		memcpy(tty->read_buf + tty->read_head, cp, i);
 		tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1);
 		tty->read_cnt += i;
-		ret += i;
 		cp += i;
 		count -= i;
 
@@ -1359,10 +1389,8 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
 		memcpy(tty->read_buf + tty->read_head, cp, i);
 		tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1);
 		tty->read_cnt += i;
-		ret += i;
 		spin_unlock_irqrestore(&tty->read_lock, cpuflags);
 	} else {
-		ret = count;
 		for (i = count, p = cp, f = fp; i; i--, p++) {
 			if (f)
 				flags = *f++;
@@ -1390,6 +1418,8 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
 			tty->ops->flush_chars(tty);
 	}
 
+	n_tty_set_room(tty);
+
 	if ((!tty->icanon && (tty->read_cnt >= tty->minimum_to_wake)) ||
 		L_EXTPROC(tty)) {
 		kill_fasync(&tty->fasync, SIGIO, POLL_IN);
@@ -1402,12 +1432,8 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
 	 * mode.  We don't want to throttle the driver if we're in
 	 * canonical mode and don't have a newline yet!
 	 */
-	left = N_TTY_BUF_SIZE - tty->read_cnt - 1;
-
-	if (left < TTY_THRESHOLD_THROTTLE)
+	if (tty->receive_room < TTY_THRESHOLD_THROTTLE)
 		tty_throttle(tty);
-
-	return ret;
 }
 
 int is_ignored(int sig)
@@ -1451,6 +1477,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
 	if (test_bit(TTY_HW_COOK_IN, &tty->flags)) {
 		tty->raw = 1;
 		tty->real_raw = 1;
+		n_tty_set_room(tty);
 		return;
 	}
 	if (I_ISTRIP(tty) || I_IUCLC(tty) || I_IGNCR(tty) ||
@@ -1503,6 +1530,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
 		else
 			tty->real_raw = 0;
 	}
+	n_tty_set_room(tty);
 	/* The termios change make the tty ready for I/O */
 	wake_up_interruptible(&tty->write_wait);
 	wake_up_interruptible(&tty->read_wait);
@@ -1784,6 +1812,8 @@ do_it_again:
 				retval = -ERESTARTSYS;
 				break;
 			}
+			/* FIXME: does n_tty_set_room need locking ? */
+			n_tty_set_room(tty);
 			timeout = schedule_timeout(timeout);
 			continue;
 		}
@@ -1855,8 +1885,10 @@ do_it_again:
 		 * longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode,
 		 * we won't get any more characters.
 		 */
-		if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE)
+		if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) {
+			n_tty_set_room(tty);
 			check_unthrottle(tty);
+		}
 
 		if (b - buf >= minimum)
 			break;
@@ -1878,6 +1910,7 @@ do_it_again:
 	} else if (test_and_clear_bit(TTY_PUSH, &tty->flags))
 		 goto do_it_again;
 
+	n_tty_set_room(tty);
 	return retval;
 }
 

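n_tty_set_room() is the other half of the receive_buf change above: it publishes the free space of the N_TTY read buffer in tty->receive_room (one slot of the circular buffer is kept unused, hence the -1), lets canonical mode keep accepting characters when the buffer looks full so erase characters can still be processed, and reschedules the buffer-flip work whenever room goes from none to some. A standalone sketch of the room computation on such a ring buffer, with invented names:

    #include <stdio.h>

    #define DEMO_BUF_SIZE 4096

    struct demo_tty {
        int read_cnt;        /* bytes currently buffered */
        int icanon;          /* canonical (line-editing) mode? */
        int canon_data;      /* number of complete lines already buffered */
        int receive_room;    /* what the driver may push next */
    };

    static void demo_set_room(struct demo_tty *tty)
    {
        /* one slot stays unused so a full ring can be told from an empty one */
        int left = DEMO_BUF_SIZE - tty->read_cnt - 1;

        /* in canonical mode with no finished line yet, keep accepting input
         * so erase characters can still be processed */
        if (left <= 0)
            left = tty->icanon && !tty->canon_data;
        tty->receive_room = left;
    }

    int main(void)
    {
        struct demo_tty tty = { .read_cnt = DEMO_BUF_SIZE - 1, .icanon = 1 };

        demo_set_room(&tty);
        printf("room with a full buffer, canonical mode, no line yet: %d\n",
               tty.receive_room);
        return 0;
    }
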
Some files were not shown because too many files changed in this diff