@@ -23,14 +23,12 @@
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/device.h>
 #include <linux/mutex.h>
 #include <linux/mfd/ucb1x00.h>
+#include <linux/pm.h>
 #include <linux/gpio.h>
-#include <linux/semaphore.h>
-
-#include <mach/dma.h>
-#include <mach/hardware.h>
 
 static DEFINE_MUTEX(ucb1x00_mutex);
 static LIST_HEAD(ucb1x00_drivers);
@@ -102,7 +100,7 @@ void ucb1x00_io_write(struct ucb1x00 *ucb, unsigned int set, unsigned int clear)
  * ucb1x00_enable must have been called to enable the comms
  * before using this function.
  *
- * This function does not take any semaphores or spinlocks.
+ * This function does not take any mutexes or spinlocks.
  */
 unsigned int ucb1x00_io_read(struct ucb1x00 *ucb)
 {
@@ -120,14 +118,22 @@ static void ucb1x00_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 	else
 		ucb->io_out &= ~(1 << offset);
 
+	ucb1x00_enable(ucb);
 	ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
+	ucb1x00_disable(ucb);
 	spin_unlock_irqrestore(&ucb->io_lock, flags);
 }
 
 static int ucb1x00_gpio_get(struct gpio_chip *chip, unsigned offset)
 {
 	struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio);
-	return ucb1x00_reg_read(ucb, UCB_IO_DATA) & (1 << offset);
+	unsigned val;
+
+	ucb1x00_enable(ucb);
+	val = ucb1x00_reg_read(ucb, UCB_IO_DATA);
+	ucb1x00_disable(ucb);
+
+	return val & (1 << offset);
 }
 
 static int ucb1x00_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -137,7 +143,9 @@ static int ucb1x00_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 
 	spin_lock_irqsave(&ucb->io_lock, flags);
 	ucb->io_dir &= ~(1 << offset);
+	ucb1x00_enable(ucb);
 	ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
+	ucb1x00_disable(ucb);
 	spin_unlock_irqrestore(&ucb->io_lock, flags);
 
 	return 0;
@@ -157,6 +165,7 @@ static int ucb1x00_gpio_direction_output(struct gpio_chip *chip, unsigned offset
 	else
 		ucb->io_out &= ~mask;
 
+	ucb1x00_enable(ucb);
 	if (old != ucb->io_out)
 		ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
 
@@ -164,11 +173,19 @@ static int ucb1x00_gpio_direction_output(struct gpio_chip *chip, unsigned offset
 		ucb->io_dir |= mask;
 		ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
 	}
+	ucb1x00_disable(ucb);
 	spin_unlock_irqrestore(&ucb->io_lock, flags);
 
 	return 0;
 }
 
+static int ucb1x00_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+	struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio);
+
+	return ucb->irq_base > 0 ? ucb->irq_base + offset : -ENXIO;
+}
+
 /*
  * UCB1300 data sheet says we must:
  * 1. enable ADC => 5us (including reference startup time)
@@ -186,7 +203,7 @@ static int ucb1x00_gpio_direction_output(struct gpio_chip *chip, unsigned offset
  * Any code wishing to use the ADC converter must call this
  * function prior to using it.
  *
- * This function takes the ADC semaphore to prevent two or more
+ * This function takes the ADC mutex to prevent two or more
  * concurrent uses, and therefore may sleep. As a result, it
  * can only be called from process context, not interrupt
  * context.
@@ -196,7 +213,7 @@ static int ucb1x00_gpio_direction_output(struct gpio_chip *chip, unsigned offset
  */
 void ucb1x00_adc_enable(struct ucb1x00 *ucb)
 {
-	down(&ucb->adc_sem);
+	mutex_lock(&ucb->adc_mutex);
 
 	ucb->adc_cr |= UCB_ADC_ENA;
 
@@ -218,7 +235,7 @@ void ucb1x00_adc_enable(struct ucb1x00 *ucb)
  * complete (2 frames max without sync).
  *
  * If called for a synchronised ADC conversion, it may sleep
- * with the ADC semaphore held.
+ * with the ADC mutex held.
  */
 unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync)
 {
@@ -246,7 +263,7 @@ unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync)
  * ucb1x00_adc_disable - disable the ADC converter
  * @ucb: UCB1x00 structure describing chip
  *
- * Disable the ADC converter and release the ADC semaphore.
+ * Disable the ADC converter and release the ADC mutex.
  */
 void ucb1x00_adc_disable(struct ucb1x00 *ucb)
 {
@@ -254,7 +271,7 @@ void ucb1x00_adc_disable(struct ucb1x00 *ucb)
 	ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr);
 	ucb1x00_disable(ucb);
 
-	up(&ucb->adc_sem);
+	mutex_unlock(&ucb->adc_mutex);
 }
 
 /*
@@ -265,10 +282,9 @@ void ucb1x00_adc_disable(struct ucb1x00 *ucb)
  * SIBCLK to talk to the chip. We leave the clock running until
  * we have finished processing all interrupts from the chip.
  */
-static irqreturn_t ucb1x00_irq(int irqnr, void *devid)
+static void ucb1x00_irq(unsigned int irq, struct irq_desc *desc)
 {
-	struct ucb1x00 *ucb = devid;
-	struct ucb1x00_irq *irq;
+	struct ucb1x00 *ucb = irq_desc_get_handler_data(desc);
 	unsigned int isr, i;
 
 	ucb1x00_enable(ucb);
@@ -276,157 +292,104 @@ static irqreturn_t ucb1x00_irq(int irqnr, void *devid)
 	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, isr);
 	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);
 
-	for (i = 0, irq = ucb->irq_handler; i < 16 && isr; i++, isr >>= 1, irq++)
-		if (isr & 1 && irq->fn)
-			irq->fn(i, irq->devid);
+	for (i = 0; i < 16 && isr; i++, isr >>= 1)
+		if (isr & 1)
+			generic_handle_irq(ucb->irq_base + i);
 	ucb1x00_disable(ucb);
-
-	return IRQ_HANDLED;
 }
 
-/**
- * ucb1x00_hook_irq - hook a UCB1x00 interrupt
- * @ucb: UCB1x00 structure describing chip
- * @idx: interrupt index
- * @fn: function to call when interrupt is triggered
- * @devid: device id to pass to interrupt handler
- *
- * Hook the specified interrupt. You can only register one handler
- * for each interrupt source. The interrupt source is not enabled
- * by this function; use ucb1x00_enable_irq instead.
- *
- * Interrupt handlers will be called with other interrupts enabled.
- *
- * Returns zero on success, or one of the following errors:
- * -EINVAL if the interrupt index is invalid
- * -EBUSY if the interrupt has already been hooked
- */
-int ucb1x00_hook_irq(struct ucb1x00 *ucb, unsigned int idx, void (*fn)(int, void *), void *devid)
+static void ucb1x00_irq_update(struct ucb1x00 *ucb, unsigned mask)
 {
-	struct ucb1x00_irq *irq;
-	int ret = -EINVAL;
-
-	if (idx < 16) {
-		irq = ucb->irq_handler + idx;
-		ret = -EBUSY;
-
-		spin_lock_irq(&ucb->lock);
-		if (irq->fn == NULL) {
-			irq->devid = devid;
-			irq->fn = fn;
-			ret = 0;
-		}
-		spin_unlock_irq(&ucb->lock);
-	}
-	return ret;
+	ucb1x00_enable(ucb);
+	if (ucb->irq_ris_enbl & mask)
+		ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
+				  ucb->irq_mask);
+	if (ucb->irq_fal_enbl & mask)
+		ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
+				  ucb->irq_mask);
+	ucb1x00_disable(ucb);
 }
 
-/**
- * ucb1x00_enable_irq - enable an UCB1x00 interrupt source
- * @ucb: UCB1x00 structure describing chip
- * @idx: interrupt index
- * @edges: interrupt edges to enable
- *
- * Enable the specified interrupt to trigger on %UCB_RISING,
- * %UCB_FALLING or both edges. The interrupt should have been
- * hooked by ucb1x00_hook_irq.
- */
-void ucb1x00_enable_irq(struct ucb1x00 *ucb, unsigned int idx, int edges)
+static void ucb1x00_irq_noop(struct irq_data *data)
 {
-	unsigned long flags;
+}
 
-	if (idx < 16) {
-		spin_lock_irqsave(&ucb->lock, flags);
+static void ucb1x00_irq_mask(struct irq_data *data)
+{
+	struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
+	unsigned mask = 1 << (data->irq - ucb->irq_base);
 
-		ucb1x00_enable(ucb);
-		if (edges & UCB_RISING) {
-			ucb->irq_ris_enbl |= 1 << idx;
-			ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl);
-		}
-		if (edges & UCB_FALLING) {
-			ucb->irq_fal_enbl |= 1 << idx;
-			ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl);
-		}
-		ucb1x00_disable(ucb);
-		spin_unlock_irqrestore(&ucb->lock, flags);
-	}
+	raw_spin_lock(&ucb->irq_lock);
+	ucb->irq_mask &= ~mask;
+	ucb1x00_irq_update(ucb, mask);
+	raw_spin_unlock(&ucb->irq_lock);
 }
 
-/**
- * ucb1x00_disable_irq - disable an UCB1x00 interrupt source
- * @ucb: UCB1x00 structure describing chip
- * @edges: interrupt edges to disable
- *
- * Disable the specified interrupt triggering on the specified
- * (%UCB_RISING, %UCB_FALLING or both) edges.
- */
-void ucb1x00_disable_irq(struct ucb1x00 *ucb, unsigned int idx, int edges)
+static void ucb1x00_irq_unmask(struct irq_data *data)
 {
-	unsigned long flags;
+	struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
+	unsigned mask = 1 << (data->irq - ucb->irq_base);
 
-	if (idx < 16) {
-		spin_lock_irqsave(&ucb->lock, flags);
-
-		ucb1x00_enable(ucb);
-		if (edges & UCB_RISING) {
-			ucb->irq_ris_enbl &= ~(1 << idx);
-			ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl);
-		}
-		if (edges & UCB_FALLING) {
-			ucb->irq_fal_enbl &= ~(1 << idx);
-			ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl);
-		}
-		ucb1x00_disable(ucb);
-		spin_unlock_irqrestore(&ucb->lock, flags);
-	}
+	raw_spin_lock(&ucb->irq_lock);
+	ucb->irq_mask |= mask;
+	ucb1x00_irq_update(ucb, mask);
+	raw_spin_unlock(&ucb->irq_lock);
 }
 
-/**
- * ucb1x00_free_irq - disable and free the specified UCB1x00 interrupt
- * @ucb: UCB1x00 structure describing chip
- * @idx: interrupt index
- * @devid: device id.
- *
- * Disable the interrupt source and remove the handler. devid must
- * match the devid passed when hooking the interrupt.
- *
- * Returns zero on success, or one of the following errors:
- * -EINVAL if the interrupt index is invalid
- * -ENOENT if devid does not match
- */
-int ucb1x00_free_irq(struct ucb1x00 *ucb, unsigned int idx, void *devid)
+static int ucb1x00_irq_set_type(struct irq_data *data, unsigned int type)
 {
-	struct ucb1x00_irq *irq;
-	int ret;
+	struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
+	unsigned mask = 1 << (data->irq - ucb->irq_base);
 
-	if (idx >= 16)
-		goto bad;
+	raw_spin_lock(&ucb->irq_lock);
+	if (type & IRQ_TYPE_EDGE_RISING)
+		ucb->irq_ris_enbl |= mask;
+	else
+		ucb->irq_ris_enbl &= ~mask;
 
-	irq = ucb->irq_handler + idx;
-	ret = -ENOENT;
+	if (type & IRQ_TYPE_EDGE_FALLING)
+		ucb->irq_fal_enbl |= mask;
+	else
+		ucb->irq_fal_enbl &= ~mask;
+	if (ucb->irq_mask & mask) {
+		ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
+				  ucb->irq_mask);
+		ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
+				  ucb->irq_mask);
+	}
+	raw_spin_unlock(&ucb->irq_lock);
 
-	spin_lock_irq(&ucb->lock);
-	if (irq->devid == devid) {
-		ucb->irq_ris_enbl &= ~(1 << idx);
-		ucb->irq_fal_enbl &= ~(1 << idx);
+	return 0;
+}
 
-		ucb1x00_enable(ucb);
-		ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl);
-		ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl);
-		ucb1x00_disable(ucb);
+static int ucb1x00_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+	struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
+	struct ucb1x00_plat_data *pdata = ucb->mcp->attached_device.platform_data;
+	unsigned mask = 1 << (data->irq - ucb->irq_base);
 
-		irq->fn = NULL;
-		irq->devid = NULL;
-		ret = 0;
-	}
-	spin_unlock_irq(&ucb->lock);
-	return ret;
+	if (!pdata || !pdata->can_wakeup)
+		return -EINVAL;
 
-bad:
-	printk(KERN_ERR "Freeing bad UCB1x00 irq %d\n", idx);
-	return -EINVAL;
+	raw_spin_lock(&ucb->irq_lock);
+	if (on)
+		ucb->irq_wake |= mask;
+	else
+		ucb->irq_wake &= ~mask;
+	raw_spin_unlock(&ucb->irq_lock);
+
+	return 0;
 }
 
+static struct irq_chip ucb1x00_irqchip = {
+	.name = "ucb1x00",
+	.irq_ack = ucb1x00_irq_noop,
+	.irq_mask = ucb1x00_irq_mask,
+	.irq_unmask = ucb1x00_irq_unmask,
+	.irq_set_type = ucb1x00_irq_set_type,
+	.irq_set_wake = ucb1x00_irq_set_wake,
+};
+
 static int ucb1x00_add_dev(struct ucb1x00 *ucb, struct ucb1x00_driver *drv)
 {
 	struct ucb1x00_dev *dev;
@@ -440,8 +403,8 @@ static int ucb1x00_add_dev(struct ucb1x00 *ucb, struct ucb1x00_driver *drv)
 	ret = drv->add(dev);
 
 	if (ret == 0) {
-		list_add(&dev->dev_node, &ucb->devs);
-		list_add(&dev->drv_node, &drv->devs);
+		list_add_tail(&dev->dev_node, &ucb->devs);
+		list_add_tail(&dev->drv_node, &drv->devs);
 	} else {
 		kfree(dev);
 	}
@@ -533,98 +496,126 @@ static struct class ucb1x00_class = {
 
 static int ucb1x00_probe(struct mcp *mcp)
 {
-	struct ucb1x00 *ucb;
+	struct ucb1x00_plat_data *pdata = mcp->attached_device.platform_data;
 	struct ucb1x00_driver *drv;
-	unsigned int id;
+	struct ucb1x00 *ucb;
+	unsigned id, i, irq_base;
 	int ret = -ENODEV;
-	int temp;
+
+	/* Tell the platform to deassert the UCB1x00 reset */
+	if (pdata && pdata->reset)
+		pdata->reset(UCB_RST_PROBE);
 
 	mcp_enable(mcp);
 	id = mcp_reg_read(mcp, UCB_ID);
+	mcp_disable(mcp);
 
 	if (id != UCB_ID_1200 && id != UCB_ID_1300 && id != UCB_ID_TC35143) {
 		printk(KERN_WARNING "UCB1x00 ID not found: %04x\n", id);
-		goto err_disable;
+		goto out;
 	}
 
 	ucb = kzalloc(sizeof(struct ucb1x00), GFP_KERNEL);
 	ret = -ENOMEM;
 	if (!ucb)
-		goto err_disable;
-
+		goto out;
 
+	device_initialize(&ucb->dev);
 	ucb->dev.class = &ucb1x00_class;
 	ucb->dev.parent = &mcp->attached_device;
 	dev_set_name(&ucb->dev, "ucb1x00");
 
-	spin_lock_init(&ucb->lock);
+	raw_spin_lock_init(&ucb->irq_lock);
 	spin_lock_init(&ucb->io_lock);
-	sema_init(&ucb->adc_sem, 1);
+	mutex_init(&ucb->adc_mutex);
 
 	ucb->id = id;
 	ucb->mcp = mcp;
+
+	ret = device_add(&ucb->dev);
+	if (ret)
+		goto err_dev_add;
+
+	ucb1x00_enable(ucb);
 	ucb->irq = ucb1x00_detect_irq(ucb);
+	ucb1x00_disable(ucb);
 	if (ucb->irq == NO_IRQ) {
-		printk(KERN_ERR "UCB1x00: IRQ probe failed\n");
+		dev_err(&ucb->dev, "IRQ probe failed\n");
 		ret = -ENODEV;
-		goto err_free;
+		goto err_no_irq;
 	}
 
 	ucb->gpio.base = -1;
-	if (mcp->gpio_base != 0) {
+	irq_base = pdata ? pdata->irq_base : 0;
+	ucb->irq_base = irq_alloc_descs(-1, irq_base, 16, -1);
+	if (ucb->irq_base < 0) {
+		dev_err(&ucb->dev, "unable to allocate 16 irqs: %d\n",
+			ucb->irq_base);
+		goto err_irq_alloc;
+	}
+
+	for (i = 0; i < 16; i++) {
+		unsigned irq = ucb->irq_base + i;
+
+		irq_set_chip_and_handler(irq, &ucb1x00_irqchip, handle_edge_irq);
+		irq_set_chip_data(irq, ucb);
+		set_irq_flags(irq, IRQF_VALID | IRQ_NOREQUEST);
+	}
+
+	irq_set_irq_type(ucb->irq, IRQ_TYPE_EDGE_RISING);
+	irq_set_handler_data(ucb->irq, ucb);
+	irq_set_chained_handler(ucb->irq, ucb1x00_irq);
+
+	if (pdata && pdata->gpio_base) {
 		ucb->gpio.label = dev_name(&ucb->dev);
-		ucb->gpio.base = mcp->gpio_base;
+		ucb->gpio.dev = &ucb->dev;
+		ucb->gpio.owner = THIS_MODULE;
+		ucb->gpio.base = pdata->gpio_base;
 		ucb->gpio.ngpio = 10;
 		ucb->gpio.set = ucb1x00_gpio_set;
 		ucb->gpio.get = ucb1x00_gpio_get;
 		ucb->gpio.direction_input = ucb1x00_gpio_direction_input;
 		ucb->gpio.direction_output = ucb1x00_gpio_direction_output;
+		ucb->gpio.to_irq = ucb1x00_to_irq;
 		ret = gpiochip_add(&ucb->gpio);
 		if (ret)
-			goto err_free;
+			goto err_gpio_add;
 	} else
 		dev_info(&ucb->dev, "gpio_base not set so no gpiolib support");
 
-	ret = request_irq(ucb->irq, ucb1x00_irq, IRQF_TRIGGER_RISING,
-			  "UCB1x00", ucb);
-	if (ret) {
-		printk(KERN_ERR "ucb1x00: unable to grab irq%d: %d\n",
-			ucb->irq, ret);
-		goto err_gpio;
-	}
-
 	mcp_set_drvdata(mcp, ucb);
 
-	ret = device_register(&ucb->dev);
-	if (ret)
-		goto err_irq;
-
+	if (pdata)
+		device_set_wakeup_capable(&ucb->dev, pdata->can_wakeup);
 
 	INIT_LIST_HEAD(&ucb->devs);
 	mutex_lock(&ucb1x00_mutex);
-	list_add(&ucb->node, &ucb1x00_devices);
+	list_add_tail(&ucb->node, &ucb1x00_devices);
 	list_for_each_entry(drv, &ucb1x00_drivers, node) {
 		ucb1x00_add_dev(ucb, drv);
 	}
 	mutex_unlock(&ucb1x00_mutex);
 
-	goto out;
+	return ret;
 
- err_irq:
-	free_irq(ucb->irq, ucb);
- err_gpio:
-	if (ucb->gpio.base != -1)
-		temp = gpiochip_remove(&ucb->gpio);
- err_free:
-	kfree(ucb);
- err_disable:
-	mcp_disable(mcp);
+ err_gpio_add:
+	irq_set_chained_handler(ucb->irq, NULL);
+ err_irq_alloc:
+	if (ucb->irq_base > 0)
+		irq_free_descs(ucb->irq_base, 16);
+ err_no_irq:
+	device_del(&ucb->dev);
+ err_dev_add:
+	put_device(&ucb->dev);
  out:
+	if (pdata && pdata->reset)
+		pdata->reset(UCB_RST_PROBE_FAIL);
 	return ret;
 }
 
 static void ucb1x00_remove(struct mcp *mcp)
 {
+	struct ucb1x00_plat_data *pdata = mcp->attached_device.platform_data;
 	struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
 	struct list_head *l, *n;
 	int ret;
@@ -643,8 +634,12 @@ static void ucb1x00_remove(struct mcp *mcp)
 			dev_err(&ucb->dev, "Can't remove gpio chip: %d\n", ret);
 	}
 
-	free_irq(ucb->irq, ucb);
+	irq_set_chained_handler(ucb->irq, NULL);
+	irq_free_descs(ucb->irq_base, 16);
 	device_unregister(&ucb->dev);
+
+	if (pdata && pdata->reset)
+		pdata->reset(UCB_RST_REMOVE);
 }
 
 int ucb1x00_register_driver(struct ucb1x00_driver *drv)
@@ -653,7 +648,7 @@ int ucb1x00_register_driver(struct ucb1x00_driver *drv)
 
 	INIT_LIST_HEAD(&drv->devs);
 	mutex_lock(&ucb1x00_mutex);
-	list_add(&drv->node, &ucb1x00_drivers);
+	list_add_tail(&drv->node, &ucb1x00_drivers);
 	list_for_each_entry(ucb, &ucb1x00_devices, node) {
 		ucb1x00_add_dev(ucb, drv);
 	}
@@ -674,44 +669,86 @@ void ucb1x00_unregister_driver(struct ucb1x00_driver *drv)
 	mutex_unlock(&ucb1x00_mutex);
 }
 
-static int ucb1x00_suspend(struct mcp *mcp, pm_message_t state)
+static int ucb1x00_suspend(struct device *dev)
 {
-	struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
-	struct ucb1x00_dev *dev;
+	struct ucb1x00_plat_data *pdata = dev->platform_data;
+	struct ucb1x00 *ucb = dev_get_drvdata(dev);
+	struct ucb1x00_dev *udev;
 
 	mutex_lock(&ucb1x00_mutex);
-	list_for_each_entry(dev, &ucb->devs, dev_node) {
-		if (dev->drv->suspend)
-			dev->drv->suspend(dev, state);
+	list_for_each_entry(udev, &ucb->devs, dev_node) {
+		if (udev->drv->suspend)
+			udev->drv->suspend(udev);
 	}
 	mutex_unlock(&ucb1x00_mutex);
+
+	if (ucb->irq_wake) {
+		unsigned long flags;
+
+		raw_spin_lock_irqsave(&ucb->irq_lock, flags);
+		ucb1x00_enable(ucb);
+		ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
+				  ucb->irq_wake);
+		ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
+				  ucb->irq_wake);
+		ucb1x00_disable(ucb);
+		raw_spin_unlock_irqrestore(&ucb->irq_lock, flags);
+
+		enable_irq_wake(ucb->irq);
+	} else if (pdata && pdata->reset)
+		pdata->reset(UCB_RST_SUSPEND);
+
 	return 0;
 }
 
-static int ucb1x00_resume(struct mcp *mcp)
+static int ucb1x00_resume(struct device *dev)
 {
-	struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
-	struct ucb1x00_dev *dev;
+	struct ucb1x00_plat_data *pdata = dev->platform_data;
+	struct ucb1x00 *ucb = dev_get_drvdata(dev);
+	struct ucb1x00_dev *udev;
+
+	if (!ucb->irq_wake && pdata && pdata->reset)
+		pdata->reset(UCB_RST_RESUME);
 
+	ucb1x00_enable(ucb);
 	ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
 	ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
+
+	if (ucb->irq_wake) {
+		unsigned long flags;
+
+		raw_spin_lock_irqsave(&ucb->irq_lock, flags);
+		ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
+				  ucb->irq_mask);
+		ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
+				  ucb->irq_mask);
+		raw_spin_unlock_irqrestore(&ucb->irq_lock, flags);
+
+		disable_irq_wake(ucb->irq);
+	}
+	ucb1x00_disable(ucb);
+
 	mutex_lock(&ucb1x00_mutex);
-	list_for_each_entry(dev, &ucb->devs, dev_node) {
-		if (dev->drv->resume)
-			dev->drv->resume(dev);
+	list_for_each_entry(udev, &ucb->devs, dev_node) {
+		if (udev->drv->resume)
+			udev->drv->resume(udev);
 	}
 	mutex_unlock(&ucb1x00_mutex);
 	return 0;
 }
 
+static const struct dev_pm_ops ucb1x00_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(ucb1x00_suspend, ucb1x00_resume)
+};
+
 static struct mcp_driver ucb1x00_driver = {
 	.drv = {
 		.name = "ucb1x00",
+		.owner = THIS_MODULE,
+		.pm = &ucb1x00_pm_ops,
 	},
 	.probe = ucb1x00_probe,
 	.remove = ucb1x00_remove,
-	.suspend = ucb1x00_suspend,
-	.resume = ucb1x00_resume,
 };
 
 static int __init ucb1x00_init(void)
@@ -742,14 +779,10 @@ EXPORT_SYMBOL(ucb1x00_adc_enable);
 EXPORT_SYMBOL(ucb1x00_adc_read);
 EXPORT_SYMBOL(ucb1x00_adc_disable);
 
-EXPORT_SYMBOL(ucb1x00_hook_irq);
-EXPORT_SYMBOL(ucb1x00_free_irq);
-EXPORT_SYMBOL(ucb1x00_enable_irq);
-EXPORT_SYMBOL(ucb1x00_disable_irq);
-
 EXPORT_SYMBOL(ucb1x00_register_driver);
 EXPORT_SYMBOL(ucb1x00_unregister_driver);
 
+MODULE_ALIAS("mcp:ucb1x00");
 MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
 MODULE_DESCRIPTION("UCB1x00 core driver");
 MODULE_LICENSE("GPL");
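
With ucb1x00_hook_irq(), ucb1x00_enable_irq(), ucb1x00_disable_irq() and ucb1x00_free_irq() removed, sub-drivers claim the chip's sixteen interrupt sources through genirq, using the descriptors allocated at ucb->irq_base in ucb1x00_probe(). The sketch below is a hypothetical consumer, not part of this patch: it assumes the matching <linux/mfd/ucb1x00.h> changes (struct ucb1x00_plat_data, the irq_base field) that are not shown here, and the UCB_IRQ_TSPX index and "example" names are purely illustrative.

#include <linux/interrupt.h>
#include <linux/mfd/ucb1x00.h>

/*
 * Hypothetical sub-driver handler: by the time this runs, the chained
 * handler ucb1x00_irq() has already acknowledged the source via
 * UCB_IE_CLEAR and dispatched it with generic_handle_irq().
 */
static irqreturn_t example_isr(int irq, void *devid)
{
	return IRQ_HANDLED;
}

static int example_add(struct ucb1x00_dev *dev)
{
	struct ucb1x00 *ucb = dev->ucb;

	/*
	 * IRQF_TRIGGER_RISING is routed to ucb1x00_irq_set_type(), which
	 * programs the UCB_IE_RIS/UCB_IE_FAL enable registers.
	 */
	return request_irq(ucb->irq_base + UCB_IRQ_TSPX, example_isr,
			   IRQF_TRIGGER_RISING, "example-ucb1x00", dev);
}

GPIO users can instead go through gpiolib: the new ucb1x00_to_irq() callback makes gpio_to_irq() on one of the ten UCB1x00 GPIOs return the corresponding ucb->irq_base offset.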