@@ -65,16 +65,18 @@ enum ec_command {
 
 /* EC events */
 enum ec_event {
 	ACPI_EC_EVENT_OBF_1 = 1,	/* Output buffer full */
-	ACPI_EC_EVENT_IBF_0,	/* Input buffer empty */
+	ACPI_EC_EVENT_IBF_0,		/* Input buffer empty */
 };
 
 #define ACPI_EC_DELAY		500	/* Wait 500ms max. during EC ops */
 #define ACPI_EC_UDELAY_GLK	1000	/* Wait 1ms max. to get global lock */
 
-static enum ec_mode {
-	EC_INTR = 1,	/* Output buffer full */
-	EC_POLL,	/* Input buffer empty */
-} acpi_ec_mode = EC_INTR;
+enum {
+	EC_FLAGS_WAIT_GPE = 0,	/* Don't check status until GPE arrives */
+	EC_FLAGS_QUERY_PENDING,	/* Query is pending */
+	EC_FLAGS_GPE_MODE,	/* Expect GPE to be sent for status change */
+	EC_FLAGS_ONLY_IBF_GPE,	/* Expect GPE only for IBF = 0 event */
+};
 
 static int acpi_ec_remove(struct acpi_device *device, int type);
 static int acpi_ec_start(struct acpi_device *device);
@@ -116,9 +118,8 @@ static struct acpi_ec {
 	unsigned long command_addr;
 	unsigned long data_addr;
 	unsigned long global_lock;
+	unsigned long flags;
 	struct mutex lock;
-	atomic_t query_pending;
-	atomic_t event_count;
 	wait_queue_head_t wait;
 	struct list_head list;
 	u8 handlers_installed;
@@ -148,45 +149,54 @@ static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
 	outb(data, ec->data_addr);
 }
 
-static inline int acpi_ec_check_status(struct acpi_ec *ec, enum ec_event event,
-				       unsigned old_count)
+static inline int acpi_ec_check_status(struct acpi_ec *ec, enum ec_event event)
 {
-	u8 status = acpi_ec_read_status(ec);
-	if (old_count == atomic_read(&ec->event_count))
+	if (test_bit(EC_FLAGS_WAIT_GPE, &ec->flags))
 		return 0;
 	if (event == ACPI_EC_EVENT_OBF_1) {
-		if (status & ACPI_EC_FLAG_OBF)
+		if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_OBF)
 			return 1;
 	} else if (event == ACPI_EC_EVENT_IBF_0) {
-		if (!(status & ACPI_EC_FLAG_IBF))
+		if (!(acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF))
 			return 1;
 	}
 
 	return 0;
 }
 
-static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event,
-			unsigned count, int force_poll)
+static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll)
 {
-	if (unlikely(force_poll) || acpi_ec_mode == EC_POLL) {
+	if (likely(test_bit(EC_FLAGS_GPE_MODE, &ec->flags)) &&
+	    likely(!force_poll)) {
+		if (wait_event_timeout(ec->wait, acpi_ec_check_status(ec, event),
+				       msecs_to_jiffies(ACPI_EC_DELAY)))
+			return 0;
+		clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
+		if (acpi_ec_check_status(ec, event)) {
+			if (event == ACPI_EC_EVENT_OBF_1) {
+				/* miss OBF = 1 GPE, don't expect it anymore */
+				printk(KERN_INFO PREFIX "missing OBF_1 confirmation, "
+					"switching to degraded mode.\n");
+				set_bit(EC_FLAGS_ONLY_IBF_GPE, &ec->flags);
+			} else {
+				/* missing GPEs, switch back to poll mode */
+				printk(KERN_INFO PREFIX "missing IBF_1 confirmations, "
+					"switch off interrupt mode.\n");
+				clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
+			}
+			return 0;
+		}
+	} else {
 		unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
+		clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
 		while (time_before(jiffies, delay)) {
-			if (acpi_ec_check_status(ec, event, 0))
+			if (acpi_ec_check_status(ec, event))
 				return 0;
 		}
-	} else {
-		if (wait_event_timeout(ec->wait,
-				       acpi_ec_check_status(ec, event, count),
-				       msecs_to_jiffies(ACPI_EC_DELAY)) ||
-		    acpi_ec_check_status(ec, event, 0)) {
-			return 0;
-		} else {
-			printk(KERN_ERR PREFIX "acpi_ec_wait timeout,"
+	}
+	printk(KERN_ERR PREFIX "acpi_ec_wait timeout,"
 			       " status = %d, expect_event = %d\n",
 			       acpi_ec_read_status(ec), event);
-		}
-	}
-
 	return -ETIME;
 }
 
@@ -196,39 +206,42 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
 					int force_poll)
 {
 	int result = 0;
-	unsigned count = atomic_read(&ec->event_count);
+	set_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
 	acpi_ec_write_cmd(ec, command);
 
 	for (; wdata_len > 0; --wdata_len) {
-		result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, count, force_poll);
+		result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, force_poll);
 		if (result) {
 			printk(KERN_ERR PREFIX
 			       "write_cmd timeout, command = %d\n", command);
 			goto end;
 		}
-		count = atomic_read(&ec->event_count);
+		set_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
 		acpi_ec_write_data(ec, *(wdata++));
 	}
 
 	if (!rdata_len) {
-		result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, count, force_poll);
+		result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, force_poll);
 		if (result) {
 			printk(KERN_ERR PREFIX
 			       "finish-write timeout, command = %d\n", command);
 			goto end;
 		}
-	} else if (command == ACPI_EC_COMMAND_QUERY) {
-		atomic_set(&ec->query_pending, 0);
-	}
+	} else if (command == ACPI_EC_COMMAND_QUERY)
+		clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
 
 	for (; rdata_len > 0; --rdata_len) {
-		result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1, count, force_poll);
+		if (test_bit(EC_FLAGS_ONLY_IBF_GPE, &ec->flags))
+			force_poll = 1;
+		result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1, force_poll);
 		if (result) {
 			printk(KERN_ERR PREFIX "read timeout, command = %d\n",
 			       command);
 			goto end;
 		}
-		count = atomic_read(&ec->event_count);
+		/* Don't expect GPE after last read */
+		if (rdata_len > 1)
+			set_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
 		*(rdata++) = acpi_ec_read_data(ec);
 	}
       end:
@@ -258,10 +271,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, u8 command,
 		}
 	}
 
-	/* Make sure GPE is enabled before doing transaction */
-	acpi_enable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
-
-	status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, 0, 0);
+	status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, 0);
 	if (status) {
 		printk(KERN_ERR PREFIX
 		       "input buffer is not empty, aborting transaction\n");
@@ -435,9 +445,9 @@ EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);
 
 void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
 {
-	struct acpi_ec_query_handler *handler;
+	struct acpi_ec_query_handler *handler, *tmp;
 	mutex_lock(&ec->lock);
-	list_for_each_entry(handler, &ec->list, node) {
+	list_for_each_entry_safe(handler, tmp, &ec->list, node) {
 		if (query_bit == handler->query_bit) {
 			list_del(&handler->node);
 			kfree(handler);
@@ -476,23 +486,24 @@ static void acpi_ec_gpe_query(void *ec_cxt)
 static u32 acpi_ec_gpe_handler(void *data)
 {
 	acpi_status status = AE_OK;
-	u8 value;
 	struct acpi_ec *ec = data;
 
-	atomic_inc(&ec->event_count);
-
-	if (acpi_ec_mode == EC_INTR) {
+	clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
+	if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags))
 		wake_up(&ec->wait);
-	}
 
-	value = acpi_ec_read_status(ec);
-	if ((value & ACPI_EC_FLAG_SCI) && !atomic_read(&ec->query_pending)) {
-		atomic_set(&ec->query_pending, 1);
-		status =
-		    acpi_os_execute(OSL_EC_BURST_HANDLER, acpi_ec_gpe_query, ec);
+	if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_SCI) {
+		if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
+			status = acpi_os_execute(OSL_EC_BURST_HANDLER,
+						 acpi_ec_gpe_query, ec);
+	} else if (unlikely(!test_bit(EC_FLAGS_GPE_MODE, &ec->flags))) {
+		/* this is non-query, must be confirmation */
+		printk(KERN_INFO PREFIX "non-query interrupt received,"
+		       " switching to interrupt mode\n");
+		set_bit(EC_FLAGS_GPE_MODE, &ec->flags);
 	}
 
-	return status == AE_OK ?
+	return ACPI_SUCCESS(status) ?
 	    ACPI_INTERRUPT_HANDLED : ACPI_INTERRUPT_NOT_HANDLED;
 }
 
@@ -641,13 +652,10 @@ static struct acpi_ec *make_acpi_ec(void)
 	struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL);
 	if (!ec)
 		return NULL;
-
-	atomic_set(&ec->query_pending, 1);
-	atomic_set(&ec->event_count, 1);
+	ec->flags = 1 << EC_FLAGS_QUERY_PENDING;
 	mutex_init(&ec->lock);
 	init_waitqueue_head(&ec->wait);
 	INIT_LIST_HEAD(&ec->list);
-
 	return ec;
 }
 
@@ -741,6 +749,8 @@ static int acpi_ec_add(struct acpi_device *device)
 	acpi_ec_add_fs(device);
 	printk(KERN_INFO PREFIX "GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
 	       ec->gpe, ec->command_addr, ec->data_addr);
+	printk(KERN_INFO PREFIX "driver started in %s mode\n",
+	       (test_bit(EC_FLAGS_GPE_MODE, &ec->flags)) ? "interrupt" : "poll");
 	return 0;
 }
 
@@ -833,7 +843,7 @@ static int acpi_ec_start(struct acpi_device *device)
 	ret = ec_install_handlers(ec);
 
 	/* EC is fully operational, allow queries */
-	atomic_set(&ec->query_pending, 0);
+	clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
 	return ret;
 }
 
@@ -924,20 +934,4 @@ static void __exit acpi_ec_exit(void)
 
 	return;
 }
-#endif /* 0 */
-
-static int __init acpi_ec_set_intr_mode(char *str)
-{
-	int intr;
-
-	if (!get_option(&str, &intr))
-		return 0;
-
-	acpi_ec_mode = (intr) ? EC_INTR : EC_POLL;
-
-	printk(KERN_NOTICE PREFIX "%s mode.\n", intr ? "interrupt" : "polling");
-
-	return 1;
-}
-
-__setup("ec_intr=", acpi_ec_set_intr_mode);
+#endif /* 0 */