@@ -167,7 +167,13 @@ static int acpi_system_sysfs_init(void)
 #define COUNT_ERROR 2 /* other */
 #define NUM_COUNTERS_EXTRA 3
 
-static u32 *all_counters;
+#define ACPI_EVENT_VALID 0x01
+struct event_counter {
+	u32 count;
+	u32 flags;
+};
+
+static struct event_counter *all_counters;
 static u32 num_gpes;
 static u32 num_counters;
 static struct attribute **all_attrs;
@@ -202,9 +208,44 @@ static int count_num_gpes(void)
 	return count;
 }
 
+static int get_gpe_device(int index, acpi_handle *handle)
+{
+	struct acpi_gpe_xrupt_info *gpe_xrupt_info;
+	struct acpi_gpe_block_info *gpe_block;
+	acpi_cpu_flags flags;
+	struct acpi_namespace_node *node;
+
+	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+	gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
+	while (gpe_xrupt_info) {
+		gpe_block = gpe_xrupt_info->gpe_block_list_head;
+		node = gpe_block->node;
+		while (gpe_block) {
+			index -= gpe_block->register_count *
+				ACPI_GPE_REGISTER_WIDTH;
+			if (index < 0) {
+				acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+				/* return NULL if it's FADT GPE */
+				if (node->type != ACPI_TYPE_DEVICE)
+					*handle = NULL;
+				else
+					*handle = node;
+				return 0;
+			}
+			node = gpe_block->node;
+			gpe_block = gpe_block->next;
+		}
+		gpe_xrupt_info = gpe_xrupt_info->next;
+	}
+	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
+	return -ENODEV;
+}
+
 static void delete_gpe_attr_array(void)
 {
-	u32 *tmp = all_counters;
+	struct event_counter *tmp = all_counters;
 
 	all_counters = NULL;
 	kfree(tmp);
@@ -230,9 +271,10 @@ void acpi_os_gpe_count(u32 gpe_number)
 		return;
 
 	if (gpe_number < num_gpes)
-		all_counters[gpe_number]++;
+		all_counters[gpe_number].count++;
 	else
-		all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR]++;
+		all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR].
+			count++;
 
 	return;
 }
@@ -243,44 +285,144 @@ void acpi_os_fixed_event_count(u32 event_number)
 		return;
 
 	if (event_number < ACPI_NUM_FIXED_EVENTS)
-		all_counters[num_gpes + event_number]++;
+		all_counters[num_gpes + event_number].count++;
 	else
-		all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR]++;
+		all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR].
+			count++;
 
 	return;
 }
 
+static int get_status(u32 index, acpi_event_status *status, acpi_handle *handle)
+{
+	int result = 0;
+
+	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
+		goto end;
+
+	if (index < num_gpes) {
+		result = get_gpe_device(index, handle);
+		if (result) {
+			ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND,
+				"Invalid GPE 0x%x\n", index));
+			goto end;
+		}
+		result = acpi_get_gpe_status(*handle, index,
+				ACPI_NOT_ISR, status);
+	} else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS))
+		result = acpi_get_event_status(index - num_gpes, status);
+
+	/*
+	 * sleep/power button GPE/Fixed Event is enabled after acpi_system_init,
+	 * check the status at runtime and mark it as valid once it's enabled
+	 */
+	if (!result && (*status & ACPI_EVENT_FLAG_ENABLED))
+		all_counters[index].flags |= ACPI_EVENT_VALID;
+end:
+	return result;
+}
+
 static ssize_t counter_show(struct kobject *kobj,
 	struct kobj_attribute *attr, char *buf)
 {
-	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI] =
+	int index = attr - counter_attrs;
+	int size;
+	acpi_handle handle;
+	acpi_event_status status;
+	int result = 0;
+
+	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI].count =
 		acpi_irq_handled;
-	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE] =
+	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE].count =
 		acpi_gpe_count;
 
-	return sprintf(buf, "%d\n", all_counters[attr - counter_attrs]);
+	size = sprintf(buf, "%8d", all_counters[index].count);
+
+	/* "gpe_all" or "sci" */
+	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
+		goto end;
+
+	result = get_status(index, &status, &handle);
+	if (result)
+		goto end;
+
+	if (!(all_counters[index].flags & ACPI_EVENT_VALID))
+		size += sprintf(buf + size, " invalid");
+	else if (status & ACPI_EVENT_FLAG_ENABLED)
+		size += sprintf(buf + size, " enable");
+	else
+		size += sprintf(buf + size, " disable");
+
+end:
+	size += sprintf(buf + size, "\n");
+	return result ? result : size;
 }
 
 /*
  * counter_set() sets the specified counter.
  * setting the total "sci" file to any value clears all counters.
+ * enable/disable/clear a gpe/fixed event in user space.
  */
 static ssize_t counter_set(struct kobject *kobj,
 	struct kobj_attribute *attr, const char *buf, size_t size)
 {
 	int index = attr - counter_attrs;
+	acpi_event_status status;
+	acpi_handle handle;
+	int result = 0;
 
 	if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
 		int i;
 		for (i = 0; i < num_counters; ++i)
-			all_counters[i] = 0;
+			all_counters[i].count = 0;
 		acpi_gpe_count = 0;
 		acpi_irq_handled = 0;
+		goto end;
+	}
 
+	/* show the event status for both GPEs and Fixed Events */
+	result = get_status(index, &status, &handle);
+	if (result)
+		goto end;
+
+	if (!(all_counters[index].flags & ACPI_EVENT_VALID)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_WARN,
+			"Can not change Invalid GPE/Fixed Event status\n"));
+		return -EINVAL;
+	}
+
+	if (index < num_gpes) {
+		if (!strcmp(buf, "disable\n") &&
+				(status & ACPI_EVENT_FLAG_ENABLED))
+			result = acpi_disable_gpe(handle, index, ACPI_NOT_ISR);
+		else if (!strcmp(buf, "enable\n") &&
+				!(status & ACPI_EVENT_FLAG_ENABLED))
+			result = acpi_enable_gpe(handle, index, ACPI_NOT_ISR);
+		else if (!strcmp(buf, "clear\n") &&
+				(status & ACPI_EVENT_FLAG_SET))
+			result = acpi_clear_gpe(handle, index, ACPI_NOT_ISR);
+		else
+			all_counters[index].count = strtoul(buf, NULL, 0);
+	} else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
+		int event = index - num_gpes;
+		if (!strcmp(buf, "disable\n") &&
+				(status & ACPI_EVENT_FLAG_ENABLED))
+			result = acpi_disable_event(event, ACPI_NOT_ISR);
+		else if (!strcmp(buf, "enable\n") &&
+				!(status & ACPI_EVENT_FLAG_ENABLED))
+			result = acpi_enable_event(event, ACPI_NOT_ISR);
+		else if (!strcmp(buf, "clear\n") &&
+				(status & ACPI_EVENT_FLAG_SET))
+			result = acpi_clear_event(event);
+		else
+			all_counters[index].count = strtoul(buf, NULL, 0);
 	} else
-		all_counters[index] = strtoul(buf, NULL, 0);
+		all_counters[index].count = strtoul(buf, NULL, 0);
 
-	return size;
+	if (ACPI_FAILURE(result))
+		result = -EINVAL;
+end:
+	return result ? result : size;
 }
 
 void acpi_irq_stats_init(void)
@@ -298,7 +440,8 @@ void acpi_irq_stats_init(void)
 	if (all_attrs == NULL)
 		return;
 
-	all_counters = kzalloc(sizeof(u32) * (num_counters), GFP_KERNEL);
+	all_counters = kzalloc(sizeof(struct event_counter) * (num_counters),
+				GFP_KERNEL);
 	if (all_counters == NULL)
 		goto fail;