@@ -61,11 +61,14 @@ static int acpi_processor_update_tsd_coord(void)
 	int count, count_target;
 	int retval = 0;
 	unsigned int i, j;
-	cpumask_t covered_cpus;
+	cpumask_var_t covered_cpus;
 	struct acpi_processor *pr, *match_pr;
 	struct acpi_tsd_package *pdomain, *match_pdomain;
 	struct acpi_processor_throttling *pthrottling, *match_pthrottling;
 
+	if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
+		return -ENOMEM;
+
 	/*
 	 * Now that we have _TSD data from all CPUs, lets setup T-state
 	 * coordination between all CPUs.
@@ -91,19 +94,19 @@ static int acpi_processor_update_tsd_coord(void)
 	if (retval)
 		goto err_ret;
 
-	cpus_clear(covered_cpus);
+	cpumask_clear(covered_cpus);
 	for_each_possible_cpu(i) {
 		pr = per_cpu(processors, i);
 		if (!pr)
 			continue;
 
-		if (cpu_isset(i, covered_cpus))
+		if (cpumask_test_cpu(i, covered_cpus))
 			continue;
 		pthrottling = &pr->throttling;
 
 		pdomain = &(pthrottling->domain_info);
-		cpu_set(i, pthrottling->shared_cpu_map);
-		cpu_set(i, covered_cpus);
+		cpumask_set_cpu(i, pthrottling->shared_cpu_map);
+		cpumask_set_cpu(i, covered_cpus);
 		/*
 		 * If the number of processor in the TSD domain is 1, it is
 		 * unnecessary to parse the coordination for this CPU.
@@ -144,8 +147,8 @@ static int acpi_processor_update_tsd_coord(void)
 				goto err_ret;
 			}
 
-			cpu_set(j, covered_cpus);
-			cpu_set(j, pthrottling->shared_cpu_map);
+			cpumask_set_cpu(j, covered_cpus);
+			cpumask_set_cpu(j, pthrottling->shared_cpu_map);
 			count++;
 		}
 		for_each_possible_cpu(j) {
@@ -165,12 +168,14 @@ static int acpi_processor_update_tsd_coord(void)
 			 * If some CPUS have the same domain, they
 			 * will have the same shared_cpu_map.
 			 */
-			match_pthrottling->shared_cpu_map =
-				pthrottling->shared_cpu_map;
+			cpumask_copy(match_pthrottling->shared_cpu_map,
+				     pthrottling->shared_cpu_map);
 		}
 	}
 
 err_ret:
+	free_cpumask_var(covered_cpus);
+
 	for_each_possible_cpu(i) {
 		pr = per_cpu(processors, i);
 		if (!pr)
@@ -182,8 +187,8 @@ err_ret:
 		 */
 		if (retval) {
 			pthrottling = &(pr->throttling);
-			cpus_clear(pthrottling->shared_cpu_map);
-			cpu_set(i, pthrottling->shared_cpu_map);
+			cpumask_clear(pthrottling->shared_cpu_map);
+			cpumask_set_cpu(i, pthrottling->shared_cpu_map);
 			pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
 		}
 	}
@@ -567,7 +572,7 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr)
 	pthrottling = &pr->throttling;
 	pthrottling->tsd_valid_flag = 1;
 	pthrottling->shared_type = pdomain->coord_type;
-	cpu_set(pr->id, pthrottling->shared_cpu_map);
+	cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
 	/*
 	 * If the coordination type is not defined in ACPI spec,
 	 * the tsd_valid_flag will be clear and coordination type
@@ -826,7 +831,7 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
 
 static int acpi_processor_get_throttling(struct acpi_processor *pr)
 {
-	cpumask_t saved_mask;
+	cpumask_var_t saved_mask;
 	int ret;
 
 	if (!pr)
@@ -834,14 +839,20 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
 
 	if (!pr->flags.throttling)
 		return -ENODEV;
+
+	if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
+		return -ENOMEM;
+
 	/*
 	 * Migrate task to the cpu pointed by pr.
 	 */
-	saved_mask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
+	cpumask_copy(saved_mask, &current->cpus_allowed);
+	/* FIXME: use work_on_cpu() */
+	set_cpus_allowed_ptr(current, cpumask_of(pr->id));
 	ret = pr->throttling.acpi_processor_get_throttling(pr);
 	/* restore the previous state */
-	set_cpus_allowed_ptr(current, &saved_mask);
+	set_cpus_allowed_ptr(current, saved_mask);
+	free_cpumask_var(saved_mask);
 
 	return ret;
 }
@@ -986,13 +997,13 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
 
 int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 {
-	cpumask_t saved_mask;
+	cpumask_var_t saved_mask;
 	int ret = 0;
 	unsigned int i;
 	struct acpi_processor *match_pr;
 	struct acpi_processor_throttling *p_throttling;
 	struct throttling_tstate t_state;
-	cpumask_t online_throttling_cpus;
+	cpumask_var_t online_throttling_cpus;
 
 	if (!pr)
 		return -EINVAL;
@@ -1003,17 +1014,25 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
 		return -EINVAL;
 
-	saved_mask = current->cpus_allowed;
+	if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
+		return -ENOMEM;
+
+	if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
+		free_cpumask_var(saved_mask);
+		return -ENOMEM;
+	}
+
+	cpumask_copy(saved_mask, &current->cpus_allowed);
 	t_state.target_state = state;
 	p_throttling = &(pr->throttling);
-	cpus_and(online_throttling_cpus, cpu_online_map,
-			p_throttling->shared_cpu_map);
+	cpumask_and(online_throttling_cpus, cpu_online_mask,
+		    p_throttling->shared_cpu_map);
 	/*
 	 * The throttling notifier will be called for every
 	 * affected cpu in order to get one proper T-state.
 	 * The notifier event is THROTTLING_PRECHANGE.
 	 */
-	for_each_cpu_mask_nr(i, online_throttling_cpus) {
+	for_each_cpu(i, online_throttling_cpus) {
 		t_state.cpu = i;
 		acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
 							&t_state);
@@ -1025,7 +1044,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 	 * it can be called only for the cpu pointed by pr.
 	 */
 	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
-		set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
+		/* FIXME: use work_on_cpu() */
+		set_cpus_allowed_ptr(current, cpumask_of(pr->id));
 		ret = p_throttling->acpi_processor_set_throttling(pr,
 				t_state.target_state);
 	} else {
@@ -1034,7 +1054,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 		 * it is necessary to set T-state for every affected
 		 * cpus.
 		 */
-		for_each_cpu_mask_nr(i, online_throttling_cpus) {
+		for_each_cpu(i, online_throttling_cpus) {
 			match_pr = per_cpu(processors, i);
 			/*
 			 * If the pointer is invalid, we will report the
@@ -1056,7 +1076,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 				continue;
 			}
 			t_state.cpu = i;
-			set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
+			/* FIXME: use work_on_cpu() */
+			set_cpus_allowed_ptr(current, cpumask_of(i));
 			ret = match_pr->throttling.
 				acpi_processor_set_throttling(
 				match_pr, t_state.target_state);
@@ -1068,13 +1089,16 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 	 * affected cpu to update the T-states.
 	 * The notifier event is THROTTLING_POSTCHANGE
 	 */
-	for_each_cpu_mask_nr(i, online_throttling_cpus) {
+	for_each_cpu(i, online_throttling_cpus) {
 		t_state.cpu = i;
 		acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
 							&t_state);
 	}
 	/* restore the previous state */
-	set_cpus_allowed_ptr(current, &saved_mask);
+	/* FIXME: use work_on_cpu() */
+	set_cpus_allowed_ptr(current, saved_mask);
+	free_cpumask_var(online_throttling_cpus);
+	free_cpumask_var(saved_mask);
 	return ret;
 }
 
@@ -1120,7 +1144,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
 	if (acpi_processor_get_tsd(pr)) {
 		pthrottling = &pr->throttling;
 		pthrottling->tsd_valid_flag = 0;
-		cpu_set(pr->id, pthrottling->shared_cpu_map);
+		cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
 		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
 	}
 
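A minimal standalone sketch (not part of the patch) of the cpumask_var_t pattern the conversion above adopts: the mask lives behind a pointer that must be set up with alloc_cpumask_var() and released with free_cpumask_var(), and the cpumask_*() helpers then operate on that pointer rather than on an on-stack cpumask_t. The helper name and its parameter below are hypothetical, for illustration only.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Illustrative only: walk the online CPUs inside a shared throttling
 * domain using an allocated cpumask instead of an on-stack cpumask_t,
 * which can be large when NR_CPUS is big.
 */
static int example_walk_domain(const struct cpumask *shared_cpu_map)
{
	cpumask_var_t online_in_domain;
	unsigned int cpu;

	if (!alloc_cpumask_var(&online_in_domain, GFP_KERNEL))
		return -ENOMEM;

	/* online_in_domain = cpu_online_mask & shared_cpu_map */
	cpumask_and(online_in_domain, cpu_online_mask, shared_cpu_map);

	for_each_cpu(cpu, online_in_domain) {
		/* per-CPU work would go here */
	}

	free_cpumask_var(online_in_domain);
	return 0;
}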