
/*
 * kernel/sched_cpupri.c
 *
 * CPU priority management
 *
 * Copyright (C) 2007-2008 Novell
 *
 * Author: Gregory Haskins <ghaskins@novell.com>
 *
 * This code tracks the priority of each CPU so that global migration
 * decisions are easy to calculate.  Each CPU can be in a state as follows:
 *
 *                 (INVALID), IDLE, NORMAL, RT1, ... RT99
 *
 * going from the lowest priority to the highest.  CPUs in the INVALID state
 * are not eligible for routing.  The system maintains this state with
 * a 2 dimensional bitmap (the first for priority class, the second for cpus
 * in that class).  Therefore a typical application without affinity
 * restrictions can find a suitable CPU with O(1) complexity (e.g. two bit
 * searches).  For tasks with affinity restrictions, the algorithm has a
 * worst case complexity of O(min(102, nr_domcpus)), though the scenario that
 * yields the worst case search is fairly contrived.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/gfp.h>

#include "sched_cpupri.h"
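
/*
 * For reference, the backing structure (declared in sched_cpupri.h) looks
 * roughly like the following -- a sketch for orientation, not a verbatim
 * copy of the header:
 *
 *	struct cpupri_vec {
 *		atomic_t	count;
 *		cpumask_var_t	mask;
 *	};
 *
 *	struct cpupri {
 *		struct cpupri_vec pri_to_cpu[CPUPRI_NR_PRIORITIES];
 *		int		  cpu_to_pri[NR_CPUS];
 *	};
 *
 * pri_to_cpu[] answers "which CPUs are currently in priority class idx?",
 * while cpu_to_pri[] answers the inverse, "which class is this CPU in?".
 */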
/* Convert between a 140 based task->prio, and our 102 based cpupri */
static int convert_prio(int prio)
{
	int cpupri;

	if (prio == CPUPRI_INVALID)
		cpupri = CPUPRI_INVALID;
	else if (prio == MAX_PRIO)
		cpupri = CPUPRI_IDLE;
	else if (prio >= MAX_RT_PRIO)
		cpupri = CPUPRI_NORMAL;
	else
		cpupri = MAX_RT_PRIO - prio + 1;

	return cpupri;
}
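
/*
 * A worked example of the mapping above, assuming the usual values
 * MAX_RT_PRIO == 100 and MAX_PRIO == 140 (the results below are derived
 * from the code, not quoted from the header):
 *
 *	convert_prio(140) == 0   (CPUPRI_IDLE)
 *	convert_prio(120) == 1   (CPUPRI_NORMAL, e.g. a nice-0 SCHED_OTHER task)
 *	convert_prio(99)  == 2   (RT1, the lowest RT priority)
 *	convert_prio(0)   == 101 (RT99, the highest RT priority)
 *
 * which, together with CPUPRI_INVALID == -1, is where the "102 based"
 * range comes from.
 */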
/**
 * cpupri_find - find the best (lowest-pri) CPU in the system
 * @cp: The cpupri context
 * @p: The task
 * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
 *
 * Note: This function returns the recommended CPUs as calculated during the
 * current invocation.  By the time the call returns, the CPUs may have in
 * fact changed priorities any number of times.  While not ideal, it is not
 * an issue of correctness since the normal rebalancer logic will correct
 * any discrepancies created by racing against the uncertainty of the current
 * priority configuration.
 *
 * Returns: (int)bool - CPUs were found
 */
int cpupri_find(struct cpupri *cp, struct task_struct *p,
		struct cpumask *lowest_mask)
{
	int idx = 0;
	int task_pri = convert_prio(p->prio);

	if (task_pri >= MAX_RT_PRIO)
		return 0;

	for (idx = 0; idx < task_pri; idx++) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[idx];

		if (!atomic_read(&vec->count))
			continue;
		/*
		 * When looking at the vector, we need to read the counter,
		 * do a memory barrier, then read the mask.
		 *
		 * Note: This is still all racy, but we can deal with it.
		 * Ideally, we only want to look at masks that are set.
		 *
		 * If a mask is not set, then the only thing wrong is that we
		 * did a little more work than necessary.
		 *
		 * If we read a zero count but the mask is set, because of the
		 * memory barriers, that can only happen when the highest prio
		 * task for a run queue has left the run queue, in which case
		 * it will be followed by a pull.  If the task we are processing
		 * fails to find a proper place to go, that pull request will
		 * pull this task if the run queue is running at a lower
		 * priority.
		 */
		smp_rmb();

		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
			continue;

		if (lowest_mask) {
			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);

			/*
			 * We have to ensure that we have at least one bit
			 * still set in the array, since the map could have
			 * been concurrently emptied between the first and
			 * second reads of vec->mask.  If we hit this
			 * condition, simply act as though we never hit this
			 * priority level and continue on.
			 */
			if (cpumask_any(lowest_mask) >= nr_cpu_ids)
				continue;
		}

		return 1;
	}

	return 0;
}
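
/*
 * Example usage (a simplified sketch, not the scheduler's actual caller):
 * the RT push logic asks cpupri_find() for the set of CPUs running below
 * the task's priority and then picks one of them.  The real caller,
 * find_lowest_rq() in sched_rt.c, uses a preallocated per-cpu mask rather
 * than the stack mask shown here:
 *
 *	static int example_pick_lowest_cpu(struct cpupri *cp,
 *					   struct task_struct *p)
 *	{
 *		struct cpumask lowest_mask;
 *
 *		if (!cpupri_find(cp, p, &lowest_mask))
 *			return -1;	// every CPU is as busy or busier
 *
 *		// Prefer the task's current CPU if it qualifies ...
 *		if (cpumask_test_cpu(task_cpu(p), &lowest_mask))
 *			return task_cpu(p);
 *
 *		// ... otherwise any CPU from the mask will do.
 *		return cpumask_any(&lowest_mask);
 *	}
 */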
/**
 * cpupri_set - update the cpu priority setting
 * @cp: The cpupri context
 * @cpu: The target cpu
 * @newpri: The priority (INVALID-RT99) to assign to this CPU
 *
 * Note: Assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpupri_set(struct cpupri *cp, int cpu, int newpri)
{
	int *currpri = &cp->cpu_to_pri[cpu];
	int oldpri = *currpri;

	newpri = convert_prio(newpri);

	BUG_ON(newpri >= CPUPRI_NR_PRIORITIES);

	if (newpri == oldpri)
		return;
	/*
	 * If the cpu was currently mapped to a different value, we
	 * need to map it to the new value then remove the old value.
	 * Note, we must add the new value first, otherwise we risk the
	 * cpu briefly vanishing from every priority vector, and this
	 * cpu could be missed for a push or pull.
	 */
	if (likely(newpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];

		cpumask_set_cpu(cpu, vec->mask);
		/*
		 * When adding a new vector, we update the mask first,
		 * do a write memory barrier, and then update the count, to
		 * make sure the vector is visible when count is set.
		 */
		smp_wmb();
		atomic_inc(&vec->count);
	}
	if (likely(oldpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];

		/*
		 * When removing from the vector, we decrement the counter
		 * first, do a memory barrier, and then clear the mask.
		 */
		atomic_dec(&vec->count);
		smp_wmb();
		cpumask_clear_cpu(cpu, vec->mask);
	}

	*currpri = newpri;
}
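
/*
 * Example usage (a sketch; the real callers live in sched_rt.c, and the
 * field names below are assumptions based on that code): the RT scheduler
 * updates the map whenever a run queue's top priority changes, e.g.:
 *
 *	// rq's highest runnable RT priority changed to 'prio' (0..99).
 *	cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
 *
 *	// rq no longer runs any RT task: highest_prio.curr is reset to
 *	// MAX_RT_PRIO, which convert_prio() maps to CPUPRI_NORMAL.
 *	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
 *
 *	// rq leaves the root domain and must stop receiving pushed tasks.
 *	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
 */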
/**
 * cpupri_init - initialize the cpupri structure
 * @cp: The cpupri context
 *
 * Returns: 0 on success, -ENOMEM if memory allocation fails.
 */
int cpupri_init(struct cpupri *cp)
{
	int i;

	memset(cp, 0, sizeof(*cp));

	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[i];

		atomic_set(&vec->count, 0);
		if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
			goto cleanup;
	}

	for_each_possible_cpu(i)
		cp->cpu_to_pri[i] = CPUPRI_INVALID;

	return 0;

cleanup:
	for (i--; i >= 0; i--)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
	return -ENOMEM;
}
/**
 * cpupri_cleanup - clean up the cpupri structure
 * @cp: The cpupri context
 */
void cpupri_cleanup(struct cpupri *cp)
{
	int i;

	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
}