/*
 * sched_features.h -- scheduler feature flags.
 *
 * Each SCHED_FEAT(name, enabled) entry declares one runtime-toggleable
 * scheduler feature together with its default value; the SCHED_FEAT macro
 * is defined by the including translation unit.
 */
  1. /*
  2. * Disregards a certain amount of sleep time (sched_latency_ns) and
  3. * considers the task to be running during that period. This gives it
  4. * a service deficit on wakeup, allowing it to run sooner.
  5. */
  6. SCHED_FEAT(FAIR_SLEEPERS, 1)
  7. /*
  8. * Only give sleepers 50% of their service deficit. This allows
  9. * them to run sooner, but does not allow tons of sleepers to
  10. * rip the spread apart.
  11. */
  12. SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 1)
  13. /*
  14. * By not normalizing the sleep time, heavy tasks get an effective
  15. * longer period, and lighter task an effective shorter period they
  16. * are considered running.
  17. */
  18. SCHED_FEAT(NORMALIZED_SLEEPER, 0)
  19. /*
  20. * Place new tasks ahead so that they do not starve already running
  21. * tasks
  22. */
  23. SCHED_FEAT(START_DEBIT, 1)
  24. /*
  25. * Should wakeups try to preempt running tasks.
  26. */
  27. SCHED_FEAT(WAKEUP_PREEMPT, 1)
  28. /*
  29. * When converting the wakeup granularity to virtual time, do it such
  30. * that heavier tasks preempting a lighter task have an edge.
  31. */
  32. SCHED_FEAT(ASYM_GRAN, 1)
  33. /*
  34. * Always wakeup-preempt SYNC wakeups, see SYNC_WAKEUPS.
  35. */
  36. SCHED_FEAT(WAKEUP_SYNC, 0)
  37. /*
  38. * Use the SYNC wakeup hint, pipes and the likes use this to indicate
  39. * the remote end is likely to consume the data we just wrote, and
  40. * therefore has cache benefit from being placed on the same cpu, see
  41. * also AFFINE_WAKEUPS.
  42. */
  43. SCHED_FEAT(SYNC_WAKEUPS, 1)
  44. /*
  45. * Based on load and program behaviour, see if it makes sense to place
  46. * a newly woken task on the same cpu as the task that woke it --
  47. * improve cache locality. Typically used with SYNC wakeups as
  48. * generated by pipes and the like, see also SYNC_WAKEUPS.
  49. */
  50. SCHED_FEAT(AFFINE_WAKEUPS, 1)
  51. /*
  52. * Prefer to schedule the task we woke last (assuming it failed
  53. * wakeup-preemption), since its likely going to consume data we
  54. * touched, increases cache locality.
  55. */
  56. SCHED_FEAT(NEXT_BUDDY, 0)
  57. /*
  58. * Prefer to schedule the task that ran last (when we did
  59. * wake-preempt) as that likely will touch the same data, increases
  60. * cache locality.
  61. */
  62. SCHED_FEAT(LAST_BUDDY, 1)
  63. /*
  64. * Consider buddies to be cache hot, decreases the likelyness of a
  65. * cache buddy being migrated away, increases cache locality.
  66. */
  67. SCHED_FEAT(CACHE_HOT_BUDDY, 1)
  68. /*
  69. * Use arch dependent cpu power functions
  70. */
  71. SCHED_FEAT(ARCH_POWER, 0)
  72. SCHED_FEAT(HRTICK, 0)
  73. SCHED_FEAT(DOUBLE_TICK, 0)
  74. SCHED_FEAT(LB_BIAS, 1)
  75. SCHED_FEAT(LB_SHARES_UPDATE, 1)
  76. SCHED_FEAT(ASYM_EFF_LOAD, 1)
  77. /*
  78. * Spin-wait on mutex acquisition when the mutex owner is running on
  79. * another cpu -- assumes that when the owner is running, it will soon
  80. * release the lock. Decreases scheduling overhead.
  81. */
  82. SCHED_FEAT(OWNER_SPIN, 1)