sched_features.h
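How this header is consumed (a sketch of the multiple-inclusion scheme kernel/sched.c used in kernels of this era; exact macro spelling may vary by version): each SCHED_FEAT(name, default) line below expands once into an enum bit index and once into a default bit in a runtime-tweakable mask, and sched_feat() is a plain bit test.

/* Sketch of the consumer side in kernel/sched.c. */

/* Pass 1: each feature becomes an enum constant (a bit index). */
#define SCHED_FEAT(name, enabled)	__SCHED_FEAT_##name,
enum {
#include "sched_features.h"
};
#undef SCHED_FEAT

/* Pass 2: OR the defaults into the sysctl-visible bitmask. */
#define SCHED_FEAT(name, enabled)	(1UL << __SCHED_FEAT_##name) * enabled |
const_debug unsigned int sysctl_sched_features =
#include "sched_features.h"
	0;
#undef SCHED_FEAT

/* Feature checks then compile down to a single bit test. */
#define sched_feat(x)	(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))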

/*
 * Disregards a certain amount of sleep time (sched_latency_ns) and
 * considers the task to be running during that period. This gives it
 * a service deficit on wakeup, allowing it to run sooner.
 */
SCHED_FEAT(FAIR_SLEEPERS, 1)
/*
 * Only give sleepers 50% of their service deficit. This allows
 * them to run sooner, but does not allow tons of sleepers to
 * rip the spread apart.
 */
SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 1)
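A sketch of how these two bits interact on the wakeup path, from place_entity() in kernel/sched_fair.c of this era (lightly simplified):

/* place_entity(): grant a sleeper credit, gently if asked */
if (!initial && sched_feat(FAIR_SLEEPERS)) {
	unsigned long thresh = sysctl_sched_latency;

	/* halve the credit so a crowd of sleepers cannot
	 * rip the spread apart */
	if (sched_feat(GENTLE_FAIR_SLEEPERS))
		thresh >>= 1;

	vruntime -= thresh;
}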
/*
 * Place new tasks ahead so that they do not starve already running
 * tasks.
 */
SCHED_FEAT(START_DEBIT, 1)
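The corresponding fork-time branch, also from place_entity() (sketch):

/* place_entity(): charge a new task one full slice up front */
if (initial && sched_feat(START_DEBIT))
	vruntime += sched_vslice(cfs_rq, se);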
/*
 * Should wakeups try to preempt running tasks?
 */
SCHED_FEAT(WAKEUP_PREEMPT, 1)
/*
 * When converting the wakeup granularity to virtual time, do it such
 * that heavier tasks preempting a lighter task have an edge.
 */
SCHED_FEAT(ASYM_GRAN, 1)
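Roughly how this enters the preemption test, per wakeup_gran() in kernel/sched_fair.c (simplified sketch; the real version has extra weight checks):

/* Convert the wakeup granularity from real to virtual time. With
 * ASYM_GRAN only the wakee's weight is used, so a heavy wakee sees a
 * smaller vruntime threshold and preempts a light task more easily. */
static unsigned long
wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
{
	unsigned long gran = sysctl_sched_wakeup_granularity;

	if (sched_feat(ASYM_GRAN))
		gran = calc_delta_fair(gran, se);	/* wakee's weight */
	else
		gran = calc_delta_fair(gran, curr);	/* curr's weight */

	return gran;
}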
/*
 * Always wakeup-preempt SYNC wakeups, see SYNC_WAKEUPS.
 */
SCHED_FEAT(WAKEUP_SYNC, 0)
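Both gates sit near the top of the wakeup-preemption path, check_preempt_wakeup() in kernel/sched_fair.c (sketch):

/* check_preempt_wakeup() (simplified) */
if (!sched_feat(WAKEUP_PREEMPT))
	return;				/* never preempt on wakeup */

if (sched_feat(WAKEUP_SYNC) && sync)
	goto preempt;			/* SYNC wakeups always preempt */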
/*
 * Use the SYNC wakeup hint; pipes and the like use this to indicate
 * that the remote end is likely to consume the data we just wrote, and
 * therefore has a cache benefit from being placed on the same cpu, see
 * also AFFINE_WAKEUPS.
 */
SCHED_FEAT(SYNC_WAKEUPS, 1)
/*
 * Based on load and program behaviour, see if it makes sense to place
 * a newly woken task on the same cpu as the task that woke it --
 * improve cache locality. Typically used with SYNC wakeups as
 * generated by pipes and the like, see also SYNC_WAKEUPS.
 */
SCHED_FEAT(AFFINE_WAKEUPS, 1)
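A heavily simplified sketch of the affine decision (wake_affine() in kernel/sched_fair.c; the real code weighs imbalance percentages and group effective load):

/* wake_affine() (heavily simplified) */
this_load = target_load(this_cpu, idx);
load = source_load(prev_cpu, idx);

/* a SYNC waker promises to block: discount its own weight */
if (sync)
	this_load -= current->se.load.weight;

/* pull the wakee here if this cpu still looks no busier */
return this_load <= load;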
/*
 * Prefer to schedule the task we woke last (assuming it failed
 * wakeup-preemption), since it is likely going to consume data we
 * touched; increases cache locality.
 */
SCHED_FEAT(NEXT_BUDDY, 0)
/*
 * Prefer to schedule the task that ran last (when we did
 * wakeup-preempt), as it will likely touch the same data; increases
 * cache locality.
 */
SCHED_FEAT(LAST_BUDDY, 1)
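The buddies are hints recorded on the wakeup path and honoured at pick time; roughly, from kernel/sched_fair.c of this era (guards simplified):

/* check_preempt_wakeup(): record the buddy hints */
if (sched_feat(NEXT_BUDDY))
	set_next_buddy(pse);	/* pse: the task we just woke */
if (sched_feat(LAST_BUDDY))
	set_last_buddy(se);	/* se: the task being preempted */

/* pick_next_entity(): prefer a buddy over the leftmost entity,
 * provided that does not create too much unfairness */
if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
	se = cfs_rq->next;
if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
	se = cfs_rq->last;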
/*
 * Consider buddies to be cache hot; decreases the likelihood of a
 * cache buddy being migrated away, increases cache locality.
 */
SCHED_FEAT(CACHE_HOT_BUDDY, 1)
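On the load-balancing side, a buddy is simply reported as cache hot so the balancer leaves it alone; from task_hot() in kernel/sched.c (sketch):

/* task_hot(): buddies count as cache hot */
if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
    (&p->se == cfs_rq_of(&p->se)->next ||
     &p->se == cfs_rq_of(&p->se)->last))
	return 1;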
/*
 * Use arch-dependent cpu power functions.
 */
SCHED_FEAT(ARCH_POWER, 0)
/*
 * The remaining bits are undocumented here: HRTICK and DOUBLE_TICK
 * gate the hrtimer-based scheduler tick, while LB_BIAS,
 * LB_SHARES_UPDATE and ASYM_EFF_LOAD tune load-balancing heuristics.
 */
SCHED_FEAT(HRTICK, 0)
SCHED_FEAT(DOUBLE_TICK, 0)
SCHED_FEAT(LB_BIAS, 1)
SCHED_FEAT(LB_SHARES_UPDATE, 1)
SCHED_FEAT(ASYM_EFF_LOAD, 1)
/*
 * Spin-wait on mutex acquisition when the mutex owner is running on
 * another cpu -- assumes that when the owner is running, it will soon
 * release the lock. Decreases scheduling overhead.
 */
SCHED_FEAT(OWNER_SPIN, 1)
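A sketch of the spin loop this bit gates, after mutex_spin_on_owner() in kernels of this era (heavily simplified; owner_running() stands in for the real on-cpu check through the owner's thread_info):

/* Spin while the owner is on a cpu; it should unlock soon. */
while (owner_running(lock, owner)) {	/* owner_running(): stand-in */
	if (need_resched())
		return 0;		/* stop spinning, sleep instead */
	cpu_relax();			/* polite busy-wait */
}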