/*
 * sched_features.h — scheduler feature toggles.
 *
 * Each SCHED_FEAT(name, enabled) entry declares a runtime-tunable
 * scheduler feature; the SCHED_FEAT macro is defined by the file
 * that includes this header (multiple-inclusion pattern).
 */
  1. /*
  2. * Only give sleepers 50% of their service deficit. This allows
  3. * them to run sooner, but does not allow tons of sleepers to
  4. * rip the spread apart.
  5. */
  6. SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 1)
  7. /*
  8. * Place new tasks ahead so that they do not starve already running
  9. * tasks
  10. */
  11. SCHED_FEAT(START_DEBIT, 1)
  12. /*
  13. * Should wakeups try to preempt running tasks.
  14. */
  15. SCHED_FEAT(WAKEUP_PREEMPT, 1)
  16. /*
  17. * When converting the wakeup granularity to virtual time, do it such
  18. * that heavier tasks preempting a lighter task have an edge.
  19. */
  20. SCHED_FEAT(ASYM_GRAN, 1)
  21. /*
  22. * Always wakeup-preempt SYNC wakeups, see SYNC_WAKEUPS.
  23. */
  24. SCHED_FEAT(WAKEUP_SYNC, 0)
  25. /*
  26. * Use the SYNC wakeup hint, pipes and the likes use this to indicate
  27. * the remote end is likely to consume the data we just wrote, and
  28. * therefore has cache benefit from being placed on the same cpu, see
  29. * also AFFINE_WAKEUPS.
  30. */
  31. SCHED_FEAT(SYNC_WAKEUPS, 1)
  32. /*
  33. * Based on load and program behaviour, see if it makes sense to place
  34. * a newly woken task on the same cpu as the task that woke it --
  35. * improve cache locality. Typically used with SYNC wakeups as
  36. * generated by pipes and the like, see also SYNC_WAKEUPS.
  37. */
  38. SCHED_FEAT(AFFINE_WAKEUPS, 1)
  39. /*
  40. * Prefer to schedule the task we woke last (assuming it failed
  41. * wakeup-preemption), since its likely going to consume data we
  42. * touched, increases cache locality.
  43. */
  44. SCHED_FEAT(NEXT_BUDDY, 0)
  45. /*
  46. * Prefer to schedule the task that ran last (when we did
  47. * wake-preempt) as that likely will touch the same data, increases
  48. * cache locality.
  49. */
  50. SCHED_FEAT(LAST_BUDDY, 1)
  51. /*
  52. * Consider buddies to be cache hot, decreases the likelyness of a
  53. * cache buddy being migrated away, increases cache locality.
  54. */
  55. SCHED_FEAT(CACHE_HOT_BUDDY, 1)
  56. /*
  57. * Use arch dependent cpu power functions
  58. */
  59. SCHED_FEAT(ARCH_POWER, 0)
  60. SCHED_FEAT(HRTICK, 0)
  61. SCHED_FEAT(DOUBLE_TICK, 0)
  62. SCHED_FEAT(LB_BIAS, 1)
  63. SCHED_FEAT(LB_SHARES_UPDATE, 1)
  64. SCHED_FEAT(ASYM_EFF_LOAD, 1)
  65. /*
  66. * Spin-wait on mutex acquisition when the mutex owner is running on
  67. * another cpu -- assumes that when the owner is running, it will soon
  68. * release the lock. Decreases scheduling overhead.
  69. */
  70. SCHED_FEAT(OWNER_SPIN, 1)