sched.h

#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>

#include "cpupri.h"

extern __read_mostly int scheduler_running;

/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 * and back.
 */
#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)

/*
 * 'User priority' is the nice value converted to something we
 * can work with better when scaling various scheduler parameters;
 * it's a [ 0 ... 39 ] range.
 */
#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO		(USER_PRIO(MAX_PRIO))
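
/*
 * Worked example (illustrative only, assuming the usual MAX_RT_PRIO of
 * 100 and MAX_PRIO of 140):
 *
 *	NICE_TO_PRIO(0)   == 100 + 0 + 20    == 120
 *	PRIO_TO_NICE(120) == 120 - 100 - 20  == 0
 *	USER_PRIO(120)    == 20, and MAX_USER_PRIO == USER_PRIO(140) == 40
 */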

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
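
/*
 * Example (illustrative; HZ is config-dependent): with HZ == 1000 one
 * jiffy is 10^6 ns, so NS_TO_JIFFIES(5000000) == 5.
 */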

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
 * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
 * increased costs.
 */
#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
# define SCHED_LOAD_RESOLUTION	10
# define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
# define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
#else
# define SCHED_LOAD_RESOLUTION	0
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

#define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)

#define NICE_0_LOAD		SCHED_LOAD_SCALE
#define NICE_0_SHIFT		SCHED_LOAD_SHIFT
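
/*
 * Illustration: with SCHED_LOAD_RESOLUTION == 0 (the compiled-in case
 * above), SCHED_LOAD_SHIFT == 10 and NICE_0_LOAD == 1024, matching the
 * nice-0 entry of prio_to_weight[] further down. With the 10-bit
 * resolution enabled, scale_load(1024) would be 1024 << 10 == 1048576
 * internally, while user-visible shares stay unchanged via
 * scale_load_down().
 */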

/*
 * These are the 'tuning knobs' of the scheduler:
 */

/*
 * single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF	((u64)~0ULL)

static inline int rt_policy(int policy)
{
	if (policy == SCHED_FIFO || policy == SCHED_RR)
		return 1;
	return 0;
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};
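
/*
 * Illustration (a sketch of the dequeue side, cf. pick_next_rt_entity()
 * in rt.c): the bitmap makes finding the highest-priority non-empty
 * queue O(1):
 *
 *	int idx = sched_find_first_bit(array->bitmap);
 *	struct list_head *queue = array->queue + idx;
 *	next = list_entry(queue->next, struct sched_rt_entity, run_list);
 */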

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
};

extern struct mutex sched_domains_mutex;

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t lock;
	ktime_t period;
	u64 quota, runtime;
	s64 hierarchal_quota;
	u64 runtime_expires;

	int idle, timer_active;
	struct hrtimer period_timer, slack_timer;
	struct list_head throttled_cfs_rq;

	/* statistics */
	int nr_periods, nr_throttled;
	u64 throttled_time;
#endif
};

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

	atomic_t load_weight;
	atomic64_t load_avg;
	atomic_t runnable_avg;
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	struct cfs_bandwidth cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so neither an entity's weight nor a task group's
 * shares value should be too large.
 * (The default weight is 1024 - so there's no practical
 * limitation from this.)
 */
#define MIN_SHARES	(1UL <<  1)
#define MAX_SHARES	(1UL << 18)
#endif
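
/*
 * In other words: group shares are clamped to [2, 1 << 18] == [2, 262144].
 * sched_group_set_shares() (declared below) applies this clamp, so
 * userspace can neither zero a group's weight nor push it into
 * overflow-prone territory.
 */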

/* Default task group.
 *	Every task in the system belongs to this group at bootup.
 */
extern struct task_group root_task_group;

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}

extern int tg_nop(struct task_group *tg, void *data);
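
/*
 * Sketch (hypothetical visitor, not part of this header): count every
 * task group in the tree. tg_nop() serves as the no-op @up callback;
 * returning non-zero from a visitor aborts the walk.
 */
static inline int tg_count_one(struct task_group *tg, void *data)
{
	(*(int *)data)++;
	return 0;
}
/*
 * Usage, under RCU:
 *
 *	int n = 0;
 *	rcu_read_lock();
 *	walk_tg_tree(tg_count_one, tg_nop, &n);
 *	rcu_read_unlock();
 */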

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif	/* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned int nr_running, h_nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef	CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_SMP
/*
 * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
 * removed when useful for applications beyond shares distribution (e.g.
 * load-balance).
 */
#ifdef CONFIG_FAIR_GROUP_SCHED
	/*
	 * CFS Load tracking
	 * Under CFS, load is tracked on a per-entity basis and aggregated up.
	 * This allows for the description of both thread and group usage (in
	 * the FAIR_GROUP_SCHED case).
	 */
	u64 runnable_load_avg, blocked_load_avg;
	atomic64_t decay_counter, removed_load;
	u64 last_decay;
#endif /* CONFIG_FAIR_GROUP_SCHED */
/* These always depend on CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_FAIR_GROUP_SCHED
	u32 tg_runnable_contrib;
	u64 tg_load_contrib;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
	 * list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_CFS_BANDWIDTH
	int runtime_enabled;
	u64 runtime_expires;
	s64 runtime_remaining;

	u64 throttled_clock, throttled_clock_task;
	u64 throttled_clock_task_time;
	int throttled, throttle_count;
	struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned int rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#endif
	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct list_head leaf_rt_rq_list;
	struct task_group *tg;
#endif
};

#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
	atomic_t refcount;
	atomic_t rto_count;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;
};

extern struct root_domain def_root_domain;

#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code that wants to lock multiple runqueues (such as
 * the load balancing or the thread migration code) must acquire the
 * locks in ascending &runqueue address order.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int nr_running;
	#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
	unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ
	u64 nohz_stamp;
	unsigned long nohz_flags;
#endif
	int skip_clock_update;

	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
#ifdef CONFIG_SMP
	unsigned long h_load_throttle;
#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED
	struct list_head leaf_rt_rq_list;
#endif

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_power;

	unsigned char idle_balance;
	/* For active balancing */
	int post_schedule;
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	struct list_head cfs_tasks;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head wake_list;
#endif

	struct sched_avg avg;
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

DECLARE_PER_CPU(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		(&__get_cpu_var(runqueues))
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		(&__raw_get_cpu_var(runqueues))

#ifdef CONFIG_SMP

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
	     __sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The cpu whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given cpu.
 *
 * Returns the highest sched_domain of a cpu which contains the given flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}

	return hsd;
}
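
/*
 * Usage sketch (illustrative): this is roughly how the sd_llc per-cpu
 * pointer below is derived in core.c - the highest domain that still
 * shares a last-level cache:
 *
 *	struct sched_domain *sd;
 *
 *	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
 *	if (sd)
 *		per_cpu(sd_llc_id, cpu) = cpumask_first(sched_domain_span(sd));
 */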

DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_id);

struct sched_group_power {
	atomic_t ref;
	/*
	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
	 * single CPU.
	 */
	unsigned int power, power_orig;
	unsigned long next_update;
	/*
	 * Number of busy cpus in this group.
	 */
	atomic_t nr_busy_cpus;

	unsigned long cpumask[0]; /* iteration mask */
};

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	atomic_t ref;

	unsigned int group_weight;
	struct sched_group_power *sgp;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/*
 * cpumask masking which cpus in the group are allowed to iterate up the domain
 * tree.
 */
static inline struct cpumask *sched_group_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgp->cpumask);
}

/**
 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 * @group: The group whose first cpu is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_cpus(group));
}

extern int group_balance_cpu(struct sched_group *sg);

#endif /* CONFIG_SMP */

#include "stats.h"
#include "auto_group.h"

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_subsys_state() and friends because the cgroup
 * subsystem changes that value before the cgroup_subsys::attach() method
 * is called, therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
	smp_wmb();
	task_thread_info(p)->cpu = cpu;
#endif
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# include <linux/static_key.h>
# define const_debug __read_mostly
#else
# define const_debug const
#endif

extern const_debug unsigned int sysctl_sched_features;

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "features.h"
	__SCHED_FEAT_NR,
};

#undef SCHED_FEAT

#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
static __always_inline bool static_branch__true(struct static_key *key)
{
	return static_key_true(key); /* Not out of line branch. */
}

static __always_inline bool static_branch__false(struct static_key *key)
{
	return static_key_false(key); /* Out of line branch. */
}

#define SCHED_FEAT(name, enabled)					\
static __always_inline bool static_branch_##name(struct static_key *key) \
{									\
	return static_branch__##enabled(key);				\
}
#include "features.h"
#undef SCHED_FEAT

extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
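
/*
 * Expansion example (illustrative): a features.h line such as
 *
 *	SCHED_FEAT(HRTICK, false)
 *
 * contributes __SCHED_FEAT_HRTICK to the enum above, and sched_feat(HRTICK)
 * then becomes either a jump-label test of
 * sched_feat_keys[__SCHED_FEAT_HRTICK] or a bit test against
 * sysctl_sched_features, depending on which branch of the #if applies.
 */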

#ifdef CONFIG_NUMA_BALANCING
#define sched_feat_numa(x) sched_feat(x)
#ifdef CONFIG_SCHED_DEBUG
#define numabalancing_enabled sched_feat_numa(NUMA)
#else
extern bool numabalancing_enabled;
#endif /* CONFIG_SCHED_DEBUG */
#else
#define sched_feat_numa(x) (0)
#define numabalancing_enabled (0)
#endif /* CONFIG_NUMA_BALANCING */

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}
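
/*
 * Worked example (assuming the usual defaults, tunable via
 * /proc/sys/kernel/sched_rt_period_us and sched_rt_runtime_us):
 * sysctl_sched_rt_period == 1000000 and sysctl_sched_rt_runtime ==
 * 950000, so RT tasks may consume at most 0.95 s of every 1 s period.
 * Writing -1 to sched_rt_runtime_us yields RUNTIME_INF, i.e. no cap.
 */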

static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev)	do { } while (0)
#endif
#ifndef finish_arch_post_lock_switch
# define finish_arch_post_lock_switch()	do { } while (0)
#endif

#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->on_cpu = 0;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq->lock.owner = current;
#endif
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * prev into current:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

	raw_spin_unlock_irq(&rq->lock);
}

#else /* __ARCH_WANT_UNLOCKED_CTXSW */
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
	raw_spin_unlock(&rq->lock);
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->on_cpu = 0;
#endif
	local_irq_enable();
}
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */

/*
 * wake flags
 */
#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
#define WF_FORK		0x02		/* child wakeup after fork */
#define WF_MIGRATED	0x4		/* internal use, task got migrated */

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
	lw->weight = w;
	lw->inv_weight = 0;
}

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */
#define WEIGHT_IDLEPRIO                3
#define WMULT_IDLEPRIO         1431655765

/*
 * Nice levels are multiplicative, with a gentle 10% change for every
 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 * that remained on nice 0.
 *
 * The "10% effect" is relative and cumulative: from _any_ nice level,
 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
 * If a task goes up by ~10% and another task goes down by ~10% then
 * the relative distance between them is ~25%.)
 */
static const int prio_to_weight[40] = {
 /* -20 */     88761,     71755,     56483,     46273,     36291,
 /* -15 */     29154,     23254,     18705,     14949,     11916,
 /* -10 */      9548,      7620,      6100,      4904,      3906,
 /*  -5 */      3121,      2501,      1991,      1586,      1277,
 /*   0 */      1024,       820,       655,       526,       423,
 /*   5 */       335,       272,       215,       172,       137,
 /*  10 */       110,        87,        70,        56,        45,
 /*  15 */        36,        29,        23,        18,        15,
};
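
/*
 * Worked example of the 1.25 multiplier: nice 0 has weight 1024, so
 * nice -1 is roughly 1024 * 1.25 ~= 1277 and nice +1 roughly
 * 1024 / 1.25 ~= 820 - exactly the neighbouring table entries. Two
 * CPU-bound tasks at nice 0 and nice 1 therefore split the CPU about
 * 1024 : 820, i.e. ~55% : ~45%, the advertised ~10% step.
 */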

/*
 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
 *
 * In cases where the weight does not change often, we can use the
 * precalculated inverse to speed up arithmetics by turning divisions
 * into multiplications:
 */
static const u32 prio_to_wmult[40] = {
 /* -20 */     48388,     59856,     76040,     92818,    118348,
 /* -15 */    147320,    184698,    229616,    287308,    360437,
 /* -10 */    449829,    563644,    704093,    875809,   1099582,
 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
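
/*
 * Illustration of the inverse-multiply trick: for nice 0,
 * prio_to_wmult[20] == 4194304 == 2^32 / 1024, so a division like
 * delta / 1024 can be computed as
 *
 *	(delta * 4194304) >> 32
 *
 * which is what the weight-scaling code in the fair class relies on.
 */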

/* Time spent by the tasks of the cpu accounting group executing in ... */
enum cpuacct_stat_index {
	CPUACCT_STAT_USER,	/* ... user mode */
	CPUACCT_STAT_SYSTEM,	/* ... kernel mode */

	CPUACCT_STAT_NSTATS,
};

#define sched_class_highest (&stop_sched_class)
#define for_each_class(class) \
   for (class = sched_class_highest; class; class = class->next)

extern const struct sched_class stop_sched_class;
extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;
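
/*
 * Note (illustrative): the ->next pointers chain the classes in fixed
 * priority order, stop_sched_class -> rt_sched_class -> fair_sched_class
 * -> idle_sched_class, so for_each_class() visits them from most to
 * least privileged, e.g. when picking the next task to run.
 */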

#ifdef CONFIG_SMP
extern void trigger_load_balance(struct rq *rq, int cpu);
extern void idle_balance(int this_cpu, struct rq *this_rq);
#else	/* CONFIG_SMP */
static inline void idle_balance(int cpu, struct rq *rq)
{
}
#endif

extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);
extern void update_group_power(struct sched_domain *sd, int cpu);
extern int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);

extern void resched_task(struct task_struct *p);
extern void resched_cpu(int cpu);

extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);

extern void update_idle_cpu_load(struct rq *this_rq);

#ifdef CONFIG_CGROUP_CPUACCT
#include <linux/cgroup.h>
/* track cpu usage of a group of tasks and its child groups */
struct cpuacct {
	struct cgroup_subsys_state css;
	/* cpuusage holds pointer to a u64-type object on every cpu */
	u64 __percpu *cpuusage;
	struct kernel_cpustat __percpu *cpustat;
};

extern struct cgroup_subsys cpuacct_subsys;
extern struct cpuacct root_cpuacct;

/* return cpu accounting group corresponding to this container */
static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
{
	return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
			    struct cpuacct, css);
}

/* return cpu accounting group to which this task belongs */
static inline struct cpuacct *task_ca(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
			    struct cpuacct, css);
}

static inline struct cpuacct *parent_ca(struct cpuacct *ca)
{
	if (!ca || !ca->css.cgroup->parent)
		return NULL;
	return cgroup_ca(ca->css.cgroup->parent);
}

extern void cpuacct_charge(struct task_struct *tsk, u64 cputime);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
#endif

#ifdef CONFIG_PARAVIRT
static inline u64 steal_ticks(u64 steal)
{
	if (unlikely(steal > NSEC_PER_SEC))
		return div_u64(steal, TICK_NSEC);

	return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
}
#endif

static inline void inc_nr_running(struct rq *rq)
{
	rq->nr_running++;
}

static inline void dec_nr_running(struct rq *rq)
{
	rq->nr_running--;
}

extern void update_rq_clock(struct rq *rq);

extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);

extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

extern const_debug unsigned int sysctl_sched_time_avg;
extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;

static inline u64 sched_avg_period(void)
{
	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}
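
/*
 * Worked example (assuming the default sysctl_sched_time_avg of
 * MSEC_PER_SEC, i.e. 1000 ms): sched_avg_period() is 500 ms in
 * nanoseconds, and sched_avg_update() (declared below) halves
 * rq->rt_avg once per such period.
 */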

#ifdef CONFIG_SCHED_HRTICK

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
	if (!sched_feat(HRTICK))
		return 0;
	if (!cpu_active(cpu_of(rq)))
		return 0;
	return hrtimer_is_hres_active(&rq->hrtick_timer);
}

void hrtick_start(struct rq *rq, u64 delay);

#else

static inline int hrtick_enabled(struct rq *rq)
{
	return 0;
}

#endif /* CONFIG_SCHED_HRTICK */

#ifdef CONFIG_SMP
extern void sched_avg_update(struct rq *rq);
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
	rq->rt_avg += rt_delta;
	sched_avg_update(rq);
}
#else
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
static inline void sched_avg_update(struct rq *rq) { }
#endif

extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period);

#ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPT

static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);

/*
 * fair double_lock_balance: Safely acquires both rq->locks in a fair
 * way at the expense of forcing extra atomic operations in all
 * invocations. This assures that the double_lock is acquired using the
 * same underlying policy as the spinlock_t on this architecture, which
 * reduces latency compared to the unfair variant below. However, it
 * also adds more overhead and therefore may reduce throughput.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	raw_spin_unlock(&this_rq->lock);
	double_rq_lock(this_rq, busiest);

	return 1;
}

#else
/*
 * Unfair double_lock_balance: Optimizes throughput at the expense of
 * latency by eliminating extra atomic operations when the locks are
 * already in proper order on entry. This favors lower cpu-ids and will
 * grant the double lock to lower cpus over higher ids under contention,
 * regardless of entry order into the function.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	int ret = 0;

	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
		if (busiest < this_rq) {
			raw_spin_unlock(&this_rq->lock);
			raw_spin_lock(&busiest->lock);
			raw_spin_lock_nested(&this_rq->lock,
					      SINGLE_DEPTH_NESTING);
			ret = 1;
		} else
			raw_spin_lock_nested(&busiest->lock,
					      SINGLE_DEPTH_NESTING);
	}
	return ret;
}

#endif /* CONFIG_PREEMPT */

/*
 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
 */
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	if (unlikely(!irqs_disabled())) {
		/* printk() doesn't work well under rq->lock */
		raw_spin_unlock(&this_rq->lock);
		BUG_ON(1);
	}

	return _double_lock_balance(this_rq, busiest);
}

static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(busiest->lock)
{
	raw_spin_unlock(&busiest->lock);
	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	if (rq1 == rq2) {
		raw_spin_lock(&rq1->lock);
		__acquire(rq2->lock);	/* Fake it out ;) */
	} else {
		if (rq1 < rq2) {
			raw_spin_lock(&rq1->lock);
			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
		} else {
			raw_spin_lock(&rq2->lock);
			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
		}
	}
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	raw_spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		raw_spin_unlock(&rq2->lock);
	else
		__release(rq2->lock);
}
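
/*
 * Usage sketch (hypothetical, for illustration): callers disable
 * interrupts themselves, so a migration-style path looks roughly like:
 *
 *	local_irq_disable();
 *	double_rq_lock(src_rq, dst_rq);
 *	... move a task from src_rq to dst_rq ...
 *	double_rq_unlock(src_rq, dst_rq);
 *	local_irq_enable();
 */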

#else /* CONFIG_SMP */

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	BUG_ON(rq1 != rq2);
	raw_spin_lock(&rq1->lock);
	__acquire(rq2->lock);	/* Fake it out ;) */
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	BUG_ON(rq1 != rq2);
	raw_spin_unlock(&rq1->lock);
	__release(rq2->lock);
}

#endif

extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
extern void print_cfs_stats(struct seq_file *m, int cpu);
extern void print_rt_stats(struct seq_file *m, int cpu);

extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
extern void account_cfs_bandwidth_used(int enabled, int was_enabled);

#ifdef CONFIG_NO_HZ
enum rq_nohz_flag_bits {
	NOHZ_TICK_STOPPED,
	NOHZ_BALANCE_KICK,
	NOHZ_IDLE,
};

#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

DECLARE_PER_CPU(u64, cpu_hardirq_time);
DECLARE_PER_CPU(u64, cpu_softirq_time);

#ifndef CONFIG_64BIT
DECLARE_PER_CPU(seqcount_t, irq_time_seq);
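
/*
 * On 32-bit, the two u64 counters above cannot be read atomically, so
 * writers bump irq_time_seq around their updates (irq_time_write_begin/
 * irq_time_write_end below) and readers retry until they observe an
 * unchanged, even sequence (irq_time_read below) - an open-coded
 * seqcount protocol.
 */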
static inline void irq_time_write_begin(void)
{
	__this_cpu_inc(irq_time_seq.sequence);
	smp_wmb();
}

static inline void irq_time_write_end(void)
{
	smp_wmb();
	__this_cpu_inc(irq_time_seq.sequence);
}

static inline u64 irq_time_read(int cpu)
{
	u64 irq_time;
	unsigned seq;

	do {
		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
		irq_time = per_cpu(cpu_softirq_time, cpu) +
			   per_cpu(cpu_hardirq_time, cpu);
	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));

	return irq_time;
}
#else /* CONFIG_64BIT */
static inline void irq_time_write_begin(void)
{
}

static inline void irq_time_write_end(void)
{
}

static inline u64 irq_time_read(int cpu)
{
	return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
}
#endif /* CONFIG_64BIT */
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */