/* Worker thread pool for slow items, such as filesystem lookups or mkdirs
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/slow-work.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/wait.h>

#define SLOW_WORK_CULL_TIMEOUT (5 * HZ)	/* cull threads 5s after running out of
					 * things to do */

#define SLOW_WORK_OOM_TIMEOUT (5 * HZ)	/* can't start new threads for 5s after
					 * OOM */

static void slow_work_cull_timeout(unsigned long);
static void slow_work_oom_timeout(unsigned long);

#ifdef CONFIG_SYSCTL
static int slow_work_min_threads_sysctl(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);

static int slow_work_max_threads_sysctl(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
#endif

/*
 * The pool of threads has at least min threads in it as long as someone is
 * using the facility, and may have as many as max.
 *
 * A portion of the pool may be processing very slow operations.
 */
static unsigned slow_work_min_threads = 2;
static unsigned slow_work_max_threads = 4;
static unsigned vslow_work_proportion = 50; /* % of threads that may process
					     * very slow work */

#ifdef CONFIG_SYSCTL
static const int slow_work_min_min_threads = 2;
static int slow_work_max_max_threads = 255;
static const int slow_work_min_vslow = 1;
static const int slow_work_max_vslow = 99;

ctl_table slow_work_sysctls[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "min-threads",
		.data		= &slow_work_min_threads,
		.maxlen		= sizeof(unsigned),
		.mode		= 0644,
		.proc_handler	= slow_work_min_threads_sysctl,
		.extra1		= (void *) &slow_work_min_min_threads,
		.extra2		= &slow_work_max_threads,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "max-threads",
		.data		= &slow_work_max_threads,
		.maxlen		= sizeof(unsigned),
		.mode		= 0644,
		.proc_handler	= slow_work_max_threads_sysctl,
		.extra1		= &slow_work_min_threads,
		.extra2		= (void *) &slow_work_max_max_threads,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "vslow-percentage",
		.data		= &vslow_work_proportion,
		.maxlen		= sizeof(unsigned),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.extra1		= (void *) &slow_work_min_vslow,
		.extra2		= (void *) &slow_work_max_vslow,
	},
	{ .ctl_name = 0 }
};
#endif
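
/*
 * Note (editorial, not in the original file): assuming this table is hooked up
 * under a "slow-work" directory in the kernel sysctl table, as mainline does
 * from kernel/sysctl.c, the knobs above appear as
 * /proc/sys/kernel/slow-work/{min-threads,max-threads,vslow-percentage}.
 */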

/*
 * The active state of the thread pool
 */
static atomic_t slow_work_thread_count;
static atomic_t vslow_work_executing_count;

static bool slow_work_may_not_start_new_thread;
static bool slow_work_cull; /* cull a thread due to lack of activity */
static DEFINE_TIMER(slow_work_cull_timer, slow_work_cull_timeout, 0, 0);
static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0);
static struct slow_work slow_work_new_thread; /* new thread starter */

/*
 * The queues of work items and the lock governing access to them. These are
 * shared between all the CPUs. It doesn't make sense to have per-CPU queues
 * as the number of threads bears no relation to the number of CPUs.
 *
 * There are two queues of work items: one for slow work items, and one for
 * very slow work items.
 */
static LIST_HEAD(slow_work_queue);
static LIST_HEAD(vslow_work_queue);
static DEFINE_SPINLOCK(slow_work_queue_lock);

/*
 * The thread controls. A variable used to signal to the threads that they
 * should exit when the queue is empty, a waitqueue used by the threads to wait
 * for signals, and a completion set by the last thread to exit.
 */
static bool slow_work_threads_should_exit;
static DECLARE_WAIT_QUEUE_HEAD(slow_work_thread_wq);
static DECLARE_COMPLETION(slow_work_last_thread_exited);

/*
 * The number of users of the thread pool and its lock. Whilst this is zero we
 * have no threads hanging around, and when this reaches zero, we wait for all
 * active or queued work items to complete and kill all the threads we do have.
 */
static int slow_work_user_count;
static DEFINE_MUTEX(slow_work_user_lock);

/*
 * Calculate the maximum number of active threads in the pool that are
 * permitted to process very slow work items.
 *
 * The answer is rounded up to at least 1, but may not equal or exceed the
 * maximum number of the threads in the pool. This means we always have at
 * least one thread that can process slow work items, and we always have at
 * least one thread that won't get tied up doing so.
 */
static unsigned slow_work_calc_vsmax(void)
{
	unsigned vsmax;

	vsmax = atomic_read(&slow_work_thread_count) * vslow_work_proportion;
	vsmax /= 100;
	vsmax = max(vsmax, 1U);
	return min(vsmax, slow_work_max_threads - 1);
}
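
/*
 * Worked example (editorial, not in the original file): with the defaults
 * above (max 4 threads, 50% very-slow proportion) and four threads running,
 * slow_work_calc_vsmax() computes max(4 * 50 / 100, 1) = 2, capped at
 * slow_work_max_threads - 1 = 3, so at most two threads may be tied up
 * executing very slow items at once.
 */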

/*
 * Attempt to execute stuff queued on a slow thread. Return true if we managed
 * it, false if there was nothing to do.
 */
static bool slow_work_execute(void)
{
	struct slow_work *work = NULL;
	unsigned vsmax;
	bool very_slow;

	vsmax = slow_work_calc_vsmax();

	/* see if we can schedule a new thread to be started if we're not
	 * keeping up with the work */
	if (!waitqueue_active(&slow_work_thread_wq) &&
	    (!list_empty(&slow_work_queue) || !list_empty(&vslow_work_queue)) &&
	    atomic_read(&slow_work_thread_count) < slow_work_max_threads &&
	    !slow_work_may_not_start_new_thread)
		slow_work_enqueue(&slow_work_new_thread);

	/* find something to execute */
	spin_lock_irq(&slow_work_queue_lock);
	if (!list_empty(&vslow_work_queue) &&
	    atomic_read(&vslow_work_executing_count) < vsmax) {
		work = list_entry(vslow_work_queue.next,
				  struct slow_work, link);
		if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags))
			BUG();
		list_del_init(&work->link);
		atomic_inc(&vslow_work_executing_count);
		very_slow = true;
	} else if (!list_empty(&slow_work_queue)) {
		work = list_entry(slow_work_queue.next,
				  struct slow_work, link);
		if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags))
			BUG();
		list_del_init(&work->link);
		very_slow = false;
	} else {
		very_slow = false; /* avoid the compiler warning */
	}
	spin_unlock_irq(&slow_work_queue_lock);

	if (!work)
		return false;

	if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags))
		BUG();

	work->ops->execute(work);

	if (very_slow)
		atomic_dec(&vslow_work_executing_count);
	clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags);

	/* if someone tried to enqueue the item whilst we were executing it,
	 * then it'll be left unenqueued to avoid multiple threads trying to
	 * execute it simultaneously
	 *
	 * there is, however, a race between us testing the pending flag and
	 * getting the spinlock, and between the enqueuer setting the pending
	 * flag and getting the spinlock, so we use a deferral bit to tell us
	 * if the enqueuer got there first
	 */
	if (test_bit(SLOW_WORK_PENDING, &work->flags)) {
		spin_lock_irq(&slow_work_queue_lock);

		if (!test_bit(SLOW_WORK_EXECUTING, &work->flags) &&
		    test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags))
			goto auto_requeue;

		spin_unlock_irq(&slow_work_queue_lock);
	}

	work->ops->put_ref(work);
	return true;

auto_requeue:
	/* we must complete the enqueue operation
	 * - we transfer our ref on the item back to the appropriate queue
	 * - don't wake another thread up as we're awake already
	 */
	if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
		list_add_tail(&work->link, &vslow_work_queue);
	else
		list_add_tail(&work->link, &slow_work_queue);
	spin_unlock_irq(&slow_work_queue_lock);
	return true;
}

/**
 * slow_work_enqueue - Schedule a slow work item for processing
 * @work: The work item to queue
 *
 * Schedule a slow work item for processing. If the item is already undergoing
 * execution, this guarantees not to re-enter the execution routine until the
 * first execution finishes.
 *
 * The item is pinned by this function as it retains a reference to it, managed
 * through the item operations. The item is unpinned once it has been
 * executed.
 *
 * An item may hog the thread that is running it for a relatively large amount
 * of time, sufficient, for example, to perform several lookup, mkdir, create
 * and setxattr operations. It may sleep on I/O and may sleep to obtain locks.
 *
 * Conversely, if a number of items are awaiting processing, it may take some
 * time before any given item is given attention. The number of threads in the
 * pool may be increased to deal with demand, but only up to a limit.
 *
 * If SLOW_WORK_VERY_SLOW is set on the work item, then it will be placed in
 * the very slow queue, from which only a portion of the threads will be
 * allowed to pick items to execute. This ensures that very slow items won't
 * overly block ones that are just ordinarily slow.
 *
 * Returns 0 if successful, -EAGAIN if not.
 */
int slow_work_enqueue(struct slow_work *work)
{
	unsigned long flags;

	BUG_ON(slow_work_user_count <= 0);
	BUG_ON(!work);
	BUG_ON(!work->ops);
	BUG_ON(!work->ops->get_ref);

	/* when honouring an enqueue request, we only promise that we will run
	 * the work function in the future; we do not promise to run it once
	 * per enqueue request
	 *
	 * we use the PENDING bit to merge together repeat requests without
	 * having to disable IRQs and take the spinlock, whilst still
	 * maintaining our promise
	 */
	if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
		spin_lock_irqsave(&slow_work_queue_lock, flags);

		/* we promise that we will not attempt to execute the work
		 * function in more than one thread simultaneously
		 *
		 * this, however, leaves us with a problem if we're asked to
		 * enqueue the work whilst someone is executing the work
		 * function as simply queueing the work immediately means that
		 * another thread may try executing it whilst it is already
		 * under execution
		 *
		 * to deal with this, we set the ENQ_DEFERRED bit instead of
		 * enqueueing, and the thread currently executing the work
		 * function will enqueue the work item when the work function
		 * returns and it has cleared the EXECUTING bit
		 */
		if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
			set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
		} else {
			if (work->ops->get_ref(work) < 0)
				goto cant_get_ref;
			if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
				list_add_tail(&work->link, &vslow_work_queue);
			else
				list_add_tail(&work->link, &slow_work_queue);
			wake_up(&slow_work_thread_wq);
		}

		spin_unlock_irqrestore(&slow_work_queue_lock, flags);
	}
	return 0;

cant_get_ref:
	spin_unlock_irqrestore(&slow_work_queue_lock, flags);
	return -EAGAIN;
}
EXPORT_SYMBOL(slow_work_enqueue);
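
/*
 * Illustrative sketch (editorial, not part of the original file): how a client
 * might drive this API, assuming a hypothetical refcounted "struct my_object"
 * that embeds a struct slow_work. The ops and slow_work_init()/
 * slow_work_enqueue() calls are the ones used elsewhere in this file; the
 * client must have called slow_work_register_user() first. Guarded by #if 0
 * so it is never built.
 */
#if 0
struct my_object {
	atomic_t		usage;
	struct slow_work	work;
};

/* pin the object while it is queued or executing; 0 means success */
static int my_object_get_ref(struct slow_work *work)
{
	struct my_object *obj = container_of(work, struct my_object, work);

	atomic_inc(&obj->usage);
	return 0;
}

/* drop the pin taken by my_object_get_ref() */
static void my_object_put_ref(struct slow_work *work)
{
	struct my_object *obj = container_of(work, struct my_object, work);

	atomic_dec(&obj->usage);
}

/* runs in a kslowd thread; may sleep on I/O and locks */
static void my_object_execute(struct slow_work *work)
{
	/* ... do the slow, sleep-prone processing here ... */
}

static const struct slow_work_ops my_object_slow_work_ops = {
	.get_ref	= my_object_get_ref,
	.put_ref	= my_object_put_ref,
	.execute	= my_object_execute,
};

/* set up the embedded item once, then enqueue it whenever work is pending;
 * repeat enqueues whilst the item is pending or executing are merged */
static void my_object_setup_and_kick(struct my_object *obj)
{
	slow_work_init(&obj->work, &my_object_slow_work_ops);
	slow_work_enqueue(&obj->work);
}
#endif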

/*
 * Worker thread culling algorithm
 */
static bool slow_work_cull_thread(void)
{
	unsigned long flags;
	bool do_cull = false;

	spin_lock_irqsave(&slow_work_queue_lock, flags);

	if (slow_work_cull) {
		slow_work_cull = false;

		if (list_empty(&slow_work_queue) &&
		    list_empty(&vslow_work_queue) &&
		    atomic_read(&slow_work_thread_count) >
		    slow_work_min_threads) {
			mod_timer(&slow_work_cull_timer,
				  jiffies + SLOW_WORK_CULL_TIMEOUT);
			do_cull = true;
		}
	}

	spin_unlock_irqrestore(&slow_work_queue_lock, flags);
	return do_cull;
}

/*
 * Determine if there is slow work available for dispatch
 */
static inline bool slow_work_available(int vsmax)
{
	return !list_empty(&slow_work_queue) ||
		(!list_empty(&vslow_work_queue) &&
		 atomic_read(&vslow_work_executing_count) < vsmax);
}

/*
 * Worker thread dispatcher
 */
static int slow_work_thread(void *_data)
{
	int vsmax;

	DEFINE_WAIT(wait);

	set_freezable();
	set_user_nice(current, -5);

	for (;;) {
		vsmax = vslow_work_proportion;
		vsmax *= atomic_read(&slow_work_thread_count);
		vsmax /= 100;

		prepare_to_wait(&slow_work_thread_wq, &wait,
				TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !slow_work_threads_should_exit &&
		    !slow_work_available(vsmax) &&
		    !slow_work_cull)
			schedule();
		finish_wait(&slow_work_thread_wq, &wait);

		try_to_freeze();

		vsmax = vslow_work_proportion;
		vsmax *= atomic_read(&slow_work_thread_count);
		vsmax /= 100;

		if (slow_work_available(vsmax) && slow_work_execute()) {
			cond_resched();
			if (list_empty(&slow_work_queue) &&
			    list_empty(&vslow_work_queue) &&
			    atomic_read(&slow_work_thread_count) >
			    slow_work_min_threads)
				mod_timer(&slow_work_cull_timer,
					  jiffies + SLOW_WORK_CULL_TIMEOUT);
			continue;
		}

		if (slow_work_threads_should_exit)
			break;

		if (slow_work_cull && slow_work_cull_thread())
			break;
	}

	if (atomic_dec_and_test(&slow_work_thread_count))
		complete_and_exit(&slow_work_last_thread_exited, 0);
	return 0;
}

/*
 * Handle thread cull timer expiration
 */
static void slow_work_cull_timeout(unsigned long data)
{
	slow_work_cull = true;
	wake_up(&slow_work_thread_wq);
}

/*
 * Get a reference on slow work thread starter
 */
static int slow_work_new_thread_get_ref(struct slow_work *work)
{
	return 0;
}

/*
 * Drop a reference on slow work thread starter
 */
static void slow_work_new_thread_put_ref(struct slow_work *work)
{
}

/*
 * Start a new slow work thread
 */
static void slow_work_new_thread_execute(struct slow_work *work)
{
	struct task_struct *p;

	if (slow_work_threads_should_exit)
		return;

	if (atomic_read(&slow_work_thread_count) >= slow_work_max_threads)
		return;

	if (!mutex_trylock(&slow_work_user_lock))
		return;

	slow_work_may_not_start_new_thread = true;
	atomic_inc(&slow_work_thread_count);
	p = kthread_run(slow_work_thread, NULL, "kslowd");
	if (IS_ERR(p)) {
		printk(KERN_DEBUG "Slow work thread pool: OOM\n");
		if (atomic_dec_and_test(&slow_work_thread_count))
			BUG(); /* we're running on a slow work thread... */
		mod_timer(&slow_work_oom_timer,
			  jiffies + SLOW_WORK_OOM_TIMEOUT);
	} else {
		/* ratelimit the starting of new threads */
		mod_timer(&slow_work_oom_timer, jiffies + 1);
	}

	mutex_unlock(&slow_work_user_lock);
}

static const struct slow_work_ops slow_work_new_thread_ops = {
	.get_ref	= slow_work_new_thread_get_ref,
	.put_ref	= slow_work_new_thread_put_ref,
	.execute	= slow_work_new_thread_execute,
};

/*
 * post-OOM new thread start suppression expiration
 */
static void slow_work_oom_timeout(unsigned long data)
{
	slow_work_may_not_start_new_thread = false;
}

#ifdef CONFIG_SYSCTL
/*
 * Handle adjustment of the minimum number of threads
 */
static int slow_work_min_threads_sysctl(struct ctl_table *table, int write,
					struct file *filp, void __user *buffer,
					size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
	int n;

	if (ret == 0) {
		mutex_lock(&slow_work_user_lock);
		if (slow_work_user_count > 0) {
			/* see if we need to start or stop threads */
			n = atomic_read(&slow_work_thread_count) -
				slow_work_min_threads;

			if (n < 0 && !slow_work_may_not_start_new_thread)
				slow_work_enqueue(&slow_work_new_thread);
			else if (n > 0)
				mod_timer(&slow_work_cull_timer,
					  jiffies + SLOW_WORK_CULL_TIMEOUT);
		}
		mutex_unlock(&slow_work_user_lock);
	}

	return ret;
}

/*
 * Handle adjustment of the maximum number of threads
 */
static int slow_work_max_threads_sysctl(struct ctl_table *table, int write,
					struct file *filp, void __user *buffer,
					size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
	int n;

	if (ret == 0) {
		mutex_lock(&slow_work_user_lock);
		if (slow_work_user_count > 0) {
			/* see if we need to stop threads */
			n = slow_work_max_threads -
				atomic_read(&slow_work_thread_count);

			if (n < 0)
				mod_timer(&slow_work_cull_timer,
					  jiffies + SLOW_WORK_CULL_TIMEOUT);
		}
		mutex_unlock(&slow_work_user_lock);
	}

	return ret;
}
#endif /* CONFIG_SYSCTL */

/**
 * slow_work_register_user - Register a user of the facility
 *
 * Register a user of the facility, starting up the initial threads if there
 * aren't any other users at this point. This will return 0 if successful, or
 * an error if not.
 */
int slow_work_register_user(void)
{
	struct task_struct *p;
	int loop;

	mutex_lock(&slow_work_user_lock);

	if (slow_work_user_count == 0) {
		printk(KERN_NOTICE "Slow work thread pool: Starting up\n");
		init_completion(&slow_work_last_thread_exited);

		slow_work_threads_should_exit = false;
		slow_work_init(&slow_work_new_thread,
			       &slow_work_new_thread_ops);
		slow_work_may_not_start_new_thread = false;
		slow_work_cull = false;

		/* start the minimum number of threads */
		for (loop = 0; loop < slow_work_min_threads; loop++) {
			atomic_inc(&slow_work_thread_count);
			p = kthread_run(slow_work_thread, NULL, "kslowd");
			if (IS_ERR(p))
				goto error;
		}
		printk(KERN_NOTICE "Slow work thread pool: Ready\n");
	}

	slow_work_user_count++;
	mutex_unlock(&slow_work_user_lock);
	return 0;

error:
	if (atomic_dec_and_test(&slow_work_thread_count))
		complete(&slow_work_last_thread_exited);
	if (loop > 0) {
		printk(KERN_ERR "Slow work thread pool:"
		       " Aborting startup on ENOMEM\n");
		slow_work_threads_should_exit = true;
		wake_up_all(&slow_work_thread_wq);
		wait_for_completion(&slow_work_last_thread_exited);
		printk(KERN_ERR "Slow work thread pool: Aborted\n");
	}
	mutex_unlock(&slow_work_user_lock);
	return PTR_ERR(p);
}
EXPORT_SYMBOL(slow_work_register_user);

/**
 * slow_work_unregister_user - Unregister a user of the facility
 *
 * Unregister a user of the facility, killing all the threads if this was the
 * last one.
 */
void slow_work_unregister_user(void)
{
	mutex_lock(&slow_work_user_lock);

	BUG_ON(slow_work_user_count <= 0);

	slow_work_user_count--;
	if (slow_work_user_count == 0) {
		printk(KERN_NOTICE "Slow work thread pool: Shutting down\n");
		slow_work_threads_should_exit = true;
		wake_up_all(&slow_work_thread_wq);
		wait_for_completion(&slow_work_last_thread_exited);
		printk(KERN_NOTICE "Slow work thread pool:"
		       " Shut down complete\n");
	}

	del_timer_sync(&slow_work_cull_timer);

	mutex_unlock(&slow_work_user_lock);
}
EXPORT_SYMBOL(slow_work_unregister_user);
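
/*
 * Illustrative sketch (editorial, not part of the original file): a module
 * that enqueues slow work items would bracket its use of the pool with the
 * two calls above; module and function names are hypothetical. Guarded by
 * #if 0 so it is never built.
 */
#if 0
static int __init my_module_init(void)
{
	int ret;

	/* make sure the pool threads exist before anything is enqueued */
	ret = slow_work_register_user();
	if (ret < 0)
		return ret;

	/* ... set up objects that embed struct slow_work ... */
	return 0;
}

static void __exit my_module_exit(void)
{
	/* ... quiesce, so nothing enqueues further items ... */

	/* the last user waits for queued items and the threads to finish */
	slow_work_unregister_user();
}

module_init(my_module_init);
module_exit(my_module_exit);
#endif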

/*
 * Initialise the slow work facility
 */
static int __init init_slow_work(void)
{
	unsigned nr_cpus = num_possible_cpus();

	if (slow_work_max_threads < nr_cpus)
		slow_work_max_threads = nr_cpus;
#ifdef CONFIG_SYSCTL
	if (slow_work_max_max_threads < nr_cpus * 2)
		slow_work_max_max_threads = nr_cpus * 2;
#endif
	return 0;
}

subsys_initcall(init_slow_work);