
/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>

#include <linux/sunrpc/clnt.h>

#ifdef RPC_DEBUG
#define RPCDBG_FACILITY		RPCDBG_SCHED
#define RPC_TASK_MAGIC_ID	0xf00baa
static int rpc_task_id;
#endif

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static struct kmem_cache	*rpc_task_slabp __read_mostly;
static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
static mempool_t		*rpc_task_mempool __read_mostly;
static mempool_t		*rpc_buffer_mempool __read_mostly;

static void __rpc_default_timer(struct rpc_task *task);
static void rpciod_killall(void);
static void rpc_async_schedule(struct work_struct *);
static void rpc_release_task(struct rpc_task *task);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static RPC_WAITQ(delay_queue, "delayq");

/*
 * All RPC tasks are linked into this list
 */
static LIST_HEAD(all_tasks);

/*
 * rpciod-related stuff
 */
static DEFINE_MUTEX(rpciod_mutex);
static unsigned int rpciod_users;
struct workqueue_struct *rpciod_workqueue;

/*
 * Spinlock for other critical sections of code.
 */
static DEFINE_SPINLOCK(rpc_sched_lock);

/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock and bh_disabled in order to avoid races within
 * rpc_run_timer().
 */
static inline void
__rpc_disable_timer(struct rpc_task *task)
{
	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
	task->tk_timeout_fn = NULL;
	task->tk_timeout = 0;
}

/*
 * Run a timeout function.
 * We use the callback in order to allow __rpc_wake_up_task()
 * and friends to disable the timer synchronously on SMP systems
 * without calling del_timer_sync(). The latter could cause a
 * deadlock if called while we're holding spinlocks...
 */
static void rpc_run_timer(struct rpc_task *task)
{
	void (*callback)(struct rpc_task *);

	callback = task->tk_timeout_fn;
	task->tk_timeout_fn = NULL;
	if (callback && RPC_IS_QUEUED(task)) {
		dprintk("RPC: %5u running timer\n", task->tk_pid);
		callback(task);
	}
	smp_mb__before_clear_bit();
	clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate);
	smp_mb__after_clear_bit();
}

/*
 * Set up a timer for the current task.
 */
static inline void
__rpc_add_timer(struct rpc_task *task, rpc_action timer)
{
	if (!task->tk_timeout)
		return;

	dprintk("RPC: %5u setting alarm for %lu ms\n",
			task->tk_pid, task->tk_timeout * 1000 / HZ);

	if (timer)
		task->tk_timeout_fn = timer;
	else
		task->tk_timeout_fn = __rpc_default_timer;
	set_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate);
	mod_timer(&task->tk_timer, jiffies + task->tk_timeout);
}

/*
 * Delete any timer for the current task. Because we use del_timer_sync(),
 * this function should never be called while holding queue->lock.
 */
static void
rpc_delete_timer(struct rpc_task *task)
{
	if (RPC_IS_QUEUED(task))
		return;
	if (test_and_clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate)) {
		del_singleshot_timer_sync(&task->tk_timer);
		dprintk("RPC: %5u deleting timer\n", task->tk_pid);
	}
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	struct list_head *q;
	struct rpc_task *t;

	INIT_LIST_HEAD(&task->u.tk_wait.links);
	q = &queue->tasks[task->tk_priority];
	if (unlikely(task->tk_priority > queue->maxpriority))
		q = &queue->tasks[queue->maxpriority];
	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_cookie == task->tk_cookie) {
			list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
			return;
		}
	}
	list_add_tail(&task->u.tk_wait.list, q);
}

/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	BUG_ON(RPC_IS_QUEUED(task));

	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task);
	else if (RPC_IS_SWAPPER(task))
		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->u.tk_wait.rpc_waitq = queue;
	queue->qlen++;
	rpc_set_queued(task);

	dprintk("RPC: %5u added to queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	struct rpc_task *t;

	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
		list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
		list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
	}
	list_del(&task->u.tk_wait.list);
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;

	queue = task->u.tk_wait.rpc_waitq;
	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	else
		list_del(&task->u.tk_wait.list);
	queue->qlen--;
	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	queue->priority = priority;
	queue->count = 1 << (priority * 2);
}

static inline void rpc_set_waitqueue_cookie(struct rpc_wait_queue *queue, unsigned long cookie)
{
	queue->cookie = cookie;
	queue->nr = RPC_BATCH_COUNT;
}

static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
	rpc_set_waitqueue_cookie(queue, 0);
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, int maxprio)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = maxprio;
	rpc_reset_waitqueue_priority(queue);
#ifdef RPC_DEBUG
	queue->name = qname;
#endif
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_PRIORITY_HIGH);
}

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 0);
}
EXPORT_SYMBOL(rpc_init_wait_queue);
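
/*
 * Illustrative sketch (hypothetical code, nothing below is built): a user
 * of these wait queues declares one statically with RPC_WAITQ() (or calls
 * rpc_init_wait_queue() on an embedded queue), puts a task to sleep on it
 * with rpc_sleep_on(), and later releases the sleepers with rpc_wake_up()
 * or rpc_wake_up_status(). The example_* names are made up for the sketch.
 */
#if 0
static RPC_WAITQ(example_queue, "exampleq");

static void example_resume(struct rpc_task *task)
{
	/* runs as tk_callback once the task has been woken up */
}

static void example_wait_for_resource(struct rpc_task *task)
{
	task->tk_timeout = 5 * HZ;	/* fall back to __rpc_default_timer */
	rpc_sleep_on(&example_queue, task, example_resume, NULL);
}

static void example_resource_available(void)
{
	rpc_wake_up(&example_queue);
}
#endif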

static int rpc_wait_bit_interruptible(void *word)
{
	if (signal_pending(current))
		return -ERESTARTSYS;
	schedule();
	return 0;
}

static void rpc_set_active(struct rpc_task *task)
{
	if (test_and_set_bit(RPC_TASK_ACTIVE, &task->tk_runstate) != 0)
		return;
	spin_lock(&rpc_sched_lock);
#ifdef RPC_DEBUG
	task->tk_magic = RPC_TASK_MAGIC_ID;
	task->tk_pid = rpc_task_id++;
#endif
	/* Add to global list of all tasks */
	list_add_tail(&task->tk_task, &all_tasks);
	spin_unlock(&rpc_sched_lock);
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 */
static void rpc_mark_complete_task(struct rpc_task *task)
{
	smp_mb__before_clear_bit();
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	smp_mb__after_clear_bit();
	wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE);
}

/*
 * Allow callers to wait for completion of an RPC call
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
{
	if (action == NULL)
		action = rpc_wait_bit_interruptible;
	return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			action, TASK_INTERRUPTIBLE);
}
EXPORT_SYMBOL(__rpc_wait_for_completion_task);

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, this must be called with
 * the spinlock held to protect the wait queue operation.
 */
static void rpc_make_runnable(struct rpc_task *task)
{
	BUG_ON(task->tk_timeout_fn);
	rpc_clear_queued(task);
	if (rpc_test_and_set_running(task))
		return;
	/* We might have raced */
	if (RPC_IS_QUEUED(task)) {
		rpc_clear_running(task);
		return;
	}
	if (RPC_IS_ASYNC(task)) {
		int status;

		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		status = queue_work(task->tk_workqueue, &task->u.tk_work);
		if (status < 0) {
			printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
			task->tk_status = status;
			return;
		}
	} else
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
			rpc_action action, rpc_action timer)
{
	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
			task->tk_pid, rpc_qname(q), jiffies);

	if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n");
		return;
	}

	__rpc_add_wait_queue(q, task);

	BUG_ON(task->tk_callback != NULL);
	task->tk_callback = action;
	__rpc_add_timer(task, timer);
}

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
			rpc_action action, rpc_action timer)
{
	/* Mark the task as being activated if so needed */
	rpc_set_active(task);

	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on(q, task, action, timer);
	spin_unlock_bh(&q->lock);
}

/**
 * __rpc_do_wake_up_task - wake up a single rpc_task
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task(struct rpc_task *task)
{
	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
			task->tk_pid, jiffies);

#ifdef RPC_DEBUG
	BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
#endif
	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	__rpc_disable_timer(task);
	__rpc_remove_wait_queue(task);

	rpc_make_runnable(task);

	dprintk("RPC: __rpc_wake_up_task done\n");
}

/*
 * Wake up the specified task
 */
static void __rpc_wake_up_task(struct rpc_task *task)
{
	if (rpc_start_wakeup(task)) {
		if (RPC_IS_QUEUED(task))
			__rpc_do_wake_up_task(task);
		rpc_finish_wakeup(task);
	}
}

/*
 * Default timeout handler if none specified by user
 */
static void
__rpc_default_timer(struct rpc_task *task)
{
	dprintk("RPC: %5u timeout (default timer)\n", task->tk_pid);
	task->tk_status = -ETIMEDOUT;
	rpc_wake_up_task(task);
}

/*
 * Wake up the specified task
 */
void rpc_wake_up_task(struct rpc_task *task)
{
	rcu_read_lock_bh();
	if (rpc_start_wakeup(task)) {
		if (RPC_IS_QUEUED(task)) {
			struct rpc_wait_queue *queue = task->u.tk_wait.rpc_waitq;

			/* Note: we're already in a bh-safe context */
			spin_lock(&queue->lock);
			__rpc_do_wake_up_task(task);
			spin_unlock(&queue->lock);
		}
		rpc_finish_wakeup(task);
	}
	rcu_read_unlock_bh();
}

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task *__rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service a batch of tasks from a single cookie.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q)) {
		task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
		if (queue->cookie == task->tk_cookie) {
			if (--queue->nr)
				goto out;
			list_move_tail(&task->u.tk_wait.list, q);
		}
		/*
		 * Check if we need to switch queues.
		 */
		if (--queue->count)
			goto new_cookie;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_cookie:
	rpc_set_waitqueue_cookie(queue, task->tk_cookie);
out:
	__rpc_wake_up_task(task);
	return task;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	struct rpc_task *task = NULL;

	dprintk("RPC: wake_up_next(%p \"%s\")\n",
			queue, rpc_qname(queue));
	rcu_read_lock_bh();
	spin_lock(&queue->lock);
	if (RPC_IS_PRIORITY(queue))
		task = __rpc_wake_up_next_priority(queue);
	else {
		task_for_first(task, &queue->tasks[0])
			__rpc_wake_up_task(task);
	}
	spin_unlock(&queue->lock);
	rcu_read_unlock_bh();

	return task;
}

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	struct rpc_task *task, *next;
	struct list_head *head;

	rcu_read_lock_bh();
	spin_lock(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		list_for_each_entry_safe(task, next, head, u.tk_wait.list)
			__rpc_wake_up_task(task);
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock(&queue->lock);
	rcu_read_unlock_bh();
}

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	struct rpc_task *task, *next;
	struct list_head *head;

	rcu_read_lock_bh();
	spin_lock(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
			task->tk_status = status;
			__rpc_wake_up_task(task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock(&queue->lock);
	rcu_read_unlock_bh();
}

static void __rpc_atrun(struct rpc_task *task)
{
	rpc_wake_up_task(task);
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
	task->tk_timeout = delay;
	rpc_sleep_on(&delay_queue, task, NULL, __rpc_atrun);
}

/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
static void rpc_prepare_task(struct rpc_task *task)
{
	lock_kernel();
	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
	unlock_kernel();
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
	task->tk_action = NULL;
	if (task->tk_ops->rpc_call_done != NULL) {
		lock_kernel();
		task->tk_ops->rpc_call_done(task, task->tk_calldata);
		unlock_kernel();
		if (task->tk_action != NULL) {
			WARN_ON(RPC_ASSASSINATED(task));
			/* Always release the RPC slot and buffer memory */
			xprt_release(task);
		}
	}
}
EXPORT_SYMBOL(rpc_exit_task);

void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
	if (ops->rpc_release != NULL) {
		lock_kernel();
		ops->rpc_release(calldata);
		unlock_kernel();
	}
}

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static void __rpc_execute(struct rpc_task *task)
{
	int status = 0;

	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
			task->tk_pid, task->tk_flags);

	BUG_ON(RPC_IS_QUEUED(task));

	for (;;) {
		/*
		 * Garbage collection of pending timers...
		 */
		rpc_delete_timer(task);

		/*
		 * Execute any pending callback.
		 */
		if (RPC_DO_CALLBACK(task)) {
			/* Define a callback save pointer */
			void (*save_callback)(struct rpc_task *);

			/*
			 * If a callback exists, save it, reset it,
			 * call it.
			 * The save is needed to stop from resetting
			 * another callback set within the callback handler
			 *	- Dave
			 */
			save_callback = task->tk_callback;
			task->tk_callback = NULL;
			save_callback(task);
		}

		/*
		 * Perform the next FSM step.
		 * tk_action may be NULL when the task has been killed
		 * by someone else.
		 */
		if (!RPC_IS_QUEUED(task)) {
			if (task->tk_action == NULL)
				break;
			task->tk_action(task);
		}

		/*
		 * Lockless check for whether task is sleeping or not.
		 */
		if (!RPC_IS_QUEUED(task))
			continue;
		rpc_clear_running(task);
		if (RPC_IS_ASYNC(task)) {
			/* Careful! we may have raced... */
			if (RPC_IS_QUEUED(task))
				return;
			if (rpc_test_and_set_running(task))
				return;
			continue;
		}

		/* sync task: sleep here */
		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
		/* Note: Caller should be using rpc_clnt_sigmask() */
		status = out_of_line_wait_on_bit(&task->tk_runstate,
				RPC_TASK_QUEUED, rpc_wait_bit_interruptible,
				TASK_INTERRUPTIBLE);
		if (status == -ERESTARTSYS) {
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			dprintk("RPC: %5u got signal\n", task->tk_pid);
			task->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(task, -ERESTARTSYS);
			rpc_wake_up_task(task);
		}
		rpc_set_running(task);
		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
	}

	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
			task->tk_status);
	/* Release all resources associated with the task */
	rpc_release_task(task);
}

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *	 released. In particular note that tk_release() will have
 *	 been called, so your task memory may have been freed.
 */
void rpc_execute(struct rpc_task *task)
{
	rpc_set_active(task);
	rpc_set_running(task);
	__rpc_execute(task);
}

static void rpc_async_schedule(struct work_struct *work)
{
	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
}

struct rpc_buffer {
	size_t	len;
	char	data[];
};

/**
 * rpc_malloc - allocate an RPC buffer
 * @task: RPC task that will use this buffer
 * @size: requested byte size
 *
 * To prevent rpciod from hanging, this allocator never sleeps,
 * returning NULL if the request cannot be serviced immediately.
 * The caller can arrange to sleep in a way that is safe for rpciod.
 *
 * Most requests are 'small' (under 2KiB) and can be serviced from a
 * mempool, ensuring that NFS reads and writes can always proceed,
 * and that there is good locality of reference for these buffers.
 *
 * In order to avoid memory starvation triggering more writebacks of
 * NFS requests, we avoid using GFP_KERNEL.
 */
void *rpc_malloc(struct rpc_task *task, size_t size)
{
	struct rpc_buffer *buf;
	gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT;

	size += sizeof(struct rpc_buffer);
	if (size <= RPC_BUFFER_MAXSIZE)
		buf = mempool_alloc(rpc_buffer_mempool, gfp);
	else
		buf = kmalloc(size, gfp);
	/* Non-sleeping allocations can fail: honour the "returning NULL"
	 * promise above rather than dereferencing a NULL pointer. */
	if (!buf)
		return NULL;
	buf->len = size;
	dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
			task->tk_pid, size, buf);
	return &buf->data;
}

/**
 * rpc_free - free buffer allocated via rpc_malloc
 * @buffer: buffer to free
 *
 */
void rpc_free(void *buffer)
{
	size_t size;
	struct rpc_buffer *buf;

	if (!buffer)
		return;

	buf = container_of(buffer, struct rpc_buffer, data);
	size = buf->len;

	dprintk("RPC: freeing buffer of size %zu at %p\n",
			size, buf);

	if (size <= RPC_BUFFER_MAXSIZE)
		mempool_free(buf, rpc_buffer_mempool);
	else
		kfree(buf);
}
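
/*
 * Illustrative sketch (hypothetical code, not built): a transport would
 * typically pair these two helpers as its buf_alloc/buf_free methods, and
 * a caller can back off with rpc_delay() when the non-sleeping allocation
 * fails. The example_* names below are made up for the sketch.
 */
#if 0
static void *example_buf_alloc(struct rpc_task *task, size_t size)
{
	void *buf = rpc_malloc(task, size);

	if (buf == NULL)
		rpc_delay(task, HZ >> 4);	/* retry a little later */
	return buf;
}

static void example_buf_free(void *buffer)
{
	rpc_free(buffer);
}
#endif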

/*
 * Creation and deletion of RPC task structures
 */
void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata)
{
	memset(task, 0, sizeof(*task));
	init_timer(&task->tk_timer);
	task->tk_timer.data     = (unsigned long) task;
	task->tk_timer.function = (void (*)(unsigned long)) rpc_run_timer;
	atomic_set(&task->tk_count, 1);
	task->tk_client = clnt;
	task->tk_flags  = flags;
	task->tk_ops = tk_ops;
	if (tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;
	task->tk_calldata = calldata;

	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;

	task->tk_priority = RPC_PRIORITY_NORMAL;
	task->tk_cookie = (unsigned long)current;

	/* Initialize workqueue for async tasks */
	task->tk_workqueue = rpciod_workqueue;

	if (clnt) {
		atomic_inc(&clnt->cl_users);
		if (clnt->cl_softrtry)
			task->tk_flags |= RPC_TASK_SOFT;
		if (!clnt->cl_intr)
			task->tk_flags |= RPC_TASK_NOINTR;
	}

	BUG_ON(task->tk_ops == NULL);

	/* starting timestamp */
	task->tk_start = jiffies;

	dprintk("RPC: new task initialized, procpid %u\n",
			current->pid);
}

static struct rpc_task *
rpc_alloc_task(void)
{
	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}

static void rpc_free_task(struct rcu_head *rcu)
{
	struct rpc_task *task = container_of(rcu, struct rpc_task, u.tk_rcu);

	dprintk("RPC: %5u freeing task\n", task->tk_pid);
	mempool_free(task, rpc_task_mempool);
}

/*
 * Create a new task for the specified client.  We have to
 * clean up after an allocation failure, as the client may
 * have specified "oneshot".
 */
struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata)
{
	struct rpc_task *task;

	task = rpc_alloc_task();
	if (!task)
		goto cleanup;

	rpc_init_task(task, clnt, flags, tk_ops, calldata);

	dprintk("RPC: allocated task %p\n", task);
	task->tk_flags |= RPC_TASK_DYNAMIC;
out:
	return task;

cleanup:
	/* Check whether to release the client */
	if (clnt) {
		printk("rpc_new_task: failed, users=%d, oneshot=%d\n",
			atomic_read(&clnt->cl_users), clnt->cl_oneshot);
		atomic_inc(&clnt->cl_users); /* pretend we were used ... */
		rpc_release_client(clnt);
	}
	goto out;
}

void rpc_put_task(struct rpc_task *task)
{
	const struct rpc_call_ops *tk_ops = task->tk_ops;
	void *calldata = task->tk_calldata;

	if (!atomic_dec_and_test(&task->tk_count))
		return;
	/* Release resources */
	if (task->tk_rqstp)
		xprt_release(task);
	if (task->tk_msg.rpc_cred)
		rpcauth_unbindcred(task);
	if (task->tk_client) {
		rpc_release_client(task->tk_client);
		task->tk_client = NULL;
	}
	if (task->tk_flags & RPC_TASK_DYNAMIC)
		call_rcu_bh(&task->u.tk_rcu, rpc_free_task);
	rpc_release_calldata(tk_ops, calldata);
}
EXPORT_SYMBOL(rpc_put_task);

static void rpc_release_task(struct rpc_task *task)
{
#ifdef RPC_DEBUG
	BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
#endif
	dprintk("RPC: %5u release task\n", task->tk_pid);

	/* Remove from global task list */
	spin_lock(&rpc_sched_lock);
	list_del(&task->tk_task);
	spin_unlock(&rpc_sched_lock);

	BUG_ON(RPC_IS_QUEUED(task));

	/* Synchronously delete any running timer */
	rpc_delete_timer(task);

#ifdef RPC_DEBUG
	task->tk_magic = 0;
#endif
	/* Wake up anyone who is waiting for task completion */
	rpc_mark_complete_task(task);

	rpc_put_task(task);
}

/**
 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
 * @clnt: pointer to RPC client
 * @flags: RPC flags
 * @ops: RPC call ops
 * @data: user call data
 */
struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags,
				const struct rpc_call_ops *ops,
				void *data)
{
	struct rpc_task *task;

	task = rpc_new_task(clnt, flags, ops, data);
	if (task == NULL) {
		rpc_release_calldata(ops, data);
		return ERR_PTR(-ENOMEM);
	}
	atomic_inc(&task->tk_count);
	rpc_execute(task);
	return task;
}
EXPORT_SYMBOL(rpc_run_task);
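
/*
 * Illustrative sketch (hypothetical caller, not built): an asynchronous
 * user supplies rpc_call_ops callbacks, fires the task with rpc_run_task(),
 * and drops the extra reference it was handed with rpc_put_task() once it
 * is done looking at the result. The example_* names are made up.
 */
#if 0
static void example_call_done(struct rpc_task *task, void *calldata)
{
	dprintk("RPC: %5u example call finished, status %d\n",
			task->tk_pid, task->tk_status);
}

static const struct rpc_call_ops example_call_ops = {
	.rpc_call_done = example_call_done,
};

static int example_start_call(struct rpc_clnt *clnt, void *calldata)
{
	struct rpc_task *task;

	task = rpc_run_task(clnt, RPC_TASK_ASYNC, &example_call_ops, calldata);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
#endif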

/*
 * Kill all tasks for the given client.
 * XXX: kill their descendants as well?
 */
void rpc_killall_tasks(struct rpc_clnt *clnt)
{
	struct rpc_task *rovr;
	struct list_head *le;

	dprintk("RPC: killing all tasks for client %p\n", clnt);

	/*
	 * Spin lock all_tasks to prevent changes...
	 */
	spin_lock(&rpc_sched_lock);
	alltask_for_each(rovr, le, &all_tasks) {
		if (!RPC_IS_ACTIVATED(rovr))
			continue;
		if (!clnt || rovr->tk_client == clnt) {
			rovr->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(rovr, -EIO);
			rpc_wake_up_task(rovr);
		}
	}
	spin_unlock(&rpc_sched_lock);
}

static DECLARE_MUTEX_LOCKED(rpciod_running);

static void rpciod_killall(void)
{
	unsigned long flags;

	while (!list_empty(&all_tasks)) {
		clear_thread_flag(TIF_SIGPENDING);
		rpc_killall_tasks(NULL);
		flush_workqueue(rpciod_workqueue);
		if (!list_empty(&all_tasks)) {
			dprintk("RPC: rpciod_killall: waiting for tasks "
					"to exit\n");
			yield();
		}
	}

	spin_lock_irqsave(&current->sighand->siglock, flags);
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/*
 * Start up the rpciod process if it's not already running.
 */
int
rpciod_up(void)
{
	struct workqueue_struct *wq;
	int error = 0;

	mutex_lock(&rpciod_mutex);
	dprintk("RPC: rpciod_up: users %u\n", rpciod_users);
	rpciod_users++;
	if (rpciod_workqueue)
		goto out;
	/*
	 * If there's no pid, we should be the first user.
	 */
	if (rpciod_users > 1)
		printk(KERN_WARNING "rpciod_up: no workqueue, %u users??\n", rpciod_users);
	/*
	 * Create the rpciod thread and wait for it to start.
	 */
	error = -ENOMEM;
	wq = create_workqueue("rpciod");
	if (wq == NULL) {
		printk(KERN_WARNING "rpciod_up: create workqueue failed, error=%d\n", error);
		rpciod_users--;
		goto out;
	}
	rpciod_workqueue = wq;
	error = 0;
out:
	mutex_unlock(&rpciod_mutex);
	return error;
}

void
rpciod_down(void)
{
	mutex_lock(&rpciod_mutex);
	dprintk("RPC: rpciod_down sema %u\n", rpciod_users);
	if (rpciod_users) {
		if (--rpciod_users)
			goto out;
	} else
		printk(KERN_WARNING "rpciod_down: no users??\n");

	if (!rpciod_workqueue) {
		dprintk("RPC: rpciod_down: Nothing to do!\n");
		goto out;
	}
	rpciod_killall();

	destroy_workqueue(rpciod_workqueue);
	rpciod_workqueue = NULL;
out:
	mutex_unlock(&rpciod_mutex);
}

#ifdef RPC_DEBUG
void rpc_show_tasks(void)
{
	struct list_head *le;
	struct rpc_task *t;

	spin_lock(&rpc_sched_lock);
	if (list_empty(&all_tasks)) {
		spin_unlock(&rpc_sched_lock);
		return;
	}
	printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout "
		"-rpcwait -action- ---ops--\n");
	alltask_for_each(t, le, &all_tasks) {
		const char *rpc_waitq = "none";

		if (RPC_IS_QUEUED(t))
			rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq);

		printk("%5u %04d %04x %6d %8p %6d %8p %8ld %8s %8p %8p\n",
			t->tk_pid,
			(t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1),
			t->tk_flags, t->tk_status,
			t->tk_client,
			(t->tk_client ? t->tk_client->cl_prog : 0),
			t->tk_rqstp, t->tk_timeout,
			rpc_waitq,
			t->tk_action, t->tk_ops);
	}
	spin_unlock(&rpc_sched_lock);
}
#endif

void
rpc_destroy_mempool(void)
{
	if (rpc_buffer_mempool)
		mempool_destroy(rpc_buffer_mempool);
	if (rpc_task_mempool)
		mempool_destroy(rpc_task_mempool);
	if (rpc_task_slabp)
		kmem_cache_destroy(rpc_task_slabp);
	if (rpc_buffer_slabp)
		kmem_cache_destroy(rpc_buffer_slabp);
}

int
rpc_init_mempool(void)
{
	rpc_task_slabp = kmem_cache_create("rpc_tasks",
					sizeof(struct rpc_task),
					0, SLAB_HWCACHE_ALIGN,
					NULL, NULL);
	if (!rpc_task_slabp)
		goto err_nomem;
	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
					RPC_BUFFER_MAXSIZE,
					0, SLAB_HWCACHE_ALIGN,
					NULL, NULL);
	if (!rpc_buffer_slabp)
		goto err_nomem;
	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
						rpc_task_slabp);
	if (!rpc_task_mempool)
		goto err_nomem;
	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
						rpc_buffer_slabp);
	if (!rpc_buffer_mempool)
		goto err_nomem;
	return 0;
err_nomem:
	rpc_destroy_mempool();
	return -ENOMEM;
}