  1. /*
  2. * linux/include/linux/sunrpc/sched.h
  3. *
  4. * Scheduling primitives for kernel Sun RPC.
  5. *
  6. * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
  7. */
  8. #ifndef _LINUX_SUNRPC_SCHED_H_
  9. #define _LINUX_SUNRPC_SCHED_H_
  10. #include <linux/timer.h>
  11. #include <linux/sunrpc/types.h>
  12. #include <linux/spinlock.h>
  13. #include <linux/wait.h>
  14. #include <linux/workqueue.h>
  15. #include <linux/sunrpc/xdr.h>
/*
 * This is the actual RPC procedure call info.
 */
struct rpc_procinfo;	/* opaque here; only ever referenced by pointer */
struct rpc_message {
	struct rpc_procinfo *	rpc_proc;	/* Procedure information */
	void *			rpc_argp;	/* Arguments */
	void *			rpc_resp;	/* Result */
	struct rpc_cred *	rpc_cred;	/* Credentials */
};
struct rpc_call_ops;
struct rpc_wait_queue;

/*
 * Per-task wait state: embedded in struct rpc_task (see the union there)
 * and live while the task is sitting on an rpc_wait_queue.
 */
struct rpc_wait {
	struct list_head	list;		/* wait queue links */
	struct list_head	links;		/* Links to related tasks */
	struct rpc_wait_queue *	rpc_waitq;	/* RPC wait queue we're on */
};
/*
 * This is the RPC task struct
 */
struct rpc_task {
#ifdef RPC_DEBUG
	unsigned long		tk_magic;	/* 0xf00baa */
#endif
	atomic_t		tk_count;	/* Reference count */
	struct list_head	tk_task;	/* global list of tasks */
	struct rpc_clnt *	tk_client;	/* RPC client */
	struct rpc_rqst *	tk_rqstp;	/* RPC request */
	int			tk_status;	/* result of last operation */

	/*
	 * RPC call state
	 */
	struct rpc_message	tk_msg;		/* RPC call info */
	__u8			tk_garb_retry;	/* presumably: retries left on garbage replies — confirm */
	__u8			tk_cred_retry;	/* presumably: retries left on cred failures — confirm */
	unsigned long		tk_cookie;	/* Cookie for batching tasks */

	/*
	 * timeout_fn   to be executed by timer bottom half
	 * callback     to be executed after waking up
	 * action       next procedure for async tasks
	 * tk_ops       caller callbacks
	 */
	void			(*tk_timeout_fn)(struct rpc_task *);
	void			(*tk_callback)(struct rpc_task *);
	void			(*tk_action)(struct rpc_task *);
	const struct rpc_call_ops *tk_ops;
	void *			tk_calldata;	/* passed to tk_ops callbacks */

	/*
	 * tk_timer is used for async processing by the RPC scheduling
	 * primitives. You should not access this directly unless
	 * you have a pathological interest in kernel oopses.
	 */
	struct timer_list	tk_timer;	/* kernel timer */
	unsigned long		tk_timeout;	/* timeout for rpc_sleep() */
	unsigned short		tk_flags;	/* misc flags (RPC_TASK_* masks below) */
	unsigned char		tk_priority : 2;/* Task priority */
	unsigned long		tk_runstate;	/* Task run status (RPC_TASK_* bit numbers below) */
	struct workqueue_struct	*tk_workqueue;	/* Normally rpciod, but could
						 * be any workqueue
						 */
	/*
	 * NOTE(review): the union suggests tk_work and tk_wait are never
	 * needed at the same time (queued vs. executing) — confirm before
	 * relying on that invariant.
	 */
	union {
		struct work_struct	tk_work;	/* Async task work queue */
		struct rpc_wait		tk_wait;	/* RPC wait */
	} u;
#ifdef RPC_DEBUG
	unsigned short		tk_pid;		/* debugging aid */
#endif
};

/* Convenience accessors that reach through the task's client */
#define tk_auth			tk_client->cl_auth
#define tk_xprt			tk_client->cl_xprt
/* support walking a list of tasks on a wait queue */
/*
 * The "(assignment), 1" comma expression makes the if-condition always
 * true; the if exists only to bind @task per iteration.  Beware the
 * dangling-else hazard: don't follow a task_for_* with a bare "else".
 */
#define task_for_each(task, pos, head) \
	list_for_each(pos, head) \
		if ((task=list_entry(pos, struct rpc_task, u.tk_wait.list)),1)

/* Bind @task to the first entry; expands to an if, so the caller's
 * following statement/block runs only when the list is non-empty. */
#define task_for_first(task, head) \
	if (!list_empty(head) && \
	    ((task=list_entry((head)->next, struct rpc_task, u.tk_wait.list)),1))

/* .. and walking list of all tasks */
#define alltask_for_each(task, pos, head) \
	list_for_each(pos, head) \
		if ((task=list_entry(pos, struct rpc_task, tk_task)),1)

/* Signature shared by tk_timeout_fn/tk_callback/tk_action handlers */
typedef void			(*rpc_action)(struct rpc_task *);
/*
 * Caller-supplied lifecycle callbacks for a task; each receives the
 * task's tk_calldata as its void * argument.
 */
struct rpc_call_ops {
	void (*rpc_call_prepare)(struct rpc_task *, void *);	/* before the call runs */
	void (*rpc_call_done)(struct rpc_task *, void *);	/* after the call completes */
	void (*rpc_release)(void *);				/* free/release calldata */
};
/*
 * RPC task flags — bit masks stored in tk_flags.
 * (Note: 0x0004 and 0x0010 are unused gaps in the mask sequence.)
 */
#define RPC_TASK_ASYNC		0x0001		/* is an async task */
#define RPC_TASK_SWAPPER	0x0002		/* is swapping in/out */
#define RPC_TASK_CHILD		0x0008		/* is child of other task */
#define RPC_CALL_MAJORSEEN	0x0020		/* major timeout seen */
#define RPC_TASK_ROOTCREDS	0x0040		/* force root creds */
#define RPC_TASK_DYNAMIC	0x0080		/* task was kmalloc'ed */
#define RPC_TASK_KILLED		0x0100		/* task was killed */
#define RPC_TASK_SOFT		0x0200		/* Use soft timeouts */
#define RPC_TASK_NOINTR		0x0400		/* uninterruptible task */

/* Convenience predicates over tk_flags / tk_callback */
#define RPC_IS_ASYNC(t)		((t)->tk_flags & RPC_TASK_ASYNC)
#define RPC_IS_CHILD(t)		((t)->tk_flags & RPC_TASK_CHILD)
#define RPC_IS_SWAPPER(t)	((t)->tk_flags & RPC_TASK_SWAPPER)
#define RPC_DO_ROOTOVERRIDE(t)	((t)->tk_flags & RPC_TASK_ROOTCREDS)
#define RPC_ASSASSINATED(t)	((t)->tk_flags & RPC_TASK_KILLED)
#define RPC_DO_CALLBACK(t)	((t)->tk_callback != NULL)
#define RPC_IS_SOFT(t)		((t)->tk_flags & RPC_TASK_SOFT)
#define RPC_TASK_UNINTERRUPTIBLE(t)	((t)->tk_flags & RPC_TASK_NOINTR)
/*
 * tk_runstate bit NUMBERS (for the atomic bitops below).
 * Gotcha: these reuse the RPC_TASK_ prefix but are bit indices into
 * tk_runstate, NOT masks like the tk_flags values above.
 */
#define RPC_TASK_RUNNING	0
#define RPC_TASK_QUEUED		1
#define RPC_TASK_WAKEUP		2
#define RPC_TASK_HAS_TIMER	3
#define RPC_TASK_ACTIVE		4

#define RPC_IS_RUNNING(t)	(test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate))
#define rpc_set_running(t)	(set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate))
#define rpc_test_and_set_running(t) \
				(test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate))
/* Barriers order prior stores before the clear and the clear before
 * later loads, so waiters/wakers see a consistent state transition. */
#define rpc_clear_running(t)	\
	do { \
		smp_mb__before_clear_bit(); \
		clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \
		smp_mb__after_clear_bit(); \
	} while (0)

#define RPC_IS_QUEUED(t)	(test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate))
#define rpc_set_queued(t)	(set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate))
#define rpc_clear_queued(t)	\
	do { \
		smp_mb__before_clear_bit(); \
		clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate); \
		smp_mb__after_clear_bit(); \
	} while (0)

/* True only for the one caller that wins the race to wake the task;
 * the winner must pair this with rpc_finish_wakeup(). */
#define rpc_start_wakeup(t) \
	(test_and_set_bit(RPC_TASK_WAKEUP, &(t)->tk_runstate) == 0)
#define rpc_finish_wakeup(t) \
	do { \
		smp_mb__before_clear_bit(); \
		clear_bit(RPC_TASK_WAKEUP, &(t)->tk_runstate); \
		smp_mb__after_clear_bit(); \
	} while (0)

#define RPC_IS_ACTIVATED(t)	(test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate))
#define rpc_set_active(t)	(set_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate))
#define rpc_clear_active(t)	\
	do { \
		smp_mb__before_clear_bit(); \
		clear_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate); \
		smp_mb__after_clear_bit(); \
	} while(0)
/*
 * Task priorities.
 * Note: if you change these, you must also change
 * the task initialization definitions below.
 * (tk_priority is a 2-bit field, so values must stay in 0..3.)
 */
#define RPC_PRIORITY_LOW	0
#define RPC_PRIORITY_NORMAL	1
#define RPC_PRIORITY_HIGH	2
#define RPC_NR_PRIORITY		(RPC_PRIORITY_HIGH+1)
/*
 * RPC synchronization objects
 *
 * A wait queue keeps one task list per priority level; the cookie and
 * count fields implement batched fair servicing (see RPC_BATCH_COUNT).
 */
struct rpc_wait_queue {
	spinlock_t		lock;
	struct list_head	tasks[RPC_NR_PRIORITY];	/* task queue for each priority level */
	unsigned long		cookie;			/* cookie of last task serviced */
	unsigned char		maxpriority;		/* maximum priority (0 if queue is not a priority queue) */
	unsigned char		priority;		/* current priority */
	unsigned char		count;			/* # task groups remaining serviced so far */
	unsigned char		nr;			/* # tasks remaining for cookie */
#ifdef RPC_DEBUG
	const char *		name;			/* only present in debug builds */
#endif
};
/*
 * This is the # requests to send consecutively
 * from a single cookie. The aim is to improve
 * performance of NFS operations such as read/write.
 */
#define RPC_BATCH_COUNT			16

/*
 * Static initializers for struct rpc_wait_queue.  The tasks[] indices
 * must cover 0..RPC_NR_PRIORITY-1 (see priority note above).  The qname
 * argument is only stored when RPC_DEBUG is defined; the non-debug
 * variant accepts and ignores it so call sites are identical.
 */
#ifndef RPC_DEBUG
# define RPC_WAITQ_INIT(var,qname) { \
		.lock = SPIN_LOCK_UNLOCKED, \
		.tasks = { \
			[0] = LIST_HEAD_INIT(var.tasks[0]), \
			[1] = LIST_HEAD_INIT(var.tasks[1]), \
			[2] = LIST_HEAD_INIT(var.tasks[2]), \
		}, \
	}
#else
# define RPC_WAITQ_INIT(var,qname) { \
		.lock = SPIN_LOCK_UNLOCKED, \
		.tasks = { \
			[0] = LIST_HEAD_INIT(var.tasks[0]), \
			[1] = LIST_HEAD_INIT(var.tasks[1]), \
			[2] = LIST_HEAD_INIT(var.tasks[2]), \
		}, \
		.name = qname, \
	}
#endif

/* Define and statically initialize a wait queue in one go */
# define RPC_WAITQ(var,qname) struct rpc_wait_queue var = RPC_WAITQ_INIT(var,qname)

#define RPC_IS_PRIORITY(q)		((q)->maxpriority > 0)
/*
 * Function prototypes
 */
/* Task creation / lifecycle */
struct rpc_task *rpc_new_task(struct rpc_clnt *, int flags,
				const struct rpc_call_ops *ops, void *data);
struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags,
				const struct rpc_call_ops *ops, void *data);
struct rpc_task *rpc_new_child(struct rpc_clnt *, struct rpc_task *parent);
void		rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt,
				int flags, const struct rpc_call_ops *ops,
				void *data);
void		rpc_release_task(struct rpc_task *);
void		rpc_exit_task(struct rpc_task *);
void		rpc_killall_tasks(struct rpc_clnt *);
int		rpc_execute(struct rpc_task *);
void		rpc_run_child(struct rpc_task *parent, struct rpc_task *child,
				rpc_action action);

/* Wait queue setup and sleep/wake primitives */
void		rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *);
void		rpc_init_wait_queue(struct rpc_wait_queue *, const char *);
void		rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *,
					rpc_action action, rpc_action timer);
void		rpc_wake_up_task(struct rpc_task *);
void		rpc_wake_up(struct rpc_wait_queue *);
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *);
void		rpc_wake_up_status(struct rpc_wait_queue *, int);
void		rpc_delay(struct rpc_task *, unsigned long);

/* Per-task buffer allocation and the rpciod worker */
void *		rpc_malloc(struct rpc_task *, size_t);
void		rpc_free(struct rpc_task *);
int		rpciod_up(void);
void		rpciod_down(void);
void		rpciod_wake_up(void);
int		__rpc_wait_for_completion_task(struct rpc_task *task, int (*)(void *));
#ifdef RPC_DEBUG
void		rpc_show_tasks(void);
#endif
int		rpc_init_mempool(void);
void		rpc_destroy_mempool(void);
/*
 * Terminate a task: record @status as its result and route its state
 * machine to rpc_exit_task on the next scheduling step.  Order matters:
 * tk_status is set before tk_action so rpc_exit_task sees the final
 * status — keep these two assignments as they are.
 */
static inline void rpc_exit(struct rpc_task *task, int status)
{
	task->tk_status = status;
	task->tk_action = rpc_exit_task;
}
/*
 * Block until @task completes.  Thin wrapper around
 * __rpc_wait_for_completion_task() with a NULL wait-action, i.e. the
 * default waiting behaviour; returns its status.
 */
static inline int rpc_wait_for_completion_task(struct rpc_task *task)
{
	return __rpc_wait_for_completion_task(task, NULL);
}
  260. #ifdef RPC_DEBUG
  261. static inline const char * rpc_qname(struct rpc_wait_queue *q)
  262. {
  263. return ((q && q->name) ? q->name : "unknown");
  264. }
  265. #endif
  266. #endif /* _LINUX_SUNRPC_SCHED_H_ */