/*
 *  linux/include/linux/sunrpc/sched.h
 *
 *  Scheduling primitives for kernel Sun RPC.
 *
 *  Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#ifndef _LINUX_SUNRPC_SCHED_H_
#define _LINUX_SUNRPC_SCHED_H_

#include <linux/timer.h>
#include <linux/sunrpc/types.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/xdr.h>

/*
 * This is the actual RPC procedure call info.
 */
struct rpc_procinfo;
struct rpc_message {
	struct rpc_procinfo *	rpc_proc;	/* Procedure information */
	void *			rpc_argp;	/* Arguments */
	void *			rpc_resp;	/* Result */
	struct rpc_cred *	rpc_cred;	/* Credentials */
};
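
/*
 * Example (illustrative sketch only, not part of this header): a caller
 * normally fills in an rpc_message on the stack before issuing the call,
 * e.g. via rpc_call_sync() from <linux/sunrpc/clnt.h>.  The procedure
 * table, argument and result variables below are hypothetical.
 *
 *	struct rpc_message msg = {
 *		.rpc_proc	= &example_procedures[EXAMPLEPROC_GETATTR],
 *		.rpc_argp	= &args,
 *		.rpc_resp	= &res,
 *		.rpc_cred	= cred,
 *	};
 *	int status = rpc_call_sync(clnt, &msg, 0);
 */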

struct rpc_call_ops;
struct rpc_wait_queue;

struct rpc_wait {
	struct list_head	list;		/* wait queue links */
	struct list_head	links;		/* Links to related tasks */
	struct rpc_wait_queue *	rpc_waitq;	/* RPC wait queue we're on */
};

/*
 * This is the RPC task struct
 */
struct rpc_task {
#ifdef RPC_DEBUG
	unsigned long		tk_magic;	/* 0xf00baa */
#endif
	atomic_t		tk_count;	/* Reference count */
	struct list_head	tk_task;	/* global list of tasks */
	struct rpc_clnt *	tk_client;	/* RPC client */
	struct rpc_rqst *	tk_rqstp;	/* RPC request */
	int			tk_status;	/* result of last operation */

	/*
	 * RPC call state
	 */
	struct rpc_message	tk_msg;		/* RPC call info */
	__u8			tk_garb_retry;
	__u8			tk_cred_retry;

	unsigned long		tk_cookie;	/* Cookie for batching tasks */

	/*
	 * timeout_fn	to be executed by timer bottom half
	 * callback	to be executed after waking up
	 * action	next procedure for async tasks
	 * tk_ops	caller callbacks
	 */
	void			(*tk_timeout_fn)(struct rpc_task *);
	void			(*tk_callback)(struct rpc_task *);
	void			(*tk_action)(struct rpc_task *);
	const struct rpc_call_ops *tk_ops;
	void *			tk_calldata;

	/*
	 * tk_timer is used for async processing by the RPC scheduling
	 * primitives. You should not access this directly unless
	 * you have a pathological interest in kernel oopses.
	 */
	struct timer_list	tk_timer;	/* kernel timer */
	unsigned long		tk_timeout;	/* timeout for rpc_sleep() */
	unsigned short		tk_flags;	/* misc flags */
	unsigned char		tk_priority : 2;/* Task priority */
	unsigned long		tk_runstate;	/* Task run status */
	struct workqueue_struct	*tk_workqueue;	/* Normally rpciod, but could
						 * be any workqueue
						 */
	union {
		struct work_struct	tk_work;	/* Async task work queue */
		struct rpc_wait		tk_wait;	/* RPC wait */
		struct rcu_head		tk_rcu;		/* for task deletion */
	} u;

	unsigned short		tk_timeouts;	/* maj timeouts */
	size_t			tk_bytes_sent;	/* total bytes sent */
	unsigned long		tk_start;	/* RPC task init timestamp */
	long			tk_rtt;		/* round-trip time (jiffies) */

#ifdef RPC_DEBUG
	unsigned short		tk_pid;		/* debugging aid */
#endif
};
#define tk_xprt			tk_client->cl_xprt

/* support walking a list of tasks on a wait queue */
#define	task_for_each(task, pos, head) \
	list_for_each(pos, head) \
		if ((task=list_entry(pos, struct rpc_task, u.tk_wait.list)),1)

#define	task_for_first(task, head) \
	if (!list_empty(head) && \
		((task=list_entry((head)->next, struct rpc_task, u.tk_wait.list)),1))
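
/*
 * Example (illustrative sketch only): walking the tasks queued at one
 * priority level of a wait queue with task_for_each().  The caller must
 * hold the queue's lock; "queue" and the loop body are hypothetical.
 *
 *	struct rpc_task *task;
 *	struct list_head *pos;
 *
 *	task_for_each(task, pos, &queue->tasks[RPC_PRIORITY_NORMAL]) {
 *		if (task->tk_status < 0)
 *			continue;
 *	}
 */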

typedef void			(*rpc_action)(struct rpc_task *);

struct rpc_call_ops {
	void (*rpc_call_prepare)(struct rpc_task *, void *);
	void (*rpc_call_done)(struct rpc_task *, void *);
	void (*rpc_release)(void *);
};
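
/*
 * Example (illustrative sketch only): a user of the async interface supplies
 * an rpc_call_ops table; rpc_call_done() runs when the call completes and
 * rpc_release() frees the calldata afterwards.  All names below are
 * hypothetical.
 *
 *	static void example_call_done(struct rpc_task *task, void *calldata)
 *	{
 *		struct example_data *data = calldata;
 *
 *		data->status = task->tk_status;
 *	}
 *
 *	static void example_release(void *calldata)
 *	{
 *		kfree(calldata);
 *	}
 *
 *	static const struct rpc_call_ops example_call_ops = {
 *		.rpc_call_done	= example_call_done,
 *		.rpc_release	= example_release,
 *	};
 */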

/*
 * RPC task flags
 */
#define RPC_TASK_ASYNC		0x0001		/* is an async task */
#define RPC_TASK_SWAPPER	0x0002		/* is swapping in/out */
#define RPC_CALL_MAJORSEEN	0x0020		/* major timeout seen */
#define RPC_TASK_ROOTCREDS	0x0040		/* force root creds */
#define RPC_TASK_DYNAMIC	0x0080		/* task was kmalloc'ed */
#define RPC_TASK_KILLED		0x0100		/* task was killed */
#define RPC_TASK_SOFT		0x0200		/* Use soft timeouts */
#define RPC_TASK_NOINTR		0x0400		/* uninterruptible task */

#define RPC_IS_ASYNC(t)		((t)->tk_flags & RPC_TASK_ASYNC)
#define RPC_IS_SWAPPER(t)	((t)->tk_flags & RPC_TASK_SWAPPER)
#define RPC_DO_ROOTOVERRIDE(t)	((t)->tk_flags & RPC_TASK_ROOTCREDS)
#define RPC_ASSASSINATED(t)	((t)->tk_flags & RPC_TASK_KILLED)
#define RPC_DO_CALLBACK(t)	((t)->tk_callback != NULL)
#define RPC_IS_SOFT(t)		((t)->tk_flags & RPC_TASK_SOFT)
#define RPC_TASK_UNINTERRUPTIBLE(t)	((t)->tk_flags & RPC_TASK_NOINTR)
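
/*
 * Example (illustrative sketch only): the flags above are ORed together when
 * a task is created, e.g. an asynchronous call that should use soft timeouts
 * rather than retrying forever.  "example_call_ops" and "data" are
 * hypothetical.
 *
 *	task = rpc_run_task(clnt, RPC_TASK_ASYNC | RPC_TASK_SOFT,
 *			    &example_call_ops, data);
 */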

#define RPC_TASK_RUNNING	0
#define RPC_TASK_QUEUED		1
#define RPC_TASK_WAKEUP		2
#define RPC_TASK_HAS_TIMER	3
#define RPC_TASK_ACTIVE		4

#define RPC_IS_RUNNING(t)	test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define rpc_set_running(t)	set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define rpc_test_and_set_running(t) \
				test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define rpc_clear_running(t)	\
	do { \
		smp_mb__before_clear_bit(); \
		clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \
		smp_mb__after_clear_bit(); \
	} while (0)

#define RPC_IS_QUEUED(t)	test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
#define rpc_set_queued(t)	set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
#define rpc_clear_queued(t)	\
	do { \
		smp_mb__before_clear_bit(); \
		clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate); \
		smp_mb__after_clear_bit(); \
	} while (0)

#define rpc_start_wakeup(t) \
	(test_and_set_bit(RPC_TASK_WAKEUP, &(t)->tk_runstate) == 0)
#define rpc_finish_wakeup(t) \
	do { \
		smp_mb__before_clear_bit(); \
		clear_bit(RPC_TASK_WAKEUP, &(t)->tk_runstate); \
		smp_mb__after_clear_bit(); \
	} while (0)

#define RPC_IS_ACTIVATED(t)	test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate)
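
/*
 * Example (illustrative sketch only): rpc_start_wakeup()/rpc_finish_wakeup()
 * bracket a wake-up so that only one waker operates on the task at a time,
 * roughly as the wake-up paths in net/sunrpc/sched.c do.  The helper
 * example_dequeue_task() is a hypothetical stand-in for the scheduler's
 * internal "remove from wait queue and make runnable" step.
 *
 *	if (rpc_start_wakeup(task)) {
 *		if (RPC_IS_QUEUED(task))
 *			example_dequeue_task(task);
 *		rpc_finish_wakeup(task);
 *	}
 */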

/*
 * Task priorities.
 * Note: if you change these, you must also change
 * the task initialization definitions below.
 */
#define RPC_PRIORITY_LOW	0
#define RPC_PRIORITY_NORMAL	1
#define RPC_PRIORITY_HIGH	2
#define RPC_NR_PRIORITY		(RPC_PRIORITY_HIGH+1)

/*
 * RPC synchronization objects
 */
struct rpc_wait_queue {
	spinlock_t		lock;
	struct list_head	tasks[RPC_NR_PRIORITY];	/* task queue for each priority level */
	unsigned long		cookie;			/* cookie of last task serviced */
	unsigned char		maxpriority;		/* maximum priority (0 if queue is not a priority queue) */
	unsigned char		priority;		/* current priority */
	unsigned char		count;			/* # task groups remaining to be serviced */
	unsigned char		nr;			/* # tasks remaining for cookie */
	unsigned short		qlen;			/* total # tasks waiting in queue */
#ifdef RPC_DEBUG
	const char *		name;
#endif
};

/*
 * This is the # requests to send consecutively
 * from a single cookie. The aim is to improve
 * performance of NFS operations such as read/write.
 */
#define RPC_BATCH_COUNT			16

#ifndef RPC_DEBUG
# define RPC_WAITQ_INIT(var,qname) { \
		.lock = __SPIN_LOCK_UNLOCKED(var.lock), \
		.tasks = { \
			[0] = LIST_HEAD_INIT(var.tasks[0]), \
			[1] = LIST_HEAD_INIT(var.tasks[1]), \
			[2] = LIST_HEAD_INIT(var.tasks[2]), \
			}, \
	}
#else
# define RPC_WAITQ_INIT(var,qname) { \
		.lock = __SPIN_LOCK_UNLOCKED(var.lock), \
		.tasks = { \
			[0] = LIST_HEAD_INIT(var.tasks[0]), \
			[1] = LIST_HEAD_INIT(var.tasks[1]), \
			[2] = LIST_HEAD_INIT(var.tasks[2]), \
			}, \
		.name = qname, \
	}
#endif

# define RPC_WAITQ(var,qname) struct rpc_wait_queue var = RPC_WAITQ_INIT(var,qname)
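
/*
 * Example (illustrative sketch only): RPC_WAITQ() declares and statically
 * initialises a (non-priority) wait queue on which tasks can be put to
 * sleep and later woken.  The queue name and the surrounding code are
 * hypothetical.
 *
 *	static RPC_WAITQ(example_queue, "exampleq");
 *
 *	rpc_sleep_on(&example_queue, task, NULL, NULL);
 *	...
 *	rpc_wake_up(&example_queue);
 */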

#define RPC_IS_PRIORITY(q)		((q)->maxpriority > 0)

/*
 * Function prototypes
 */
struct rpc_task *rpc_new_task(struct rpc_clnt *, int flags,
				const struct rpc_call_ops *ops, void *data);
struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags,
				const struct rpc_call_ops *ops, void *data);
void		rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt,
				int flags, const struct rpc_call_ops *ops,
				void *data);
void		rpc_put_task(struct rpc_task *);
void		rpc_exit_task(struct rpc_task *);
void		rpc_release_calldata(const struct rpc_call_ops *, void *);
void		rpc_killall_tasks(struct rpc_clnt *);
void		rpc_execute(struct rpc_task *);
void		rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *);
void		rpc_init_wait_queue(struct rpc_wait_queue *, const char *);
void		rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *,
					rpc_action action, rpc_action timer);
void		rpc_wake_up_task(struct rpc_task *);
void		rpc_wake_up(struct rpc_wait_queue *);
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *);
void		rpc_wake_up_status(struct rpc_wait_queue *, int);
void		rpc_delay(struct rpc_task *, unsigned long);
void *		rpc_malloc(struct rpc_task *, size_t);
void		rpc_free(void *);
int		rpciod_up(void);
void		rpciod_down(void);
int		__rpc_wait_for_completion_task(struct rpc_task *task, int (*)(void *));
#ifdef RPC_DEBUG
void		rpc_show_tasks(void);
#endif
int		rpc_init_mempool(void);
void		rpc_destroy_mempool(void);
extern struct workqueue_struct *rpciod_workqueue;

static inline void rpc_exit(struct rpc_task *task, int status)
{
	task->tk_status = status;
	task->tk_action = rpc_exit_task;
}
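
/*
 * Example (illustrative sketch only): rpc_exit() is typically called from a
 * task's state machine or from an rpc_call_prepare callback to abort the
 * call early with the given error; rpc_exit_task then invokes the caller's
 * rpc_call_done.  The callback and its data below are hypothetical.
 *
 *	static void example_call_prepare(struct rpc_task *task, void *calldata)
 *	{
 *		struct example_data *data = calldata;
 *
 *		if (!data->valid)
 *			rpc_exit(task, -EIO);
 *	}
 */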

static inline int rpc_wait_for_completion_task(struct rpc_task *task)
{
	return __rpc_wait_for_completion_task(task, NULL);
}

#ifdef RPC_DEBUG
static inline const char * rpc_qname(struct rpc_wait_queue *q)
{
	return ((q && q->name) ? q->name : "unknown");
}
#endif

#endif /* _LINUX_SUNRPC_SCHED_H_ */