list.h

#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H

#ifdef __KERNEL__

#include <linux/stddef.h>
#include <linux/prefetch.h>
#include <asm/system.h>

/*
 * These are non-NULL pointers that will result in page faults
 * under normal circumstances, used to verify that nobody uses
 * non-initialized list entries.
 */
#define LIST_POISON1  ((void *) 0x00100100)
#define LIST_POISON2  ((void *) 0x00200200)

/*
 * Simple doubly linked list implementation.
 *
 * Some of the internal functions ("__xxx") are useful when
 * manipulating whole lists rather than single entries, as
 * sometimes we already know the next/prev entries and we can
 * generate better code by using them directly rather than
 * using the generic single-entry routines.
 */

struct list_head {
        struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define LIST_HEAD(name) \
        struct list_head name = LIST_HEAD_INIT(name)

#define INIT_LIST_HEAD(ptr) do { \
        (ptr)->next = (ptr); (ptr)->prev = (ptr); \
} while (0)
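/*
 * Example (editorial sketch, not part of the original header): a structure
 * that embeds a list_head, plus the two initialization styles above.
 * "struct my_item", "my_items" and the kmalloc()/GFP_KERNEL usage (from
 * <linux/slab.h>) are assumptions for illustration only.
 */
#if 0
struct my_item {
        int value;
        struct list_head link;                  /* embedded list node */
};

static LIST_HEAD(my_items);                     /* compile-time initialized head */

static struct my_item *my_item_alloc(int value)
{
        struct my_item *item = kmalloc(sizeof(*item), GFP_KERNEL);

        if (item) {
                item->value = value;
                INIT_LIST_HEAD(&item->link);    /* run-time initialization */
        }
        return item;
}
#endif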
/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add(struct list_head *new,
                              struct list_head *prev,
                              struct list_head *next)
{
        next->prev = new;
        new->next = next;
        new->prev = prev;
        prev->next = new;
}

/**
 * list_add - add a new entry
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 */
static inline void list_add(struct list_head *new, struct list_head *head)
{
        __list_add(new, head, head->next);
}

/**
 * list_add_tail - add a new entry
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
        __list_add(new, head->prev, head);
}
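/*
 * Example (editorial sketch, not part of the original header): list_add()
 * yields LIFO (stack) order, list_add_tail() yields FIFO (queue) order.
 * "my_items" and "struct my_item" are the hypothetical names from the
 * sketch above; locking is omitted.
 */
#if 0
static void my_push(struct my_item *item)
{
        list_add(&item->link, &my_items);       /* item becomes the first entry */
}

static void my_enqueue(struct my_item *item)
{
        list_add_tail(&item->link, &my_items);  /* item becomes the last entry */
}
#endif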
/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add_rcu(struct list_head *new,
                                  struct list_head *prev,
                                  struct list_head *next)
{
        new->next = next;
        new->prev = prev;
        smp_wmb();
        next->prev = new;
        prev->next = new;
}

/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
        __list_add_rcu(new, head, head->next);
}

/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
                                     struct list_head *head)
{
        __list_add_rcu(new, head->prev, head);
}
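/*
 * Example (editorial sketch, not part of the original header): the update
 * side of an RCU-protected list.  Writers still need mutual exclusion
 * among themselves; the spinlock "my_items_lock" is a hypothetical name,
 * as are "my_items" and "struct my_item" from the earlier sketches.
 */
#if 0
static DEFINE_SPINLOCK(my_items_lock);

static void my_item_publish(struct my_item *item)
{
        spin_lock(&my_items_lock);
        list_add_rcu(&item->link, &my_items);   /* now visible to rcu readers */
        spin_unlock(&my_items_lock);
}
#endif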
/*
 * Delete a list entry by making the prev/next entries
 * point to each other.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_del(struct list_head *prev, struct list_head *next)
{
        next->prev = prev;
        prev->next = next;
}

/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 *
 * Note: list_empty() on @entry does not return true after this; the
 * entry is in an undefined state.
 */
static inline void list_del(struct list_head *entry)
{
        __list_del(entry->prev, entry->next);
        entry->next = LIST_POISON1;
        entry->prev = LIST_POISON2;
}

/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty() on @entry does not return true after this;
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we cannot poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry. Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
        __list_del(entry->prev, entry->next);
        entry->prev = LIST_POISON2;
}
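/*
 * Example (editorial sketch, not part of the original header): unlinking an
 * entry with list_del_rcu() and deferring the kfree() until an RCU grace
 * period has elapsed, as the note above requires.  "my_items",
 * "my_items_lock" and "struct my_item" are the hypothetical names from the
 * earlier sketches.
 */
#if 0
static void my_item_unpublish(struct my_item *item)
{
        spin_lock(&my_items_lock);
        list_del_rcu(&item->link);      /* readers may still be walking over it */
        spin_unlock(&my_items_lock);

        synchronize_rcu();              /* wait for pre-existing readers to finish */
        kfree(item);                    /* now safe to free */
}
#endif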
/**
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The old entry will be replaced with the new entry atomically.
 */
static inline void list_replace_rcu(struct list_head *old,
                                    struct list_head *new)
{
        new->next = old->next;
        new->prev = old->prev;
        smp_wmb();
        new->next->prev = new;
        new->prev->next = new;
        old->prev = LIST_POISON2;
}

/**
 * list_del_init - deletes entry from list and reinitialize it.
 * @entry: the element to delete from the list.
 */
static inline void list_del_init(struct list_head *entry)
{
        __list_del(entry->prev, entry->next);
        INIT_LIST_HEAD(entry);
}
/**
 * list_move - delete from one list and add as another's head
 * @list: the entry to move
 * @head: the head that will precede our entry
 */
static inline void list_move(struct list_head *list, struct list_head *head)
{
        __list_del(list->prev, list->next);
        list_add(list, head);
}

/**
 * list_move_tail - delete from one list and add as another's tail
 * @list: the entry to move
 * @head: the head that will follow our entry
 */
static inline void list_move_tail(struct list_head *list,
                                  struct list_head *head)
{
        __list_del(list->prev, list->next);
        list_add_tail(list, head);
}

/**
 * list_empty - tests whether a list is empty
 * @head: the list to test.
 */
static inline int list_empty(const struct list_head *head)
{
        return head->next == head;
}

/**
 * list_empty_careful - tests whether a list is empty _and_ checks that
 * no other CPU might be in the process of still modifying either member
 * @head: the list to test.
 *
 * NOTE: using list_empty_careful() without synchronization
 * can only be safe if the only activity that can happen
 * to the list entry is list_del_init(). E.g. it cannot be used
 * if another CPU could re-list_add() it.
 */
static inline int list_empty_careful(const struct list_head *head)
{
        struct list_head *next = head->next;
        return (next == head) && (next == head->prev);
}
static inline void __list_splice(struct list_head *list,
                                 struct list_head *head)
{
        struct list_head *first = list->next;
        struct list_head *last = list->prev;
        struct list_head *at = head->next;

        first->prev = head;
        head->next = first;

        last->next = at;
        at->prev = last;
}

/**
 * list_splice - join two lists
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice(struct list_head *list, struct list_head *head)
{
        if (!list_empty(list))
                __list_splice(list, head);
}

/**
 * list_splice_init - join two lists and reinitialise the emptied list.
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * The list at @list is reinitialised.
 */
static inline void list_splice_init(struct list_head *list,
                                    struct list_head *head)
{
        if (!list_empty(list)) {
                __list_splice(list, head);
                INIT_LIST_HEAD(list);
        }
}
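/*
 * Example (editorial sketch, not part of the original header): draining a
 * shared list onto a private one with list_splice_init(), so the entries
 * can be processed without holding the lock.  "my_items", "my_items_lock"
 * and "struct my_item" are the hypothetical names from the earlier sketches.
 */
#if 0
static void my_items_drain(void)
{
        LIST_HEAD(todo);                        /* private, on-stack list */
        struct my_item *item;

        spin_lock(&my_items_lock);
        list_splice_init(&my_items, &todo);     /* my_items is left empty */
        spin_unlock(&my_items_lock);

        /* the private list can now be walked lock-free */
        list_for_each_entry(item, &todo, link)
                printk(KERN_DEBUG "draining item %d\n", item->value);
}
#endif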
/**
 * list_entry - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_struct within the struct.
 */
#define list_entry(ptr, type, member) \
        container_of(ptr, type, member)
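/*
 * Example (editorial sketch, not part of the original header): list_entry()
 * maps a struct list_head pointer back to the structure that embeds it.
 * "my_items" and "struct my_item" are the hypothetical names from the
 * earlier sketches.
 */
#if 0
static struct my_item *my_first_item(void)
{
        if (list_empty(&my_items))
                return NULL;
        return list_entry(my_items.next, struct my_item, link);
}
#endif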
/**
 * list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 */
#define list_for_each(pos, head) \
        for (pos = (head)->next; prefetch(pos->next), pos != (head); \
                pos = pos->next)

/**
 * __list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * This variant differs from list_for_each() in that it's the
 * simplest possible list iteration code, no prefetching is done.
 * Use this for code that knows the list to be very short (empty
 * or 1 entry) most of the time.
 */
#define __list_for_each(pos, head) \
        for (pos = (head)->next; pos != (head); pos = pos->next)

/**
 * list_for_each_prev - iterate over a list backwards
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 */
#define list_for_each_prev(pos, head) \
        for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
                pos = pos->prev)

/**
 * list_for_each_safe - iterate over a list safe against removal of list entry
 * @pos: the &struct list_head to use as a loop counter.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 */
#define list_for_each_safe(pos, n, head) \
        for (pos = (head)->next, n = pos->next; pos != (head); \
                pos = n, n = pos->next)
/**
 * list_for_each_entry - iterate over list of given type
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry(pos, head, member) \
        for (pos = list_entry((head)->next, typeof(*pos), member); \
             prefetch(pos->member.next), &pos->member != (head); \
             pos = list_entry(pos->member.next, typeof(*pos), member))
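/*
 * Example (editorial sketch, not part of the original header): a typed
 * traversal with list_for_each_entry() over the hypothetical "my_items"
 * list; the caller is assumed to hold whatever lock protects the list.
 */
#if 0
static struct my_item *my_item_find(int value)
{
        struct my_item *item;

        list_for_each_entry(item, &my_items, link) {
                if (item->value == value)
                        return item;
        }
        return NULL;
}
#endif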
/**
 * list_for_each_entry_reverse - iterate backwards over list of given type.
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_reverse(pos, head, member) \
        for (pos = list_entry((head)->prev, typeof(*pos), member); \
             prefetch(pos->member.prev), &pos->member != (head); \
             pos = list_entry(pos->member.prev, typeof(*pos), member))

/**
 * list_prepare_entry - prepare a pos entry for use as a start point in
 *      list_for_each_entry_continue()
 * @pos: the type * to use as a start point
 * @head: the head of the list
 * @member: the name of the list_struct within the struct.
 */
#define list_prepare_entry(pos, head, member) \
        ((pos) ? : list_entry(head, typeof(*pos), member))

/**
 * list_for_each_entry_continue - iterate over list of given type
 *      continuing after existing point
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_continue(pos, head, member) \
        for (pos = list_entry(pos->member.next, typeof(*pos), member); \
             prefetch(pos->member.next), &pos->member != (head); \
             pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @pos: the type * to use as a loop counter.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe(pos, n, head, member) \
        for (pos = list_entry((head)->next, typeof(*pos), member), \
                n = list_entry(pos->member.next, typeof(*pos), member); \
             &pos->member != (head); \
             pos = n, n = list_entry(n->member.next, typeof(*n), member))
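/*
 * Example (editorial sketch, not part of the original header):
 * list_for_each_entry_safe() lets the current entry be unlinked and freed
 * during the walk, because the next entry is kept in @n.  "my_items" and
 * "struct my_item" are the hypothetical names from the earlier sketches.
 */
#if 0
static void my_items_free_all(void)
{
        struct my_item *item, *tmp;

        list_for_each_entry_safe(item, tmp, &my_items, link) {
                list_del(&item->link);
                kfree(item);
        }
}
#endif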
/**
 * list_for_each_entry_safe_continue - iterate over list of given type
 *      continuing after existing point safe against removal of list entry
 * @pos: the type * to use as a loop counter.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe_continue(pos, n, head, member) \
        for (pos = list_entry(pos->member.next, typeof(*pos), member), \
                n = list_entry(pos->member.next, typeof(*pos), member); \
             &pos->member != (head); \
             pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_rcu - iterate over an rcu-protected list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_rcu(pos, head) \
        for (pos = (head)->next; \
             prefetch(rcu_dereference(pos)->next), pos != (head); \
             pos = pos->next)

#define __list_for_each_rcu(pos, head) \
        for (pos = (head)->next; \
             rcu_dereference(pos) != (head); \
             pos = pos->next)

/**
 * list_for_each_safe_rcu - iterate over an rcu-protected list safe
 *      against removal of list entry
 * @pos: the &struct list_head to use as a loop counter.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_safe_rcu(pos, n, head) \
        for (pos = (head)->next; \
             n = rcu_dereference(pos)->next, pos != (head); \
             pos = n)

/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member) \
        for (pos = list_entry((head)->next, typeof(*pos), member); \
             prefetch(rcu_dereference(pos)->member.next), \
                &pos->member != (head); \
             pos = list_entry(pos->member.next, typeof(*pos), member))
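/*
 * Example (editorial sketch, not part of the original header): the read
 * side matching the RCU update sketches above.  The traversal is guarded
 * only by rcu_read_lock(); no spinlock is taken.  "my_items" and
 * "struct my_item" are hypothetical.
 */
#if 0
static int my_item_exists(int value)
{
        struct my_item *item;
        int found = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(item, &my_items, link) {
                if (item->value == value) {
                        found = 1;
                        break;
                }
        }
        rcu_read_unlock();
        return found;
}
#endif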
/**
 * list_for_each_continue_rcu - iterate over an rcu-protected list
 *      continuing after existing point.
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_continue_rcu(pos, head) \
        for ((pos) = (pos)->next; \
             prefetch(rcu_dereference((pos))->next), (pos) != (head); \
             (pos) = (pos)->next)

/*
 * Double linked lists with a single pointer list head.
 * Mostly useful for hash tables where the two pointer list head is
 * too wasteful.
 * You lose the ability to access the tail in O(1).
 */
struct hlist_head {
        struct hlist_node *first;
};

struct hlist_node {
        struct hlist_node *next, **pprev;
};

#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
#define INIT_HLIST_NODE(ptr) ((ptr)->next = NULL, (ptr)->pprev = NULL)
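/*
 * Example (editorial sketch, not part of the original header): the typical
 * use of hlist_head as the bucket array of a hash table, where a single
 * pointer per bucket halves the memory cost.  All names here
 * ("my_hash", "struct my_obj", MY_HASH_BITS) are hypothetical.
 */
#if 0
#define MY_HASH_BITS    7
#define MY_HASH_SIZE    (1 << MY_HASH_BITS)

static struct hlist_head my_hash[MY_HASH_SIZE];

struct my_obj {
        unsigned int key;
        struct hlist_node hash_node;            /* embedded hash-chain node */
};

static void my_hash_init(void)
{
        int i;

        for (i = 0; i < MY_HASH_SIZE; i++)
                INIT_HLIST_HEAD(&my_hash[i]);
}
#endif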
static inline int hlist_unhashed(const struct hlist_node *h)
{
        return !h->pprev;
}

static inline int hlist_empty(const struct hlist_head *h)
{
        return !h->first;
}

static inline void __hlist_del(struct hlist_node *n)
{
        struct hlist_node *next = n->next;
        struct hlist_node **pprev = n->pprev;

        *pprev = next;
        if (next)
                next->pprev = pprev;
}

static inline void hlist_del(struct hlist_node *n)
{
        __hlist_del(n);
        n->next = LIST_POISON1;
        n->pprev = LIST_POISON2;
}

/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_unhashed() on entry does not return true after this;
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we cannot poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
        __hlist_del(n);
        n->pprev = LIST_POISON2;
}

static inline void hlist_del_init(struct hlist_node *n)
{
        if (n->pprev) {
                __hlist_del(n);
                INIT_HLIST_NODE(n);
        }
}

/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The old entry will be replaced with the new entry atomically.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
                                     struct hlist_node *new)
{
        struct hlist_node *next = old->next;

        new->next = next;
        new->pprev = old->pprev;
        smp_wmb();
        if (next)
                new->next->pprev = &new->next;
        *new->pprev = new;
        old->pprev = LIST_POISON2;
}

static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
        struct hlist_node *first = h->first;

        n->next = first;
        if (first)
                first->pprev = &n->next;
        h->first = n;
        n->pprev = &h->first;
}
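/*
 * Example (editorial sketch, not part of the original header): inserting
 * into the hypothetical hash table from the sketch above, using
 * hash_long() from <linux/hash.h> to pick a bucket.
 */
#if 0
static void my_hash_insert(struct my_obj *obj)
{
        unsigned int bucket = hash_long(obj->key, MY_HASH_BITS);

        hlist_add_head(&obj->hash_node, &my_hash[bucket]);
}
#endif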
/**
 * hlist_add_head_rcu - adds the specified element to the specified hlist,
 * while permitting racing traversals.
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
                                      struct hlist_head *h)
{
        struct hlist_node *first = h->first;

        n->next = first;
        n->pprev = &h->first;
        smp_wmb();
        if (first)
                first->pprev = &n->next;
        h->first = n;
}

/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
                                    struct hlist_node *next)
{
        n->pprev = next->pprev;
        n->next = next;
        next->pprev = &n->next;
        *(n->pprev) = n;
}

static inline void hlist_add_after(struct hlist_node *n,
                                   struct hlist_node *next)
{
        next->next = n->next;
        n->next = next;
        next->pprev = &n->next;

        if (next->next)
                next->next->pprev = &next->next;
}

/**
 * hlist_add_before_rcu - adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
                                        struct hlist_node *next)
{
        n->pprev = next->pprev;
        n->next = next;
        smp_wmb();
        next->pprev = &n->next;
        *(n->pprev) = n;
}

/**
 * hlist_add_after_rcu - adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 * @prev: the existing element to add the new element after.
 * @n: the new element to add to the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_after_rcu(struct hlist_node *prev,
                                       struct hlist_node *n)
{
        n->next = prev->next;
        n->pprev = &prev->next;
        smp_wmb();
        prev->next = n;
        if (n->next)
                n->next->pprev = &n->next;
}

#define hlist_entry(ptr, type, member) container_of(ptr, type, member)
#define hlist_for_each(pos, head) \
        for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
             pos = pos->next)

#define hlist_for_each_safe(pos, n, head) \
        for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
             pos = n)

/**
 * hlist_for_each_entry - iterate over list of given type
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry(tpos, pos, head, member) \
        for (pos = (head)->first; \
             pos && ({ prefetch(pos->next); 1;}) && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
             pos = pos->next)
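/*
 * Example (editorial sketch, not part of the original header): a bucket
 * lookup with hlist_for_each_entry(), which needs an extra
 * &struct hlist_node cursor alongside the typed pointer.  "my_hash",
 * "struct my_obj" and MY_HASH_BITS are the hypothetical names from the
 * earlier sketches.
 */
#if 0
static struct my_obj *my_hash_find(unsigned int key)
{
        struct my_obj *obj;
        struct hlist_node *pos;
        unsigned int bucket = hash_long(key, MY_HASH_BITS);

        hlist_for_each_entry(obj, pos, &my_hash[bucket], hash_node) {
                if (obj->key == key)
                        return obj;
        }
        return NULL;
}
#endif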
/**
 * hlist_for_each_entry_continue - iterate over a hlist continuing after existing point
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue(tpos, pos, member) \
        for (pos = (pos)->next; \
             pos && ({ prefetch(pos->next); 1;}) && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
             pos = pos->next)

/**
 * hlist_for_each_entry_from - iterate over a hlist continuing from existing point
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from(tpos, pos, member) \
        for (; pos && ({ prefetch(pos->next); 1;}) && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
             pos = pos->next)

/**
 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @n: another &struct hlist_node to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
        for (pos = (head)->first; \
             pos && ({ n = pos->next; 1; }) && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
             pos = n)

/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
        for (pos = (head)->first; \
             rcu_dereference(pos) && ({ prefetch(pos->next); 1;}) && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
             pos = pos->next)
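/*
 * Example (editorial sketch, not part of the original header): the RCU read
 * side of a hash lookup, pairing hlist_for_each_entry_rcu() with
 * hlist_add_head_rcu()/hlist_del_rcu() on the update side.  "my_hash",
 * "struct my_obj" and MY_HASH_BITS are hypothetical.
 */
#if 0
static int my_hash_key_hashed(unsigned int key)
{
        struct my_obj *obj;
        struct hlist_node *pos;
        unsigned int bucket = hash_long(key, MY_HASH_BITS);
        int found = 0;

        rcu_read_lock();
        hlist_for_each_entry_rcu(obj, pos, &my_hash[bucket], hash_node) {
                if (obj->key == key) {
                        found = 1;
                        break;
                }
        }
        rcu_read_unlock();
        return found;
}
#endif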
#else
#warning "don't include kernel headers in userspace"
#endif /* __KERNEL__ */

#endif