/* list.h */

#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H

#ifdef __KERNEL__

#include <linux/stddef.h>
#include <linux/prefetch.h>
#include <asm/system.h>

/*
 * These are non-NULL pointers that will result in page faults
 * under normal circumstances, used to verify that nobody uses
 * non-initialized list entries.
 */
#define LIST_POISON1  ((void *) 0x00100100)
#define LIST_POISON2  ((void *) 0x00200200)

/*
 * Simple doubly linked list implementation.
 *
 * Some of the internal functions ("__xxx") are useful when
 * manipulating whole lists rather than single entries, as
 * sometimes we already know the next/prev entries and we can
 * generate better code by using them directly rather than
 * using the generic single-entry routines.
 */

struct list_head {
        struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define LIST_HEAD(name) \
        struct list_head name = LIST_HEAD_INIT(name)

static inline void INIT_LIST_HEAD(struct list_head *list)
{
        list->next = list;
        list->prev = list;
}
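
/*
 * Editor's illustrative sketch (not part of the original header): a list is
 * used by embedding a struct list_head inside your own structure and
 * initialising the head either statically with LIST_HEAD() or at run time
 * with INIT_LIST_HEAD().  The names "struct foo" and "foo_list" below are
 * hypothetical.
 *
 *      struct foo {
 *              int data;
 *              struct list_head list;  // chains this foo into foo_list
 *      };
 *
 *      static LIST_HEAD(foo_list);     // compile-time initialised, empty head
 *
 *      static void foo_head_setup(struct list_head *head)
 *      {
 *              INIT_LIST_HEAD(head);   // run-time initialised, empty head
 *      }
 */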

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add(struct list_head *new,
                              struct list_head *prev,
                              struct list_head *next)
{
        next->prev = new;
        new->next = next;
        new->prev = prev;
        prev->next = new;
}

/**
 * list_add - add a new entry
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 */
static inline void list_add(struct list_head *new, struct list_head *head)
{
        __list_add(new, head, head->next);
}

/**
 * list_add_tail - add a new entry
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
        __list_add(new, head->prev, head);
}
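
/*
 * Editor's illustrative sketch (hypothetical "struct foo"/"foo_list" from
 * the sketch above): list_add() inserts right after the head, so repeated
 * adds behave like a stack (LIFO), while list_add_tail() inserts just
 * before the head, giving queue (FIFO) order when the list is walked from
 * head->next.
 *
 *      static void foo_push(struct foo *f)            // stack-style
 *      {
 *              list_add(&f->list, &foo_list);
 *      }
 *
 *      static void foo_enqueue(struct foo *f)         // queue-style
 *      {
 *              list_add_tail(&f->list, &foo_list);
 *      }
 */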

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add_rcu(struct list_head *new,
                                  struct list_head *prev,
                                  struct list_head *next)
{
        new->next = next;
        new->prev = prev;
        smp_wmb();
        next->prev = new;
        prev->next = new;
}

/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
        __list_add_rcu(new, head, head->next);
}

/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
                                     struct list_head *head)
{
        __list_add_rcu(new, head->prev, head);
}
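
/*
 * Editor's illustrative sketch of the locking rule stated above: writers
 * still need mutual exclusion among themselves; only readers go lock-free.
 * A hypothetical spinlock "foo_lock" serialises updates while
 * list_add_rcu() publishes the entry to concurrent RCU readers.
 *
 *      static DEFINE_SPINLOCK(foo_lock);
 *
 *      static void foo_publish(struct foo *f)
 *      {
 *              spin_lock(&foo_lock);           // exclude other updaters
 *              list_add_rcu(&f->list, &foo_list);
 *              spin_unlock(&foo_lock);         // readers never take foo_lock
 *      }
 */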

/*
 * Delete a list entry by making the prev/next entries
 * point to each other.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_del(struct list_head *prev, struct list_head *next)
{
        next->prev = prev;
        prev->next = next;
}

/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 *
 * Note: list_empty on entry does not return true after this, the entry is
 * in an undefined state.
 */
static inline void list_del(struct list_head *entry)
{
        __list_del(entry->prev, entry->next);
        entry->next = LIST_POISON1;
        entry->prev = LIST_POISON2;
}

/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry. Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
        __list_del(entry->prev, entry->next);
        entry->prev = LIST_POISON2;
}
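
/*
 * Editor's illustrative sketch of the deferred-free rule stated above: the
 * updater unlinks the entry under its lock, then waits for an RCU grace
 * period (here with synchronize_rcu()) before freeing, so readers that
 * still hold a pointer to the entry never see freed memory.  "struct foo",
 * "foo_list" and "foo_lock" are the hypothetical names from the earlier
 * sketches.
 *
 *      static void foo_remove(struct foo *f)
 *      {
 *              spin_lock(&foo_lock);
 *              list_del_rcu(&f->list);         // unlink, forward pointer intact
 *              spin_unlock(&foo_lock);
 *              synchronize_rcu();              // wait out pre-existing readers
 *              kfree(f);                       // now nobody can still see f
 *      }
 */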

/**
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The old entry will be replaced with the new entry atomically.
 */
static inline void list_replace_rcu(struct list_head *old,
                                    struct list_head *new)
{
        new->next = old->next;
        new->prev = old->prev;
        smp_wmb();
        new->next->prev = new;
        new->prev->next = new;
        old->prev = LIST_POISON2;
}

/**
 * list_del_init - deletes entry from list and reinitialize it.
 * @entry: the element to delete from the list.
 */
static inline void list_del_init(struct list_head *entry)
{
        __list_del(entry->prev, entry->next);
        INIT_LIST_HEAD(entry);
}

/**
 * list_move - delete from one list and add as another's head
 * @list: the entry to move
 * @head: the head that will precede our entry
 */
static inline void list_move(struct list_head *list, struct list_head *head)
{
        __list_del(list->prev, list->next);
        list_add(list, head);
}

/**
 * list_move_tail - delete from one list and add as another's tail
 * @list: the entry to move
 * @head: the head that will follow our entry
 */
static inline void list_move_tail(struct list_head *list,
                                  struct list_head *head)
{
        __list_del(list->prev, list->next);
        list_add_tail(list, head);
}
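
/*
 * Editor's illustrative sketch: list_move() and list_move_tail() are the
 * delete+add combinations shown above, typically used to shuffle an entry
 * between lists, e.g. from a hypothetical "pending" list to an "active"
 * list.
 *
 *      static LIST_HEAD(pending);
 *      static LIST_HEAD(active);
 *
 *      static void foo_activate(struct foo *f)
 *      {
 *              // unlink from wherever f is, append to the active list
 *              list_move_tail(&f->list, &active);
 *      }
 */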

/**
 * list_empty - tests whether a list is empty
 * @head: the list to test.
 */
static inline int list_empty(const struct list_head *head)
{
        return head->next == head;
}

/**
 * list_empty_careful - tests whether a list is
 * empty _and_ checks that no other CPU might be
 * in the process of still modifying either member
 *
 * NOTE: using list_empty_careful() without synchronization
 * can only be safe if the only activity that can happen
 * to the list entry is list_del_init(). Eg. it cannot be used
 * if another CPU could re-list_add() it.
 *
 * @head: the list to test.
 */
static inline int list_empty_careful(const struct list_head *head)
{
        struct list_head *next = head->next;
        return (next == head) && (next == head->prev);
}

static inline void __list_splice(struct list_head *list,
                                 struct list_head *head)
{
        struct list_head *first = list->next;
        struct list_head *last = list->prev;
        struct list_head *at = head->next;

        first->prev = head;
        head->next = first;

        last->next = at;
        at->prev = last;
}

/**
 * list_splice - join two lists
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice(struct list_head *list, struct list_head *head)
{
        if (!list_empty(list))
                __list_splice(list, head);
}

/**
 * list_splice_init - join two lists and reinitialise the emptied list.
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * The list at @list is reinitialised
 */
static inline void list_splice_init(struct list_head *list,
                                    struct list_head *head)
{
        if (!list_empty(list)) {
                __list_splice(list, head);
                INIT_LIST_HEAD(list);
        }
}
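
/*
 * Editor's illustrative sketch: list_splice_init() is handy for draining a
 * shared list onto a private one so the entries can be processed without
 * holding the lock.  "foo_lock"/"foo_list" are the hypothetical names from
 * the earlier sketches.
 *
 *      static void foo_drain(void)
 *      {
 *              LIST_HEAD(todo);                        // empty, on-stack head
 *
 *              spin_lock(&foo_lock);
 *              list_splice_init(&foo_list, &todo);     // foo_list is empty again
 *              spin_unlock(&foo_lock);
 *
 *              // ... walk and process the entries on "todo" at leisure ...
 *      }
 */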

/**
 * list_entry - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_struct within the struct.
 */
#define list_entry(ptr, type, member) \
        container_of(ptr, type, member)

/**
 * list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 */
#define list_for_each(pos, head) \
        for (pos = (head)->next; prefetch(pos->next), pos != (head); \
                pos = pos->next)
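
/*
 * Editor's illustrative sketch: list_for_each() walks the raw
 * &struct list_head pointers; list_entry() (i.e. container_of()) is then
 * used to get back to the enclosing structure.  Hypothetical "struct foo"
 * and "foo_list" as in the earlier sketches.
 *
 *      struct list_head *pos;
 *
 *      list_for_each(pos, &foo_list) {
 *              struct foo *f = list_entry(pos, struct foo, list);
 *              // ... use f->data ...
 *      }
 */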

/**
 * __list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * This variant differs from list_for_each() in that it's the
 * simplest possible list iteration code, no prefetching is done.
 * Use this for code that knows the list to be very short (empty
 * or 1 entry) most of the time.
 */
#define __list_for_each(pos, head) \
        for (pos = (head)->next; pos != (head); pos = pos->next)

/**
 * list_for_each_prev - iterate over a list backwards
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 */
#define list_for_each_prev(pos, head) \
        for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
                pos = pos->prev)

/**
 * list_for_each_safe - iterate over a list safe against removal of list entry
 * @pos: the &struct list_head to use as a loop counter.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 */
#define list_for_each_safe(pos, n, head) \
        for (pos = (head)->next, n = pos->next; pos != (head); \
                pos = n, n = pos->next)

/**
 * list_for_each_entry - iterate over list of given type
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry(pos, head, member) \
        for (pos = list_entry((head)->next, typeof(*pos), member); \
             prefetch(pos->member.next), &pos->member != (head); \
             pos = list_entry(pos->member.next, typeof(*pos), member))
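
/*
 * Editor's illustrative sketch: list_for_each_entry() folds the
 * list_entry() step into the loop itself, so the cursor is already the
 * containing type.  Hypothetical "struct foo" as before.
 *
 *      struct foo *f;
 *
 *      list_for_each_entry(f, &foo_list, list) {
 *              // ... use f->data; do not remove f here, see the _safe variant ...
 *      }
 */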

/**
 * list_for_each_entry_reverse - iterate backwards over list of given type.
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_reverse(pos, head, member) \
        for (pos = list_entry((head)->prev, typeof(*pos), member); \
             prefetch(pos->member.prev), &pos->member != (head); \
             pos = list_entry(pos->member.prev, typeof(*pos), member))

/**
 * list_prepare_entry - prepare a pos entry for use as a start point in
 *                      list_for_each_entry_continue
 * @pos: the type * to use as a start point
 * @head: the head of the list
 * @member: the name of the list_struct within the struct.
 */
#define list_prepare_entry(pos, head, member) \
        ((pos) ? : list_entry(head, typeof(*pos), member))

/**
 * list_for_each_entry_continue - iterate over list of given type
 *                      continuing after existing point
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_continue(pos, head, member) \
        for (pos = list_entry(pos->member.next, typeof(*pos), member); \
             prefetch(pos->member.next), &pos->member != (head); \
             pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_from - iterate over list of given type
 *                      continuing from existing point
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_from(pos, head, member) \
        for (; prefetch(pos->member.next), &pos->member != (head); \
             pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @pos: the type * to use as a loop counter.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe(pos, n, head, member) \
        for (pos = list_entry((head)->next, typeof(*pos), member), \
                n = list_entry(pos->member.next, typeof(*pos), member); \
             &pos->member != (head); \
             pos = n, n = list_entry(n->member.next, typeof(*n), member))
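
/*
 * Editor's illustrative sketch: the _safe variant keeps a second cursor so
 * the current entry may be unlinked and freed without breaking the walk.
 * Hypothetical "struct foo" as before.
 *
 *      struct foo *f, *tmp;
 *
 *      list_for_each_entry_safe(f, tmp, &foo_list, list) {
 *              list_del(&f->list);     // safe: the loop already holds tmp
 *              kfree(f);
 *      }
 */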

/**
 * list_for_each_entry_safe_continue - iterate over list of given type
 *                      continuing after existing point safe against removal of list entry
 * @pos: the type * to use as a loop counter.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe_continue(pos, n, head, member) \
        for (pos = list_entry(pos->member.next, typeof(*pos), member), \
                n = list_entry(pos->member.next, typeof(*pos), member); \
             &pos->member != (head); \
             pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_from - iterate over list of given type
 *                      from existing point safe against removal of list entry
 * @pos: the type * to use as a loop counter.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe_from(pos, n, head, member) \
        for (n = list_entry(pos->member.next, typeof(*pos), member); \
             &pos->member != (head); \
             pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_reverse - iterate backwards over list of given type safe against
 *                      removal of list entry
 * @pos: the type * to use as a loop counter.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe_reverse(pos, n, head, member) \
        for (pos = list_entry((head)->prev, typeof(*pos), member), \
                n = list_entry(pos->member.prev, typeof(*pos), member); \
             &pos->member != (head); \
             pos = n, n = list_entry(n->member.prev, typeof(*n), member))

/**
 * list_for_each_rcu - iterate over an rcu-protected list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_rcu(pos, head) \
        for (pos = (head)->next; \
             prefetch(rcu_dereference(pos)->next), pos != (head); \
             pos = pos->next)

#define __list_for_each_rcu(pos, head) \
        for (pos = (head)->next; \
             rcu_dereference(pos) != (head); \
             pos = pos->next)

/**
 * list_for_each_safe_rcu - iterate over an rcu-protected list safe
 *                      against removal of list entry
 * @pos: the &struct list_head to use as a loop counter.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_safe_rcu(pos, n, head) \
        for (pos = (head)->next; \
             n = rcu_dereference(pos)->next, pos != (head); \
             pos = n)

/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member) \
        for (pos = list_entry((head)->next, typeof(*pos), member); \
             prefetch(rcu_dereference(pos)->member.next), \
                &pos->member != (head); \
             pos = list_entry(pos->member.next, typeof(*pos), member))
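
/*
 * Editor's illustrative sketch of the read side that pairs with the _rcu
 * mutators above: the whole traversal sits inside
 * rcu_read_lock()/rcu_read_unlock(), and a pointer obtained from the list
 * must not be used after rcu_read_unlock() unless a reference was taken.
 * Hypothetical "struct foo"/"foo_list" as before.
 *
 *      static int foo_contains(int key)
 *      {
 *              struct foo *f;
 *              int found = 0;
 *
 *              rcu_read_lock();
 *              list_for_each_entry_rcu(f, &foo_list, list)
 *                      if (f->data == key) {
 *                              found = 1;
 *                              break;
 *                      }
 *              rcu_read_unlock();
 *              return found;
 *      }
 */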

/**
 * list_for_each_continue_rcu - iterate over an rcu-protected list
 *                      continuing after existing point.
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_continue_rcu(pos, head) \
        for ((pos) = (pos)->next; \
             prefetch(rcu_dereference((pos))->next), (pos) != (head); \
             (pos) = (pos)->next)

/*
 * Double linked lists with a single pointer list head.
 * Mostly useful for hash tables where the two pointer list head is
 * too wasteful.
 * You lose the ability to access the tail in O(1).
 */
struct hlist_head {
        struct hlist_node *first;
};

struct hlist_node {
        struct hlist_node *next, **pprev;
};

#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)

static inline void INIT_HLIST_NODE(struct hlist_node *h)
{
        h->next = NULL;
        h->pprev = NULL;
}

static inline int hlist_unhashed(const struct hlist_node *h)
{
        return !h->pprev;
}

static inline int hlist_empty(const struct hlist_head *h)
{
        return !h->first;
}

static inline void __hlist_del(struct hlist_node *n)
{
        struct hlist_node *next = n->next;
        struct hlist_node **pprev = n->pprev;

        *pprev = next;
        if (next)
                next->pprev = pprev;
}

static inline void hlist_del(struct hlist_node *n)
{
        __hlist_del(n);
        n->next = LIST_POISON1;
        n->pprev = LIST_POISON2;
}

/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
        __hlist_del(n);
        n->pprev = LIST_POISON2;
}

static inline void hlist_del_init(struct hlist_node *n)
{
        if (n->pprev) {
                __hlist_del(n);
                INIT_HLIST_NODE(n);
        }
}

/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The old entry will be replaced with the new entry atomically.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
                                     struct hlist_node *new)
{
        struct hlist_node *next = old->next;

        new->next = next;
        new->pprev = old->pprev;
        smp_wmb();
        if (next)
                new->next->pprev = &new->next;
        *new->pprev = new;
        old->pprev = LIST_POISON2;
}

static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
        struct hlist_node *first = h->first;

        n->next = first;
        if (first)
                first->pprev = &n->next;
        h->first = n;
        n->pprev = &h->first;
}
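
/*
 * Editor's illustrative sketch: hlist heads are half the size of list
 * heads, so they suit large hash tables.  A hypothetical fixed-size table
 * "foo_hash" with a trivial modulo hash is shown; all names below are
 * invented for the example.
 *
 *      #define FOO_HASH_SIZE   64
 *
 *      struct foo_item {
 *              unsigned int key;
 *              struct hlist_node node;
 *      };
 *
 *      static struct hlist_head foo_hash[FOO_HASH_SIZE];
 *
 *      static void foo_hash_insert(struct foo_item *item)
 *      {
 *              hlist_add_head(&item->node, &foo_hash[item->key % FOO_HASH_SIZE]);
 *      }
 */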

/**
 * hlist_add_head_rcu - adds the specified element to the specified hlist,
 * while permitting racing traversals.
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
                                      struct hlist_head *h)
{
        struct hlist_node *first = h->first;

        n->next = first;
        n->pprev = &h->first;
        smp_wmb();
        if (first)
                first->pprev = &n->next;
        h->first = n;
}

/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
                                    struct hlist_node *next)
{
        n->pprev = next->pprev;
        n->next = next;
        next->pprev = &n->next;
        *(n->pprev) = n;
}

static inline void hlist_add_after(struct hlist_node *n,
                                   struct hlist_node *next)
{
        next->next = n->next;
        n->next = next;
        next->pprev = &n->next;

        if (next->next)
                next->next->pprev = &next->next;
}

/**
 * hlist_add_before_rcu - adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
                                        struct hlist_node *next)
{
        n->pprev = next->pprev;
        n->next = next;
        smp_wmb();
        next->pprev = &n->next;
        *(n->pprev) = n;
}

/**
 * hlist_add_after_rcu - adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 * @prev: the existing element to add the new element after.
 * @n: the new element to add to the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_after_rcu(struct hlist_node *prev,
                                       struct hlist_node *n)
{
        n->next = prev->next;
        n->pprev = &prev->next;
        smp_wmb();
        prev->next = n;
        if (n->next)
                n->next->pprev = &n->next;
}

#define hlist_entry(ptr, type, member) container_of(ptr, type, member)

#define hlist_for_each(pos, head) \
        for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
             pos = pos->next)

#define hlist_for_each_safe(pos, n, head) \
        for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
             pos = n)

/**
 * hlist_for_each_entry - iterate over list of given type
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry(tpos, pos, head, member) \
        for (pos = (head)->first; \
             pos && ({ prefetch(pos->next); 1; }) && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
             pos = pos->next)

/**
 * hlist_for_each_entry_continue - iterate over a hlist continuing after existing point
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue(tpos, pos, member) \
        for (pos = (pos)->next; \
             pos && ({ prefetch(pos->next); 1; }) && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
             pos = pos->next)

/**
 * hlist_for_each_entry_from - iterate over a hlist continuing from existing point
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from(tpos, pos, member) \
        for (; pos && ({ prefetch(pos->next); 1; }) && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
             pos = pos->next)

/**
 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @n: another &struct hlist_node to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
        for (pos = (head)->first; \
             pos && ({ n = pos->next; 1; }) && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
             pos = n)

/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
        for (pos = (head)->first; \
             rcu_dereference(pos) && ({ prefetch(pos->next); 1; }) && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
             pos = pos->next)
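
/*
 * Editor's illustrative sketch of an RCU-protected hash lookup matching
 * the writer-side hlist_add_head_rcu()/hlist_del_rcu() primitives above.
 * "struct foo_item", "foo_hash" and FOO_HASH_SIZE are the hypothetical
 * names from the earlier hlist sketch.
 *
 *      static int foo_hash_contains(unsigned int key)
 *      {
 *              struct foo_item *item;
 *              struct hlist_node *pos;
 *              int found = 0;
 *
 *              rcu_read_lock();
 *              hlist_for_each_entry_rcu(item, pos,
 *                                       &foo_hash[key % FOO_HASH_SIZE], node)
 *                      if (item->key == key) {
 *                              found = 1;
 *                              break;
 *                      }
 *              rcu_read_unlock();
 *              return found;
 *      }
 */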

#else
#warning "don't include kernel headers in userspace"
#endif /* __KERNEL__ */
#endif