
#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H

#ifdef __KERNEL__

#include <linux/stddef.h>
#include <linux/poison.h>
#include <linux/prefetch.h>
#include <asm/system.h>

/*
 * Simple doubly linked list implementation.
 *
 * Some of the internal functions ("__xxx") are useful when
 * manipulating whole lists rather than single entries, as
 * sometimes we already know the next/prev entries and we can
 * generate better code by using them directly rather than
 * using the generic single-entry routines.
 */

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define LIST_HEAD(name) \
	struct list_head name = LIST_HEAD_INIT(name)

static inline void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}
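
/*
 * Example: embedding a list_head in an object. A sketch only; the type
 * "struct mouse" and the names "mouse_list"/"cage" are illustrative and
 * not part of this header.
 *
 *	struct mouse {
 *		int id;
 *		struct list_head node;
 *	};
 *
 *	static LIST_HEAD(mouse_list);
 *
 * A head embedded in another structure is initialised at run time instead:
 *
 *	INIT_LIST_HEAD(&cage->mice);
 */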
/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

/**
 * list_add - add a new entry
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 */
static inline void list_add(struct list_head *new, struct list_head *head)
{
	__list_add(new, head, head->next);
}

/**
 * list_add_tail - add a new entry
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);
}
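
/*
 * Example: choosing between stack and queue behaviour. A sketch using the
 * illustrative "struct mouse" names from above.
 *
 *	list_add(&m->node, &mouse_list);	(LIFO: m is visited first)
 *	list_add_tail(&m->node, &mouse_list);	(FIFO: m is visited last)
 */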
/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add_rcu(struct list_head *new,
				  struct list_head *prev,
				  struct list_head *next)
{
	new->next = next;
	new->prev = prev;
	smp_wmb();
	next->prev = new;
	prev->next = new;
}

/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
	__list_add_rcu(new, head, head->next);
}

/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
				     struct list_head *head)
{
	__list_add_rcu(new, head->prev, head);
}

/*
 * Delete a list entry by making the prev/next entries
 * point to each other.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_del(struct list_head *prev, struct list_head *next)
{
	next->prev = prev;
	prev->next = next;
}

/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 * Note: list_empty on entry does not return true after this, the entry is
 * in an undefined state.
 */
static inline void list_del(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	entry->next = LIST_POISON1;
	entry->prev = LIST_POISON2;
}
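
/*
 * Example: unlinking an entry from a lock-protected (non-RCU) list. A sketch
 * only, continuing the illustrative names from above; after list_del() the
 * node's pointers are poisoned, so re-initialise it (INIT_LIST_HEAD() or
 * list_del_init()) before reusing it on another list.
 *
 *	list_del(&m->node);
 *	kfree(m);
 */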
/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry. Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	entry->prev = LIST_POISON2;
}

/**
 * list_replace - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 * Note: if 'old' was empty, it will be overwritten.
 */
static inline void list_replace(struct list_head *old,
				struct list_head *new)
{
	new->next = old->next;
	new->next->prev = new;
	new->prev = old->prev;
	new->prev->next = new;
}

static inline void list_replace_init(struct list_head *old,
				     struct list_head *new)
{
	list_replace(old, new);
	INIT_LIST_HEAD(old);
}

/**
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The old entry will be replaced with the new entry atomically.
 * Note: 'old' should not be empty.
 */
static inline void list_replace_rcu(struct list_head *old,
				    struct list_head *new)
{
	new->next = old->next;
	new->prev = old->prev;
	smp_wmb();
	new->next->prev = new;
	new->prev->next = new;
	old->prev = LIST_POISON2;
}

/**
 * list_del_init - deletes entry from list and reinitialize it.
 * @entry: the element to delete from the list.
 */
static inline void list_del_init(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	INIT_LIST_HEAD(entry);
}

/**
 * list_move - delete from one list and add as another's head
 * @list: the entry to move
 * @head: the head that will precede our entry
 */
static inline void list_move(struct list_head *list, struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add(list, head);
}

/**
 * list_move_tail - delete from one list and add as another's tail
 * @list: the entry to move
 * @head: the head that will follow our entry
 */
static inline void list_move_tail(struct list_head *list,
				  struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add_tail(list, head);
}

/**
 * list_is_last - tests whether @list is the last entry in list @head
 * @list: the entry to test
 * @head: the head of the list
 */
static inline int list_is_last(const struct list_head *list,
			       const struct list_head *head)
{
	return list->next == head;
}

/**
 * list_empty - tests whether a list is empty
 * @head: the list to test.
 */
static inline int list_empty(const struct list_head *head)
{
	return head->next == head;
}

/**
 * list_empty_careful - tests whether a list is empty and not being modified
 * @head: the list to test
 *
 * Description:
 * tests whether a list is empty _and_ checks that no other CPU might be
 * in the process of modifying either member (next or prev)
 *
 * NOTE: using list_empty_careful() without synchronization
 * can only be safe if the only activity that can happen
 * to the list entry is list_del_init(). Eg. it cannot be used
 * if another CPU could re-list_add() it.
 */
static inline int list_empty_careful(const struct list_head *head)
{
	struct list_head *next = head->next;

	return (next == head) && (next == head->prev);
}

static inline void __list_splice(struct list_head *list,
				 struct list_head *head)
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;
	struct list_head *at = head->next;

	first->prev = head;
	head->next = first;

	last->next = at;
	at->prev = last;
}

/**
 * list_splice - join two lists
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice(struct list_head *list, struct list_head *head)
{
	if (!list_empty(list))
		__list_splice(list, head);
}

/**
 * list_splice_init - join two lists and reinitialise the emptied list.
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * The list at @list is reinitialised
 */
static inline void list_splice_init(struct list_head *list,
				    struct list_head *head)
{
	if (!list_empty(list)) {
		__list_splice(list, head);
		INIT_LIST_HEAD(list);
	}
}
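
/*
 * Example: list_splice_init() is often used to grab a whole list under a
 * lock and process it afterwards without the lock held. A sketch only;
 * "pending", "todo" and "lock" are illustrative names.
 *
 *	LIST_HEAD(todo);
 *
 *	spin_lock(&lock);
 *	list_splice_init(&pending, &todo);
 *	spin_unlock(&lock);
 *
 * The entries can then be walked on "todo" without further locking,
 * provided no one else can reach them any more.
 */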
/**
 * list_entry - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_struct within the struct.
 */
#define list_entry(ptr, type, member) \
	container_of(ptr, type, member)
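
/*
 * Example: mapping from an embedded list_head back to its container. A
 * sketch with the illustrative "struct mouse" from above, where "ptr"
 * points at the "node" member of some struct mouse:
 *
 *	struct mouse *m = list_entry(ptr, struct mouse, node);
 */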
/**
 * list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop cursor.
 * @head: the head for your list.
 */
#define list_for_each(pos, head) \
	for (pos = (head)->next; prefetch(pos->next), pos != (head); \
		pos = pos->next)

/**
 * __list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop cursor.
 * @head: the head for your list.
 *
 * This variant differs from list_for_each() in that it's the
 * simplest possible list iteration code, no prefetching is done.
 * Use this for code that knows the list to be very short (empty
 * or 1 entry) most of the time.
 */
#define __list_for_each(pos, head) \
	for (pos = (head)->next; pos != (head); pos = pos->next)

/**
 * list_for_each_prev - iterate over a list backwards
 * @pos: the &struct list_head to use as a loop cursor.
 * @head: the head for your list.
 */
#define list_for_each_prev(pos, head) \
	for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
		pos = pos->prev)

/**
 * list_for_each_safe - iterate over a list safe against removal of list entry
 * @pos: the &struct list_head to use as a loop cursor.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 */
#define list_for_each_safe(pos, n, head) \
	for (pos = (head)->next, n = pos->next; pos != (head); \
		pos = n, n = pos->next)

/**
 * list_for_each_entry - iterate over list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry(pos, head, member) \
	for (pos = list_entry((head)->next, typeof(*pos), member); \
	     prefetch(pos->member.next), &pos->member != (head); \
	     pos = list_entry(pos->member.next, typeof(*pos), member))
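
/*
 * Example: typical typed iteration, sketched with the illustrative names
 * from above. The loop body must not remove "m" from the list; use the
 * _safe variant further down for that.
 *
 *	struct mouse *m;
 *
 *	list_for_each_entry(m, &mouse_list, node)
 *		printk("mouse %d\n", m->id);
 */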
/**
 * list_for_each_entry_reverse - iterate backwards over list of given type.
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_reverse(pos, head, member) \
	for (pos = list_entry((head)->prev, typeof(*pos), member); \
	     prefetch(pos->member.prev), &pos->member != (head); \
	     pos = list_entry(pos->member.prev, typeof(*pos), member))

/**
 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue
 * @pos: the type * to use as a start point
 * @head: the head of the list
 * @member: the name of the list_struct within the struct.
 *
 * Prepares a pos entry for use as a start point in list_for_each_entry_continue.
 */
#define list_prepare_entry(pos, head, member) \
	((pos) ? : list_entry(head, typeof(*pos), member))

/**
 * list_for_each_entry_continue - continue iteration over list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position.
 */
#define list_for_each_entry_continue(pos, head, member) \
	for (pos = list_entry(pos->member.next, typeof(*pos), member); \
	     prefetch(pos->member.next), &pos->member != (head); \
	     pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_from - iterate over list of given type from the current point
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Iterate over list of given type, continuing from current position.
 */
#define list_for_each_entry_from(pos, head, member) \
	for (; prefetch(pos->member.next), &pos->member != (head); \
	     pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @pos: the type * to use as a loop cursor.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe(pos, n, head, member) \
	for (pos = list_entry((head)->next, typeof(*pos), member), \
		n = list_entry(pos->member.next, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
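
/*
 * Example: emptying a list while walking it, which is the main use of the
 * _safe variants. A sketch with the illustrative names from above.
 *
 *	struct mouse *m, *tmp;
 *
 *	list_for_each_entry_safe(m, tmp, &mouse_list, node) {
 *		list_del(&m->node);
 *		kfree(m);
 *	}
 */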
/**
 * list_for_each_entry_safe_continue
 * @pos: the type * to use as a loop cursor.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Iterate over list of given type, continuing after current point,
 * safe against removal of list entry.
 */
#define list_for_each_entry_safe_continue(pos, n, head, member) \
	for (pos = list_entry(pos->member.next, typeof(*pos), member), \
		n = list_entry(pos->member.next, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_from
 * @pos: the type * to use as a loop cursor.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Iterate over list of given type from current point, safe against
 * removal of list entry.
 */
#define list_for_each_entry_safe_from(pos, n, head, member) \
	for (n = list_entry(pos->member.next, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_reverse
 * @pos: the type * to use as a loop cursor.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Iterate backwards over list of given type, safe against removal
 * of list entry.
 */
#define list_for_each_entry_safe_reverse(pos, n, head, member) \
	for (pos = list_entry((head)->prev, typeof(*pos), member), \
		n = list_entry(pos->member.prev, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = n, n = list_entry(n->member.prev, typeof(*n), member))

/**
 * list_for_each_rcu - iterate over an rcu-protected list
 * @pos: the &struct list_head to use as a loop cursor.
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_rcu(pos, head) \
	for (pos = (head)->next; \
	     prefetch(rcu_dereference(pos)->next), pos != (head); \
	     pos = pos->next)

#define __list_for_each_rcu(pos, head) \
	for (pos = (head)->next; \
	     rcu_dereference(pos) != (head); \
	     pos = pos->next)

/**
 * list_for_each_safe_rcu
 * @pos: the &struct list_head to use as a loop cursor.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 *
 * Iterate over an rcu-protected list, safe against removal of list entry.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_safe_rcu(pos, n, head) \
	for (pos = (head)->next; \
	     n = rcu_dereference(pos)->next, pos != (head); \
	     pos = n)

/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member) \
	for (pos = list_entry((head)->next, typeof(*pos), member); \
	     prefetch(rcu_dereference(pos)->member.next), \
		&pos->member != (head); \
	     pos = list_entry(pos->member.next, typeof(*pos), member))
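
/*
 * Example: an RCU read-side lookup. A sketch with the illustrative names
 * from above; readers take only rcu_read_lock(), while writers serialise
 * against each other (e.g. with a spinlock) and use the _rcu mutation
 * primitives such as list_add_rcu()/list_del_rcu().
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(m, &mouse_list, node)
 *		if (m->id == id)
 *			break;
 *	rcu_read_unlock();
 */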
/**
 * list_for_each_continue_rcu
 * @pos: the &struct list_head to use as a loop cursor.
 * @head: the head for your list.
 *
 * Iterate over an rcu-protected list, continuing after current point.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_continue_rcu(pos, head) \
	for ((pos) = (pos)->next; \
	     prefetch(rcu_dereference((pos))->next), (pos) != (head); \
	     (pos) = (pos)->next)

/*
 * Double linked lists with a single pointer list head.
 * Mostly useful for hash tables where the two pointer list head is
 * too wasteful.
 * You lose the ability to access the tail in O(1).
 */
struct hlist_head {
	struct hlist_node *first;
};

struct hlist_node {
	struct hlist_node *next, **pprev;
};

#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)

static inline void INIT_HLIST_NODE(struct hlist_node *h)
{
	h->next = NULL;
	h->pprev = NULL;
}

static inline int hlist_unhashed(const struct hlist_node *h)
{
	return !h->pprev;
}

static inline int hlist_empty(const struct hlist_head *h)
{
	return !h->first;
}

static inline void __hlist_del(struct hlist_node *n)
{
	struct hlist_node *next = n->next;
	struct hlist_node **pprev = n->pprev;

	*pprev = next;
	if (next)
		next->pprev = pprev;
}

static inline void hlist_del(struct hlist_node *n)
{
	__hlist_del(n);
	n->next = LIST_POISON1;
	n->pprev = LIST_POISON2;
}
/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
	__hlist_del(n);
	n->pprev = LIST_POISON2;
}
static inline void hlist_del_init(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {
		__hlist_del(n);
		INIT_HLIST_NODE(n);
	}
}

/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The old entry will be replaced with the new entry atomically.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
				     struct hlist_node *new)
{
	struct hlist_node *next = old->next;

	new->next = next;
	new->pprev = old->pprev;
	smp_wmb();
	if (next)
		new->next->pprev = &new->next;
	*new->pprev = new;
	old->pprev = LIST_POISON2;
}

static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	struct hlist_node *first = h->first;

	n->next = first;
	if (first)
		first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}
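
/*
 * Example: hlist heads are typically used as hash table buckets, since they
 * are only one pointer wide. A sketch only; "MY_HASH_BITS", "my_table" and
 * the "hash" member are illustrative and not part of this header.
 *
 *	struct hlist_head my_table[1 << MY_HASH_BITS];
 *
 *	struct mouse {
 *		int id;
 *		struct hlist_node hash;
 *	};
 *
 *	hlist_add_head(&m->hash, &my_table[hash_32(m->id, MY_HASH_BITS)]);
 */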
/**
 * hlist_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
				      struct hlist_head *h)
{
	struct hlist_node *first = h->first;

	n->next = first;
	n->pprev = &h->first;
	smp_wmb();
	if (first)
		first->pprev = &n->next;
	h->first = n;
}

/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
				    struct hlist_node *next)
{
	n->pprev = next->pprev;
	n->next = next;
	next->pprev = &n->next;
	*(n->pprev) = n;
}

static inline void hlist_add_after(struct hlist_node *n,
				   struct hlist_node *next)
{
	next->next = n->next;
	n->next = next;
	next->pprev = &n->next;

	if (next->next)
		next->next->pprev = &next->next;
}

/**
 * hlist_add_before_rcu
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
					struct hlist_node *next)
{
	n->pprev = next->pprev;
	n->next = next;
	smp_wmb();
	next->pprev = &n->next;
	*(n->pprev) = n;
}

/**
 * hlist_add_after_rcu
 * @prev: the existing element to add the new element after.
 * @n: the new element to add to the hash list.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_after_rcu(struct hlist_node *prev,
				       struct hlist_node *n)
{
	n->next = prev->next;
	n->pprev = &prev->next;
	smp_wmb();
	prev->next = n;
	if (n->next)
		n->next->pprev = &n->next;
}

#define hlist_entry(ptr, type, member) container_of(ptr, type, member)

#define hlist_for_each(pos, head) \
	for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
	     pos = pos->next)

#define hlist_for_each_safe(pos, n, head) \
	for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
	     pos = n)

/**
 * hlist_for_each_entry - iterate over list of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry(tpos, pos, head, member) \
	for (pos = (head)->first; \
	     pos && ({ prefetch(pos->next); 1; }) && \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
	     pos = pos->next)
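
/*
 * Example: looking up an entry in one hash bucket. A sketch reusing the
 * illustrative names from the hash table example above; note that this form
 * of the macro needs both a typed cursor and a struct hlist_node cursor.
 *
 *	struct mouse *m;
 *	struct hlist_node *pos;
 *
 *	hlist_for_each_entry(m, pos, &my_table[bkt], hash)
 *		if (m->id == id)
 *			return m;
 */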
/**
 * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue(tpos, pos, member) \
	for (pos = (pos)->next; \
	     pos && ({ prefetch(pos->next); 1; }) && \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
	     pos = pos->next)

/**
 * hlist_for_each_entry_from - iterate over a hlist continuing from current point
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from(tpos, pos, member) \
	for (; pos && ({ prefetch(pos->next); 1; }) && \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
	     pos = pos->next)

/**
 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @n: another &struct hlist_node to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
	for (pos = (head)->first; \
	     pos && ({ n = pos->next; 1; }) && \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
	     pos = n)

/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
	for (pos = (head)->first; \
	     rcu_dereference(pos) && ({ prefetch(pos->next); 1; }) && \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
	     pos = pos->next)

#else
#warning "don't include kernel headers in userspace"
#endif /* __KERNEL__ */
#endif