/*
 * WARNING: Do *NOT* ever include this file; it is for internal use only!
 */
#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H

#undef offsetof
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)

/**
 * container_of - cast a member of a structure out to the containing structure
 *
 * @ptr:	the pointer to the member.
 * @type:	the type of the container struct this is embedded in.
 * @member:	the name of the member within the struct.
 *
 */
#define container_of(ptr, type, member) ({			\
        const typeof( ((type *)0)->member ) *__mptr = (ptr);	\
        (type *)( (char *)__mptr - offsetof(type,member) );})

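/*
 * Usage sketch (illustrative, not part of the original header): given a
 * hypothetical "struct foo" embedding a list_head member, container_of()
 * turns a pointer to that member back into a pointer to the enclosing
 * structure.
 *
 *	struct foo {
 *		int value;
 *		struct list_head node;
 *	};
 *
 *	struct foo some_foo;
 *	struct list_head *p = &some_foo.node;
 *	struct foo *f = container_of(p, struct foo, node);
 *
 * Afterwards f points at some_foo again.
 */
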
/*
 * Check at compile time that something is of a particular type.
 * Always evaluates to 1 so you may use it easily in comparisons.
 */
#define typecheck(type,x) \
({	type __dummy; \
	typeof(x) __dummy2; \
	(void)(&__dummy == &__dummy2); \
	1; \
})

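/*
 * Usage sketch (illustrative, not part of the original header): typecheck()
 * provokes a compile-time warning if the expression does not have the stated
 * type, yet always evaluates to 1, so it can sit inside a larger condition.
 *
 *	unsigned long stamp = 0;
 *
 *	if (typecheck(unsigned long, stamp) && stamp == 0)
 *		stamp = 1;
 */
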
#define prefetch(x)		1

/* empty defines to make this work in userspace -HW */
#ifndef smp_wmb
#define smp_wmb()
#endif
/* likewise for the dependency barrier used by the _rcu iterators below */
#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends()
#endif

/*
 * These are non-NULL pointers that will result in page faults
 * under normal circumstances, used to verify that nobody uses
 * non-initialized list entries.
 */
#define LIST_POISON1  ((void *) 0x00100100)
#define LIST_POISON2  ((void *) 0x00200200)

/*
 * Simple doubly linked list implementation.
 *
 * Some of the internal functions ("__xxx") are useful when
 * manipulating whole lists rather than single entries, as
 * sometimes we already know the next/prev entries and we can
 * generate better code by using them directly rather than
 * using the generic single-entry routines.
 */

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define LIST_HEAD(name) \
	struct list_head name = LIST_HEAD_INIT(name)

#define INIT_LIST_HEAD(ptr) do { \
	(ptr)->next = (ptr); (ptr)->prev = (ptr); \
} while (0)

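/*
 * Usage sketch (illustrative, not part of the original header): the three
 * initializers above cover the usual ways of setting up an empty list head,
 * at file scope, as an initialized variable, and at run time respectively.
 *
 *	static LIST_HEAD(global_list);
 *
 *	struct list_head local_list = LIST_HEAD_INIT(local_list);
 *
 *	struct list_head runtime_list;
 *	INIT_LIST_HEAD(&runtime_list);
 */
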
/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

/**
 * list_add - add a new entry
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 */
static inline void list_add(struct list_head *new, struct list_head *head)
{
	__list_add(new, head, head->next);
}

/**
 * list_add_tail - add a new entry
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);
}

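/*
 * Usage sketch (illustrative, assumes a hypothetical "struct item" embedding
 * a list_head named "link"): list_add() pushes at the front of the list
 * (stack order), list_add_tail() appends at the back (queue order).
 *
 *	struct item {
 *		int id;
 *		struct list_head link;
 *	};
 *
 *	LIST_HEAD(items);
 *	struct item a, b;
 *
 *	list_add(&a.link, &items);
 *	list_add_tail(&b.link, &items);
 */
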
/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add_rcu(struct list_head * new,
		struct list_head * prev, struct list_head * next)
{
	new->next = next;
	new->prev = prev;
	smp_wmb();
	next->prev = new;
	prev->next = new;
}

/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
	__list_add_rcu(new, head, head->next);
}

/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
					struct list_head *head)
{
	__list_add_rcu(new, head->prev, head);
}

/*
 * Delete a list entry by making the prev/next entries
 * point to each other.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_del(struct list_head * prev, struct list_head * next)
{
	next->prev = prev;
	prev->next = next;
}

/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 * Note: list_empty() on entry does not return true after this; the entry is
 * in an undefined state.
 */
static inline void list_del(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	entry->next = LIST_POISON1;
	entry->prev = LIST_POISON2;
}

/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty() on entry does not return true after this;
 * the entry is in an undefined state. It is useful for RCU-based
 * lock-free traversal.
 *
 * In particular, it means that we cannot poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry.  Instead, either synchronize_kernel()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	entry->prev = LIST_POISON2;
}

/**
 * list_del_init - deletes entry from list and reinitializes it.
 * @entry: the element to delete from the list.
 */
static inline void list_del_init(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	INIT_LIST_HEAD(entry);
}

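/*
 * Usage sketch (illustrative, reusing the hypothetical "struct item" and the
 * "items" list from the sketch above): list_del() poisons the entry's
 * pointers, so it must not be reused without reinitialization, whereas
 * list_del_init() leaves the entry as a valid empty list that may safely be
 * re-added later.
 *
 *	list_del(&a.link);
 *	list_del_init(&b.link);
 *	list_add(&b.link, &items);
 */
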
/**
 * list_move - delete from one list and add as another's head
 * @list: the entry to move
 * @head: the head that will precede our entry
 */
static inline void list_move(struct list_head *list, struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add(list, head);
}

/**
 * list_move_tail - delete from one list and add as another's tail
 * @list: the entry to move
 * @head: the head that will follow our entry
 */
static inline void list_move_tail(struct list_head *list,
				  struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add_tail(list, head);
}

/**
 * list_empty - tests whether a list is empty
 * @head: the list to test.
 */
static inline int list_empty(const struct list_head *head)
{
	return head->next == head;
}

/**
 * list_empty_careful - tests whether a list is empty _and_ checks that
 * no other CPU might be in the process of still modifying either member
 * @head: the list to test.
 *
 * NOTE: using list_empty_careful() without synchronization
 * can only be safe if the only activity that can happen
 * to the list entry is list_del_init(). E.g. it cannot be used
 * if another CPU could re-list_add() it.
 */
static inline int list_empty_careful(const struct list_head *head)
{
	struct list_head *next = head->next;
	return (next == head) && (next == head->prev);
}

static inline void __list_splice(struct list_head *list,
				 struct list_head *head)
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;
	struct list_head *at = head->next;

	first->prev = head;
	head->next = first;

	last->next = at;
	at->prev = last;
}

/**
 * list_splice - join two lists
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice(struct list_head *list, struct list_head *head)
{
	if (!list_empty(list))
		__list_splice(list, head);
}

/**
 * list_splice_init - join two lists and reinitialise the emptied list.
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * The list at @list is reinitialised.
 */
static inline void list_splice_init(struct list_head *list,
				    struct list_head *head)
{
	if (!list_empty(list)) {
		__list_splice(list, head);
		INIT_LIST_HEAD(list);
	}
}

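/*
 * Usage sketch (illustrative, not part of the original header): move every
 * entry from one list onto the front of another in O(1), leaving the source
 * list empty and reusable.
 *
 *	LIST_HEAD(src);
 *	LIST_HEAD(dst);
 *
 *	list_splice_init(&src, &dst);
 *
 * Plain list_splice() would have left src still pointing at the moved
 * entries, so it must not be reused without reinitialization.
 */
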
/**
 * list_entry - get the struct for this entry
 * @ptr:	the &struct list_head pointer.
 * @type:	the type of the struct this is embedded in.
 * @member:	the name of the list_head within the struct.
 */
#define list_entry(ptr, type, member) \
	container_of(ptr, type, member)

/**
 * list_for_each	-	iterate over a list
 * @pos:	the &struct list_head to use as a loop counter.
 * @head:	the head for your list.
 */
#define list_for_each(pos, head) \
	for (pos = (head)->next, prefetch(pos->next); pos != (head); \
        	pos = pos->next, prefetch(pos->next))

/**
 * __list_for_each	-	iterate over a list
 * @pos:	the &struct list_head to use as a loop counter.
 * @head:	the head for your list.
 *
 * This variant differs from list_for_each() in that it's the
 * simplest possible list iteration code; no prefetching is done.
 * Use this for code that knows the list to be very short (empty
 * or 1 entry) most of the time.
 */
#define __list_for_each(pos, head) \
	for (pos = (head)->next; pos != (head); pos = pos->next)

/**
 * list_for_each_prev	-	iterate over a list backwards
 * @pos:	the &struct list_head to use as a loop counter.
 * @head:	the head for your list.
 */
#define list_for_each_prev(pos, head) \
	for (pos = (head)->prev, prefetch(pos->prev); pos != (head); \
        	pos = pos->prev, prefetch(pos->prev))

/**
 * list_for_each_safe	-	iterate over a list safe against removal of list entry
 * @pos:	the &struct list_head to use as a loop counter.
 * @n:		another &struct list_head to use as temporary storage
 * @head:	the head for your list.
 */
#define list_for_each_safe(pos, n, head) \
	for (pos = (head)->next, n = pos->next; pos != (head); \
		pos = n, n = pos->next)

/**
 * list_for_each_entry	-	iterate over list of given type
 * @pos:	the type * to use as a loop counter.
 * @head:	the head for your list.
 * @member:	the name of the list_head within the struct.
 */
#define list_for_each_entry(pos, head, member)				\
	for (pos = list_entry((head)->next, typeof(*pos), member),	\
		     prefetch(pos->member.next);			\
	     &pos->member != (head); 					\
	     pos = list_entry(pos->member.next, typeof(*pos), member),	\
		     prefetch(pos->member.next))

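/*
 * Usage sketch (illustrative, reusing the hypothetical "struct item" and
 * "items" list from the sketch above): list_for_each_entry() hides the
 * list_entry() cast, so the loop variable is already the containing type.
 *
 *	struct item *it;
 *
 *	list_for_each_entry(it, &items, link)
 *		printf("%d\n", it->id);
 *
 * (printf() assumes <stdio.h>; any per-entry operation works the same way.)
 */
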
/**
 * list_for_each_entry_reverse - iterate backwards over list of given type.
 * @pos:	the type * to use as a loop counter.
 * @head:	the head for your list.
 * @member:	the name of the list_head within the struct.
 */
#define list_for_each_entry_reverse(pos, head, member)			\
	for (pos = list_entry((head)->prev, typeof(*pos), member),	\
		     prefetch(pos->member.prev);			\
	     &pos->member != (head); 					\
	     pos = list_entry(pos->member.prev, typeof(*pos), member),	\
		     prefetch(pos->member.prev))

/**
 * list_prepare_entry - prepare a pos entry for use as a start point in
 *			list_for_each_entry_continue()
 * @pos:	the type * to use as a start point
 * @head:	the head of the list
 * @member:	the name of the list_head within the struct.
 */
#define list_prepare_entry(pos, head, member) \
	((pos) ? : list_entry(head, typeof(*pos), member))

/**
 * list_for_each_entry_continue -	iterate over list of given type
 *			continuing after existing point
 * @pos:	the type * to use as a loop counter.
 * @head:	the head for your list.
 * @member:	the name of the list_head within the struct.
 */
#define list_for_each_entry_continue(pos, head, member) 		\
	for (pos = list_entry(pos->member.next, typeof(*pos), member),	\
		     prefetch(pos->member.next);			\
	     &pos->member != (head);					\
	     pos = list_entry(pos->member.next, typeof(*pos), member),	\
		     prefetch(pos->member.next))

/**
 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @pos:	the type * to use as a loop counter.
 * @n:		another type * to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the list_head within the struct.
 */
#define list_for_each_entry_safe(pos, n, head, member)			\
	for (pos = list_entry((head)->next, typeof(*pos), member),	\
		n = list_entry(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head); 					\
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))

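/*
 * Usage sketch (illustrative, reusing the hypothetical "struct item"): the
 * _safe variant keeps a lookahead pointer, so the current entry may be
 * removed, and even freed, without derailing the iteration.
 *
 *	struct item *it, *tmp;
 *
 *	list_for_each_entry_safe(it, tmp, &items, link) {
 *		list_del(&it->link);
 *		free(it);
 *	}
 *
 * (free() assumes the entries were heap-allocated and <stdlib.h> is
 * available; stack-allocated entries would just be list_del()'d.)
 */
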
/**
 * list_for_each_rcu	-	iterate over an rcu-protected list
 * @pos:	the &struct list_head to use as a loop counter.
 * @head:	the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_rcu(pos, head) \
	for (pos = (head)->next, prefetch(pos->next); pos != (head); \
        	pos = pos->next, ({ smp_read_barrier_depends(); 0;}), prefetch(pos->next))

#define __list_for_each_rcu(pos, head) \
	for (pos = (head)->next; pos != (head); \
        	pos = pos->next, ({ smp_read_barrier_depends(); 0;}))

/**
 * list_for_each_safe_rcu	-	iterate over an rcu-protected list safe
 *					against removal of list entry
 * @pos:	the &struct list_head to use as a loop counter.
 * @n:		another &struct list_head to use as temporary storage
 * @head:	the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_safe_rcu(pos, n, head) \
	for (pos = (head)->next, n = pos->next; pos != (head); \
		pos = n, ({ smp_read_barrier_depends(); 0;}), n = pos->next)

/**
 * list_for_each_entry_rcu	-	iterate over rcu list of given type
 * @pos:	the type * to use as a loop counter.
 * @head:	the head for your list.
 * @member:	the name of the list_head within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member)			\
	for (pos = list_entry((head)->next, typeof(*pos), member),	\
		     prefetch(pos->member.next);			\
	     &pos->member != (head); 					\
	     pos = list_entry(pos->member.next, typeof(*pos), member),	\
		     ({ smp_read_barrier_depends(); 0;}),		\
		     prefetch(pos->member.next))


/**
 * list_for_each_continue_rcu	-	iterate over an rcu-protected list
 *			continuing after existing point.
 * @pos:	the &struct list_head to use as a loop counter.
 * @head:	the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_continue_rcu(pos, head) \
	for ((pos) = (pos)->next, prefetch((pos)->next); (pos) != (head); \
        	(pos) = (pos)->next, ({ smp_read_barrier_depends(); 0;}), prefetch((pos)->next))

/*
 * Doubly linked lists with a single pointer list head.
 * Mostly useful for hash tables where the two-pointer list head is
 * too wasteful.
 * You lose the ability to access the tail in O(1).
 */

struct hlist_head {
	struct hlist_node *first;
};

struct hlist_node {
	struct hlist_node *next, **pprev;
};

#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
#define INIT_HLIST_NODE(ptr) ((ptr)->next = NULL, (ptr)->pprev = NULL)

static inline int hlist_unhashed(const struct hlist_node *h)
{
	return !h->pprev;
}

static inline int hlist_empty(const struct hlist_head *h)
{
	return !h->first;
}

static inline void __hlist_del(struct hlist_node *n)
{
	struct hlist_node *next = n->next;
	struct hlist_node **pprev = n->pprev;
	*pprev = next;
	if (next)
		next->pprev = pprev;
}

static inline void hlist_del(struct hlist_node *n)
{
	__hlist_del(n);
	n->next = LIST_POISON1;
	n->pprev = LIST_POISON2;
}

/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_unhashed() on entry does not return true after this;
 * the entry is in an undefined state. It is useful for RCU-based
 * lock-free traversal.
 *
 * In particular, it means that we cannot poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
	__hlist_del(n);
	n->pprev = LIST_POISON2;
}

static inline void hlist_del_init(struct hlist_node *n)
{
	if (n->pprev) {
		__hlist_del(n);
		INIT_HLIST_NODE(n);
	}
}

#define hlist_del_rcu_init hlist_del_init

static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	struct hlist_node *first = h->first;
	n->next = first;
	if (first)
		first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}


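/*
 * Usage sketch (illustrative, not part of the original header): a tiny
 * fixed-size hash table built from hlist heads, with a hypothetical
 * "struct entry" hashed by its key.
 *
 *	#define TABLE_SIZE 16
 *
 *	struct entry {
 *		unsigned int key;
 *		struct hlist_node hash;
 *	};
 *
 *	static struct hlist_head table[TABLE_SIZE];
 *
 *	static void insert(struct entry *e)
 *	{
 *		hlist_add_head(&e->hash, &table[e->key % TABLE_SIZE]);
 *	}
 *
 * A static array of hlist_head is zero-initialized, which is exactly the
 * empty state (first == NULL), so no explicit INIT_HLIST_HEAD() loop is
 * needed here.
 */
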
/**
 * hlist_add_head_rcu - adds the specified element to the specified hlist,
 * while permitting racing traversals.
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), which already issues the
 * smp_read_barrier_depends() needed to prevent memory-consistency
 * problems on Alpha CPUs.
 * Regardless of the type of CPU, the list-traversal primitive
 * must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
					struct hlist_head *h)
{
	struct hlist_node *first = h->first;
	n->next = first;
	n->pprev = &h->first;
	smp_wmb();
	if (first)
		first->pprev = &n->next;
	h->first = n;
}

/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
					struct hlist_node *next)
{
	n->pprev = next->pprev;
	n->next = next;
	next->pprev = &n->next;
	*(n->pprev) = n;
}

static inline void hlist_add_after(struct hlist_node *n,
					struct hlist_node *next)
{
	next->next = n->next;
	n->next = next;
	next->pprev = &n->next;

	if (next->next)
		next->next->pprev = &next->next;
}

#define hlist_entry(ptr, type, member) container_of(ptr,type,member)

#define hlist_for_each(pos, head) \
	for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
	     pos = pos->next)

#define hlist_for_each_safe(pos, n, head) \
	for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
	     pos = n)

/**
 * hlist_for_each_entry	- iterate over list of given type
 * @tpos:	the type * to use as a loop counter.
 * @pos:	the &struct hlist_node to use as a loop counter.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry(tpos, pos, head, member)			 \
	for (pos = (head)->first;					 \
	     pos && ({ prefetch(pos->next); 1;}) &&			 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)

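/*
 * Usage sketch (illustrative, reusing the hypothetical hash table from the
 * sketch above, inside some lookup(key) function): walk a single bucket
 * looking for a matching key.
 *
 *	struct entry *e;
 *	struct hlist_node *node;
 *
 *	hlist_for_each_entry(e, node, &table[key % TABLE_SIZE], hash) {
 *		if (e->key == key)
 *			return e;
 *	}
 *	return NULL;
 */
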
/**
 * hlist_for_each_entry_continue - iterate over a hlist continuing after existing point
 * @tpos:	the type * to use as a loop counter.
 * @pos:	the &struct hlist_node to use as a loop counter.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue(tpos, pos, member)		 \
	for (pos = (pos)->next;						 \
	     pos && ({ prefetch(pos->next); 1;}) &&			 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)

/**
 * hlist_for_each_entry_from - iterate over a hlist continuing from existing point
 * @tpos:	the type * to use as a loop counter.
 * @pos:	the &struct hlist_node to use as a loop counter.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from(tpos, pos, member)			 \
	for (; pos && ({ prefetch(pos->next); 1;}) &&			 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)

/**
 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos:	the type * to use as a loop counter.
 * @pos:	the &struct hlist_node to use as a loop counter.
 * @n:		another &struct hlist_node to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_safe(tpos, pos, n, head, member) 		 \
	for (pos = (head)->first;					 \
	     pos && ({ n = pos->next; 1; }) && 				 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = n)

/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos:	the type * to use as a loop counter.
 * @pos:	the &struct hlist_node to use as a loop counter.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(tpos, pos, head, member)		 \
	for (pos = (head)->first;					 \
	     pos && ({ prefetch(pos->next); 1;}) &&			 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next, ({ smp_read_barrier_depends(); 0; }) )

#endif /* _LINUX_LIST_H */
