krb5 commit: Rename k5-queue.h macros

Tom Yu tlyu at mit.edu
Wed Jul 27 15:43:33 EDT 2016


https://github.com/krb5/krb5/commit/cab7f315ef45875910adea5da554a994cab2aafe
commit cab7f315ef45875910adea5da554a994cab2aafe
Author: Tom Yu <tlyu at mit.edu>
Date:   Wed Jul 27 13:19:51 2016 -0400

    Rename k5-queue.h macros
    
    Some BSD-derived systems (e.g., FreeBSD and Mac OS X) inappropriately
    include sys/queue.h from some non-kernel network-related headers that
    we include (net/if.h is one example).  Because our k5-queue.h is a
    copy from a BSD sys/queue.h, many compilers will warn about macro
    redefinitions on those systems.  Rename the queue macros to have a K5_
    prefix.
    
    Also delete the QUEUEDEBUG macros because they are only useful for
    kernel use on the BSD systems where this header originated.
    
    ticket: 8466 (new)

 src/include/k5-queue.h |  351 ++++++++++++++++-------------------------------
 src/kdc/replay.c       |   26 ++--
 src/lib/krad/attrset.c |   24 ++--
 src/lib/krad/client.c  |   12 +-
 src/lib/krad/remote.c  |   30 ++--
 5 files changed, 166 insertions(+), 277 deletions(-)
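
For illustration only (not part of the commit): a minimal sketch of how callers use the
renamed macros after this change, assuming k5-queue.h is on the include path. The struct,
field, and function names below (item, item_queue, links, example) are hypothetical.

    #include "k5-queue.h"

    struct item {
        int value;
        K5_TAILQ_ENTRY(item) links;     /* per-element linkage fields */
    };

    K5_TAILQ_HEAD(item_queue, item);    /* declares struct item_queue */

    static void
    example(void)
    {
        struct item_queue q;
        struct item a = { 1 }, *it;

        K5_TAILQ_INIT(&q);
        K5_TAILQ_INSERT_TAIL(&q, &a, links);
        K5_TAILQ_FOREACH(it, &q, links) {
            /* visit each element; here just the single entry */
        }
        K5_TAILQ_REMOVE(&q, &a, links);
    }

The K5_LIST, K5_SLIST, K5_STAILQ, K5_SIMPLEQ, and K5_CIRCLEQ families follow the same
pattern; only the macro prefix changes relative to the BSD sys/queue.h originals.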

diff --git a/src/include/k5-queue.h b/src/include/k5-queue.h
index e70401a..ad6d866 100644
--- a/src/include/k5-queue.h
+++ b/src/include/k5-queue.h
@@ -90,15 +90,15 @@
 /*
  * List definitions.
  */
-#define	LIST_HEAD(name, type)						\
+#define	K5_LIST_HEAD(name, type)					\
 struct name {								\
 	struct type *lh_first;	/* first element */			\
 }
 
-#define	LIST_HEAD_INITIALIZER(head)					\
+#define	K5_LIST_HEAD_INITIALIZER(head)					\
 	{ NULL }
 
-#define	LIST_ENTRY(type)						\
+#define	K5_LIST_ENTRY(type)						\
 struct {								\
 	struct type *le_next;	/* next element */			\
 	struct type **le_prev;	/* address of previous next element */	\
@@ -107,33 +107,11 @@ struct {								\
 /*
  * List functions.
  */
-#if defined(_KERNEL) && defined(QUEUEDEBUG)
-#define	QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)			\
-	if ((head)->lh_first &&						\
-	    (head)->lh_first->field.le_prev != &(head)->lh_first)	\
-		panic("LIST_INSERT_HEAD %p %s:%d", (head), __FILE__, __LINE__);
-#define	QUEUEDEBUG_LIST_OP(elm, field)					\
-	if ((elm)->field.le_next &&					\
-	    (elm)->field.le_next->field.le_prev !=			\
-	    &(elm)->field.le_next)					\
-		panic("LIST_* forw %p %s:%d", (elm), __FILE__, __LINE__);\
-	if (*(elm)->field.le_prev != (elm))				\
-		panic("LIST_* back %p %s:%d", (elm), __FILE__, __LINE__);
-#define	QUEUEDEBUG_LIST_POSTREMOVE(elm, field)				\
-	(elm)->field.le_next = (void *)1L;				\
-	(elm)->field.le_prev = (void *)1L;
-#else
-#define	QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)
-#define	QUEUEDEBUG_LIST_OP(elm, field)
-#define	QUEUEDEBUG_LIST_POSTREMOVE(elm, field)
-#endif
-
-#define	LIST_INIT(head) do {						\
+#define	K5_LIST_INIT(head) do {						\
 	(head)->lh_first = NULL;					\
 } while (/*CONSTCOND*/0)
 
-#define	LIST_INSERT_AFTER(listelm, elm, field) do {			\
-	QUEUEDEBUG_LIST_OP((listelm), field)				\
+#define	K5_LIST_INSERT_AFTER(listelm, elm, field) do {			\
 	if (((elm)->field.le_next = (listelm)->field.le_next) != NULL)	\
 		(listelm)->field.le_next->field.le_prev =		\
 		    &(elm)->field.le_next;				\
@@ -141,60 +119,56 @@ struct {								\
 	(elm)->field.le_prev = &(listelm)->field.le_next;		\
 } while (/*CONSTCOND*/0)
 
-#define	LIST_INSERT_BEFORE(listelm, elm, field) do {			\
-	QUEUEDEBUG_LIST_OP((listelm), field)				\
+#define	K5_LIST_INSERT_BEFORE(listelm, elm, field) do {			\
 	(elm)->field.le_prev = (listelm)->field.le_prev;		\
 	(elm)->field.le_next = (listelm);				\
 	*(listelm)->field.le_prev = (elm);				\
 	(listelm)->field.le_prev = &(elm)->field.le_next;		\
 } while (/*CONSTCOND*/0)
 
-#define	LIST_INSERT_HEAD(head, elm, field) do {				\
-	QUEUEDEBUG_LIST_INSERT_HEAD((head), (elm), field)		\
+#define	K5_LIST_INSERT_HEAD(head, elm, field) do {			\
 	if (((elm)->field.le_next = (head)->lh_first) != NULL)		\
 		(head)->lh_first->field.le_prev = &(elm)->field.le_next;\
 	(head)->lh_first = (elm);					\
 	(elm)->field.le_prev = &(head)->lh_first;			\
 } while (/*CONSTCOND*/0)
 
-#define	LIST_REMOVE(elm, field) do {					\
-	QUEUEDEBUG_LIST_OP((elm), field)				\
+#define	K5_LIST_REMOVE(elm, field) do {					\
 	if ((elm)->field.le_next != NULL)				\
 		(elm)->field.le_next->field.le_prev = 			\
 		    (elm)->field.le_prev;				\
 	*(elm)->field.le_prev = (elm)->field.le_next;			\
-	QUEUEDEBUG_LIST_POSTREMOVE((elm), field)			\
 } while (/*CONSTCOND*/0)
 
-#define	LIST_FOREACH(var, head, field)					\
+#define	K5_LIST_FOREACH(var, head, field)				\
 	for ((var) = ((head)->lh_first);				\
 		(var);							\
 		(var) = ((var)->field.le_next))
 
-#define	LIST_FOREACH_SAFE(var, head, field, tvar)			\
-	for ((var) = LIST_FIRST((head));				\
-		(var) && ((tvar) = LIST_NEXT((var), field), 1);		\
+#define	K5_LIST_FOREACH_SAFE(var, head, field, tvar)			\
+	for ((var) = K5_LIST_FIRST((head));				\
+		(var) && ((tvar) = K5_LIST_NEXT((var), field), 1);	\
 		(var) = (tvar))
 /*
  * List access methods.
  */
-#define	LIST_EMPTY(head)		((head)->lh_first == NULL)
-#define	LIST_FIRST(head)		((head)->lh_first)
-#define	LIST_NEXT(elm, field)		((elm)->field.le_next)
+#define	K5_LIST_EMPTY(head)		((head)->lh_first == NULL)
+#define	K5_LIST_FIRST(head)		((head)->lh_first)
+#define	K5_LIST_NEXT(elm, field)	((elm)->field.le_next)
 
 
 /*
  * Singly-linked List definitions.
  */
-#define	SLIST_HEAD(name, type)						\
+#define	K5_SLIST_HEAD(name, type)					\
 struct name {								\
 	struct type *slh_first;	/* first element */			\
 }
 
-#define	SLIST_HEAD_INITIALIZER(head)					\
+#define	K5_SLIST_HEAD_INITIALIZER(head)					\
 	{ NULL }
 
-#define	SLIST_ENTRY(type)						\
+#define	K5_SLIST_ENTRY(type)						\
 struct {								\
 	struct type *sle_next;	/* next element */			\
 }
@@ -202,27 +176,27 @@ struct {								\
 /*
  * Singly-linked List functions.
  */
-#define	SLIST_INIT(head) do {						\
+#define	K5_SLIST_INIT(head) do {					\
 	(head)->slh_first = NULL;					\
 } while (/*CONSTCOND*/0)
 
-#define	SLIST_INSERT_AFTER(slistelm, elm, field) do {			\
+#define	K5_SLIST_INSERT_AFTER(slistelm, elm, field) do {		\
 	(elm)->field.sle_next = (slistelm)->field.sle_next;		\
 	(slistelm)->field.sle_next = (elm);				\
 } while (/*CONSTCOND*/0)
 
-#define	SLIST_INSERT_HEAD(head, elm, field) do {			\
+#define	K5_SLIST_INSERT_HEAD(head, elm, field) do {			\
 	(elm)->field.sle_next = (head)->slh_first;			\
 	(head)->slh_first = (elm);					\
 } while (/*CONSTCOND*/0)
 
-#define	SLIST_REMOVE_HEAD(head, field) do {				\
+#define	K5_SLIST_REMOVE_HEAD(head, field) do {				\
 	(head)->slh_first = (head)->slh_first->field.sle_next;		\
 } while (/*CONSTCOND*/0)
 
-#define	SLIST_REMOVE(head, elm, type, field) do {			\
+#define	K5_SLIST_REMOVE(head, elm, type, field) do {			\
 	if ((head)->slh_first == (elm)) {				\
-		SLIST_REMOVE_HEAD((head), field);			\
+		K5_SLIST_REMOVE_HEAD((head), field);			\
 	}								\
 	else {								\
 		struct type *curelm = (head)->slh_first;		\
@@ -233,40 +207,40 @@ struct {								\
 	}								\
 } while (/*CONSTCOND*/0)
 
-#define	SLIST_REMOVE_AFTER(slistelm, field) do {			\
+#define	K5_SLIST_REMOVE_AFTER(slistelm, field) do {			\
 	(slistelm)->field.sle_next =					\
-	    SLIST_NEXT(SLIST_NEXT((slistelm), field), field);		\
+	    K5_SLIST_NEXT(K5_SLIST_NEXT((slistelm), field), field);	\
 } while (/*CONSTCOND*/0)
 
-#define	SLIST_FOREACH(var, head, field)					\
+#define	K5_SLIST_FOREACH(var, head, field)				\
 	for((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next)
 
-#define	SLIST_FOREACH_SAFE(var, head, field, tvar)			\
-	for ((var) = SLIST_FIRST((head));				\
-	    (var) && ((tvar) = SLIST_NEXT((var), field), 1);		\
+#define	K5_SLIST_FOREACH_SAFE(var, head, field, tvar)			\
+	for ((var) = K5_SLIST_FIRST((head));				\
+	    (var) && ((tvar) = K5_SLIST_NEXT((var), field), 1);		\
 	    (var) = (tvar))
 
 /*
  * Singly-linked List access methods.
  */
-#define	SLIST_EMPTY(head)	((head)->slh_first == NULL)
-#define	SLIST_FIRST(head)	((head)->slh_first)
-#define	SLIST_NEXT(elm, field)	((elm)->field.sle_next)
+#define	K5_SLIST_EMPTY(head)	((head)->slh_first == NULL)
+#define	K5_SLIST_FIRST(head)	((head)->slh_first)
+#define	K5_SLIST_NEXT(elm, field)	((elm)->field.sle_next)
 
 
 /*
  * Singly-linked Tail queue declarations.
  */
-#define	STAILQ_HEAD(name, type)					\
+#define	K5_STAILQ_HEAD(name, type)					\
 struct name {								\
 	struct type *stqh_first;	/* first element */			\
 	struct type **stqh_last;	/* addr of last next element */		\
 }
 
-#define	STAILQ_HEAD_INITIALIZER(head)					\
+#define	K5_STAILQ_HEAD_INITIALIZER(head)				\
 	{ NULL, &(head).stqh_first }
 
-#define	STAILQ_ENTRY(type)						\
+#define	K5_STAILQ_ENTRY(type)						\
 struct {								\
 	struct type *stqe_next;	/* next element */			\
 }
@@ -274,40 +248,40 @@ struct {								\
 /*
  * Singly-linked Tail queue functions.
  */
-#define	STAILQ_INIT(head) do {						\
+#define	K5_STAILQ_INIT(head) do {					\
 	(head)->stqh_first = NULL;					\
 	(head)->stqh_last = &(head)->stqh_first;				\
 } while (/*CONSTCOND*/0)
 
-#define	STAILQ_INSERT_HEAD(head, elm, field) do {			\
+#define	K5_STAILQ_INSERT_HEAD(head, elm, field) do {			\
 	if (((elm)->field.stqe_next = (head)->stqh_first) == NULL)	\
 		(head)->stqh_last = &(elm)->field.stqe_next;		\
 	(head)->stqh_first = (elm);					\
 } while (/*CONSTCOND*/0)
 
-#define	STAILQ_INSERT_TAIL(head, elm, field) do {			\
+#define	K5_STAILQ_INSERT_TAIL(head, elm, field) do {			\
 	(elm)->field.stqe_next = NULL;					\
 	*(head)->stqh_last = (elm);					\
 	(head)->stqh_last = &(elm)->field.stqe_next;			\
 } while (/*CONSTCOND*/0)
 
-#define	STAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
+#define	K5_STAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
 	if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\
 		(head)->stqh_last = &(elm)->field.stqe_next;		\
 	(listelm)->field.stqe_next = (elm);				\
 } while (/*CONSTCOND*/0)
 
-#define	STAILQ_REMOVE_HEAD(head, field) do {				\
+#define	K5_STAILQ_REMOVE_HEAD(head, field) do {				\
 	if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \
 		(head)->stqh_last = &(head)->stqh_first;			\
 } while (/*CONSTCOND*/0)
 
-#define	STAILQ_REMOVE(head, elm, type, field) do {			\
+#define	K5_STAILQ_REMOVE(head, elm, type, field) do {			\
 	if ((head)->stqh_first == (elm)) {				\
-		STAILQ_REMOVE_HEAD((head), field);			\
+		K5_STAILQ_REMOVE_HEAD((head), field);			\
 	} else {							\
 		struct type *curelm = (head)->stqh_first;		\
-		while (curelm->field.stqe_next != (elm))			\
+		while (curelm->field.stqe_next != (elm))		\
 			curelm = curelm->field.stqe_next;		\
 		if ((curelm->field.stqe_next =				\
 			curelm->field.stqe_next->field.stqe_next) == NULL) \
@@ -315,26 +289,26 @@ struct {								\
 	}								\
 } while (/*CONSTCOND*/0)
 
-#define	STAILQ_FOREACH(var, head, field)				\
+#define	K5_STAILQ_FOREACH(var, head, field)				\
 	for ((var) = ((head)->stqh_first);				\
 		(var);							\
 		(var) = ((var)->field.stqe_next))
 
-#define	STAILQ_FOREACH_SAFE(var, head, field, tvar)			\
-	for ((var) = STAILQ_FIRST((head));				\
-	    (var) && ((tvar) = STAILQ_NEXT((var), field), 1);		\
+#define	K5_STAILQ_FOREACH_SAFE(var, head, field, tvar)			\
+	for ((var) = K5_STAILQ_FIRST((head));				\
+	    (var) && ((tvar) = K5_STAILQ_NEXT((var), field), 1);	\
 	    (var) = (tvar))
 
-#define	STAILQ_CONCAT(head1, head2) do {				\
-	if (!STAILQ_EMPTY((head2))) {					\
+#define	K5_STAILQ_CONCAT(head1, head2) do {				\
+	if (!K5_STAILQ_EMPTY((head2))) {				\
 		*(head1)->stqh_last = (head2)->stqh_first;		\
 		(head1)->stqh_last = (head2)->stqh_last;		\
-		STAILQ_INIT((head2));					\
+		K5_STAILQ_INIT((head2));				\
 	}								\
 } while (/*CONSTCOND*/0)
 
-#define	STAILQ_LAST(head, type, field)					\
-	(STAILQ_EMPTY((head)) ?						\
+#define	K5_STAILQ_LAST(head, type, field)				\
+	(K5_STAILQ_EMPTY((head)) ?					\
 		NULL :							\
 	        ((struct type *)(void *)				\
 		((char *)((head)->stqh_last) - offsetof(struct type, field))))
@@ -342,24 +316,24 @@ struct {								\
 /*
  * Singly-linked Tail queue access methods.
  */
-#define	STAILQ_EMPTY(head)	((head)->stqh_first == NULL)
-#define	STAILQ_FIRST(head)	((head)->stqh_first)
-#define	STAILQ_NEXT(elm, field)	((elm)->field.stqe_next)
+#define	K5_STAILQ_EMPTY(head)	((head)->stqh_first == NULL)
+#define	K5_STAILQ_FIRST(head)	((head)->stqh_first)
+#define	K5_STAILQ_NEXT(elm, field)	((elm)->field.stqe_next)
 
 
 /*
  * Simple queue definitions.
  */
-#define	SIMPLEQ_HEAD(name, type)					\
+#define	K5_SIMPLEQ_HEAD(name, type)					\
 struct name {								\
 	struct type *sqh_first;	/* first element */			\
 	struct type **sqh_last;	/* addr of last next element */		\
 }
 
-#define	SIMPLEQ_HEAD_INITIALIZER(head)					\
+#define	K5_SIMPLEQ_HEAD_INITIALIZER(head)				\
 	{ NULL, &(head).sqh_first }
 
-#define	SIMPLEQ_ENTRY(type)						\
+#define	K5_SIMPLEQ_ENTRY(type)						\
 struct {								\
 	struct type *sqe_next;	/* next element */			\
 }
@@ -367,37 +341,37 @@ struct {								\
 /*
  * Simple queue functions.
  */
-#define	SIMPLEQ_INIT(head) do {						\
+#define	K5_SIMPLEQ_INIT(head) do {					\
 	(head)->sqh_first = NULL;					\
 	(head)->sqh_last = &(head)->sqh_first;				\
 } while (/*CONSTCOND*/0)
 
-#define	SIMPLEQ_INSERT_HEAD(head, elm, field) do {			\
+#define	K5_SIMPLEQ_INSERT_HEAD(head, elm, field) do {			\
 	if (((elm)->field.sqe_next = (head)->sqh_first) == NULL)	\
 		(head)->sqh_last = &(elm)->field.sqe_next;		\
 	(head)->sqh_first = (elm);					\
 } while (/*CONSTCOND*/0)
 
-#define	SIMPLEQ_INSERT_TAIL(head, elm, field) do {			\
+#define	K5_SIMPLEQ_INSERT_TAIL(head, elm, field) do {			\
 	(elm)->field.sqe_next = NULL;					\
 	*(head)->sqh_last = (elm);					\
 	(head)->sqh_last = &(elm)->field.sqe_next;			\
 } while (/*CONSTCOND*/0)
 
-#define	SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
+#define	K5_SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
 	if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
 		(head)->sqh_last = &(elm)->field.sqe_next;		\
 	(listelm)->field.sqe_next = (elm);				\
 } while (/*CONSTCOND*/0)
 
-#define	SIMPLEQ_REMOVE_HEAD(head, field) do {				\
+#define	K5_SIMPLEQ_REMOVE_HEAD(head, field) do {			\
 	if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
 		(head)->sqh_last = &(head)->sqh_first;			\
 } while (/*CONSTCOND*/0)
 
-#define	SIMPLEQ_REMOVE(head, elm, type, field) do {			\
+#define	K5_SIMPLEQ_REMOVE(head, elm, type, field) do {			\
 	if ((head)->sqh_first == (elm)) {				\
-		SIMPLEQ_REMOVE_HEAD((head), field);			\
+		K5_SIMPLEQ_REMOVE_HEAD((head), field);			\
 	} else {							\
 		struct type *curelm = (head)->sqh_first;		\
 		while (curelm->field.sqe_next != (elm))			\
@@ -408,26 +382,26 @@ struct {								\
 	}								\
 } while (/*CONSTCOND*/0)
 
-#define	SIMPLEQ_FOREACH(var, head, field)				\
+#define	K5_SIMPLEQ_FOREACH(var, head, field)				\
 	for ((var) = ((head)->sqh_first);				\
 		(var);							\
 		(var) = ((var)->field.sqe_next))
 
-#define	SIMPLEQ_FOREACH_SAFE(var, head, field, next)			\
+#define	K5_SIMPLEQ_FOREACH_SAFE(var, head, field, next)			\
 	for ((var) = ((head)->sqh_first);				\
 		(var) && ((next = ((var)->field.sqe_next)), 1);		\
 		(var) = (next))
 
-#define	SIMPLEQ_CONCAT(head1, head2) do {				\
-	if (!SIMPLEQ_EMPTY((head2))) {					\
+#define	K5_SIMPLEQ_CONCAT(head1, head2) do {				\
+	if (!K5_SIMPLEQ_EMPTY((head2))) {				\
 		*(head1)->sqh_last = (head2)->sqh_first;		\
 		(head1)->sqh_last = (head2)->sqh_last;		\
-		SIMPLEQ_INIT((head2));					\
+		K5_SIMPLEQ_INIT((head2));				\
 	}								\
 } while (/*CONSTCOND*/0)
 
-#define	SIMPLEQ_LAST(head, type, field)					\
-	(SIMPLEQ_EMPTY((head)) ?						\
+#define	K5_SIMPLEQ_LAST(head, type, field)				\
+	(K5_SIMPLEQ_EMPTY((head)) ?					\
 		NULL :							\
 	        ((struct type *)(void *)				\
 		((char *)((head)->sqh_last) - offsetof(struct type, field))))
@@ -435,72 +409,40 @@ struct {								\
 /*
  * Simple queue access methods.
  */
-#define	SIMPLEQ_EMPTY(head)		((head)->sqh_first == NULL)
-#define	SIMPLEQ_FIRST(head)		((head)->sqh_first)
-#define	SIMPLEQ_NEXT(elm, field)	((elm)->field.sqe_next)
+#define	K5_SIMPLEQ_EMPTY(head)		((head)->sqh_first == NULL)
+#define	K5_SIMPLEQ_FIRST(head)		((head)->sqh_first)
+#define	K5_SIMPLEQ_NEXT(elm, field)	((elm)->field.sqe_next)
 
 
 /*
  * Tail queue definitions.
  */
-#define	_TAILQ_HEAD(name, type, qual)					\
+#define	_K5_TAILQ_HEAD(name, type, qual)				\
 struct name {								\
 	qual type *tqh_first;		/* first element */		\
 	qual type *qual *tqh_last;	/* addr of last next element */	\
 }
-#define TAILQ_HEAD(name, type)	_TAILQ_HEAD(name, struct type,)
+#define K5_TAILQ_HEAD(name, type)	_K5_TAILQ_HEAD(name, struct type,)
 
-#define	TAILQ_HEAD_INITIALIZER(head)					\
+#define	K5_TAILQ_HEAD_INITIALIZER(head)					\
 	{ NULL, &(head).tqh_first }
 
-#define	_TAILQ_ENTRY(type, qual)					\
+#define	_K5_TAILQ_ENTRY(type, qual)					\
 struct {								\
 	qual type *tqe_next;		/* next element */		\
 	qual type *qual *tqe_prev;	/* address of previous next element */\
 }
-#define TAILQ_ENTRY(type)	_TAILQ_ENTRY(struct type,)
+#define K5_TAILQ_ENTRY(type)	_K5_TAILQ_ENTRY(struct type,)
 
 /*
  * Tail queue functions.
  */
-#if defined(_KERNEL) && defined(QUEUEDEBUG)
-#define	QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)			\
-	if ((head)->tqh_first &&					\
-	    (head)->tqh_first->field.tqe_prev != &(head)->tqh_first)	\
-		panic("TAILQ_INSERT_HEAD %p %s:%d", (head), __FILE__, __LINE__);
-#define	QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)			\
-	if (*(head)->tqh_last != NULL)					\
-		panic("TAILQ_INSERT_TAIL %p %s:%d", (head), __FILE__, __LINE__);
-#define	QUEUEDEBUG_TAILQ_OP(elm, field)					\
-	if ((elm)->field.tqe_next &&					\
-	    (elm)->field.tqe_next->field.tqe_prev !=			\
-	    &(elm)->field.tqe_next)					\
-		panic("TAILQ_* forw %p %s:%d", (elm), __FILE__, __LINE__);\
-	if (*(elm)->field.tqe_prev != (elm))				\
-		panic("TAILQ_* back %p %s:%d", (elm), __FILE__, __LINE__);
-#define	QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)			\
-	if ((elm)->field.tqe_next == NULL &&				\
-	    (head)->tqh_last != &(elm)->field.tqe_next)			\
-		panic("TAILQ_PREREMOVE head %p elm %p %s:%d",		\
-		      (head), (elm), __FILE__, __LINE__);
-#define	QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)				\
-	(elm)->field.tqe_next = (void *)1L;				\
-	(elm)->field.tqe_prev = (void *)1L;
-#else
-#define	QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)
-#define	QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)
-#define	QUEUEDEBUG_TAILQ_OP(elm, field)
-#define	QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)
-#define	QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)
-#endif
-
-#define	TAILQ_INIT(head) do {						\
+#define	K5_TAILQ_INIT(head) do {					\
 	(head)->tqh_first = NULL;					\
 	(head)->tqh_last = &(head)->tqh_first;				\
 } while (/*CONSTCOND*/0)
 
-#define	TAILQ_INSERT_HEAD(head, elm, field) do {			\
-	QUEUEDEBUG_TAILQ_INSERT_HEAD((head), (elm), field)		\
+#define	K5_TAILQ_INSERT_HEAD(head, elm, field) do {			\
 	if (((elm)->field.tqe_next = (head)->tqh_first) != NULL)	\
 		(head)->tqh_first->field.tqe_prev =			\
 		    &(elm)->field.tqe_next;				\
@@ -510,16 +452,14 @@ struct {								\
 	(elm)->field.tqe_prev = &(head)->tqh_first;			\
 } while (/*CONSTCOND*/0)
 
-#define	TAILQ_INSERT_TAIL(head, elm, field) do {			\
-	QUEUEDEBUG_TAILQ_INSERT_TAIL((head), (elm), field)		\
+#define	K5_TAILQ_INSERT_TAIL(head, elm, field) do {			\
 	(elm)->field.tqe_next = NULL;					\
 	(elm)->field.tqe_prev = (head)->tqh_last;			\
 	*(head)->tqh_last = (elm);					\
 	(head)->tqh_last = &(elm)->field.tqe_next;			\
 } while (/*CONSTCOND*/0)
 
-#define	TAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
-	QUEUEDEBUG_TAILQ_OP((listelm), field)				\
+#define	K5_TAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
 	if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
 		(elm)->field.tqe_next->field.tqe_prev = 		\
 		    &(elm)->field.tqe_next;				\
@@ -529,119 +469,77 @@ struct {								\
 	(elm)->field.tqe_prev = &(listelm)->field.tqe_next;		\
 } while (/*CONSTCOND*/0)
 
-#define	TAILQ_INSERT_BEFORE(listelm, elm, field) do {			\
-	QUEUEDEBUG_TAILQ_OP((listelm), field)				\
+#define	K5_TAILQ_INSERT_BEFORE(listelm, elm, field) do {		\
 	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
 	(elm)->field.tqe_next = (listelm);				\
 	*(listelm)->field.tqe_prev = (elm);				\
 	(listelm)->field.tqe_prev = &(elm)->field.tqe_next;		\
 } while (/*CONSTCOND*/0)
 
-#define	TAILQ_REMOVE(head, elm, field) do {				\
-	QUEUEDEBUG_TAILQ_PREREMOVE((head), (elm), field)		\
-	QUEUEDEBUG_TAILQ_OP((elm), field)				\
+#define	K5_TAILQ_REMOVE(head, elm, field) do {				\
 	if (((elm)->field.tqe_next) != NULL)				\
 		(elm)->field.tqe_next->field.tqe_prev = 		\
 		    (elm)->field.tqe_prev;				\
 	else								\
 		(head)->tqh_last = (elm)->field.tqe_prev;		\
 	*(elm)->field.tqe_prev = (elm)->field.tqe_next;			\
-	QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field);			\
 } while (/*CONSTCOND*/0)
 
-#define	TAILQ_FOREACH(var, head, field)					\
+#define	K5_TAILQ_FOREACH(var, head, field)				\
 	for ((var) = ((head)->tqh_first);				\
 		(var);							\
 		(var) = ((var)->field.tqe_next))
 
-#define	TAILQ_FOREACH_SAFE(var, head, field, next)			\
+#define	K5_TAILQ_FOREACH_SAFE(var, head, field, next)			\
 	for ((var) = ((head)->tqh_first);				\
-	        (var) != NULL && ((next) = TAILQ_NEXT(var, field), 1);	\
+	        (var) != NULL && ((next) = K5_TAILQ_NEXT(var, field), 1);	\
 		(var) = (next))
 
-#define	TAILQ_FOREACH_REVERSE(var, head, headname, field)		\
+#define	K5_TAILQ_FOREACH_REVERSE(var, head, headname, field)		\
 	for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last));	\
 		(var);							\
 		(var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))
 
-#define	TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, prev)	\
-	for ((var) = TAILQ_LAST((head), headname);			\
-		(var) && ((prev) = TAILQ_PREV((var), headname, field), 1);\
+#define	K5_TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, prev)	\
+	for ((var) = K5_TAILQ_LAST((head), headname);			\
+		(var) && ((prev) = K5_TAILQ_PREV((var), headname, field), 1);\
 		(var) = (prev))
 
-#define	TAILQ_CONCAT(head1, head2, field) do {				\
-	if (!TAILQ_EMPTY(head2)) {					\
+#define	K5_TAILQ_CONCAT(head1, head2, field) do {			\
+	if (!K5_TAILQ_EMPTY(head2)) {					\
 		*(head1)->tqh_last = (head2)->tqh_first;		\
 		(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last;	\
 		(head1)->tqh_last = (head2)->tqh_last;			\
-		TAILQ_INIT((head2));					\
+		K5_TAILQ_INIT((head2));					\
 	}								\
 } while (/*CONSTCOND*/0)
 
 /*
  * Tail queue access methods.
  */
-#define	TAILQ_EMPTY(head)		((head)->tqh_first == NULL)
-#define	TAILQ_FIRST(head)		((head)->tqh_first)
-#define	TAILQ_NEXT(elm, field)		((elm)->field.tqe_next)
+#define	K5_TAILQ_EMPTY(head)		((head)->tqh_first == NULL)
+#define	K5_TAILQ_FIRST(head)		((head)->tqh_first)
+#define	K5_TAILQ_NEXT(elm, field)	((elm)->field.tqe_next)
 
-#define	TAILQ_LAST(head, headname) \
+#define	K5_TAILQ_LAST(head, headname) \
 	(*(((struct headname *)((head)->tqh_last))->tqh_last))
-#define	TAILQ_PREV(elm, headname, field) \
+#define	K5_TAILQ_PREV(elm, headname, field) \
 	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
 
 
 /*
  * Circular queue definitions.
  */
-#if defined(_KERNEL) && defined(QUEUEDEBUG)
-#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field)				\
-	if ((head)->cqh_first != (void *)(head) &&			\
-	    (head)->cqh_first->field.cqe_prev != (void *)(head))	\
-		panic("CIRCLEQ head forw %p %s:%d", (head),		\
-		      __FILE__, __LINE__);				\
-	if ((head)->cqh_last != (void *)(head) &&			\
-	    (head)->cqh_last->field.cqe_next != (void *)(head))		\
-		panic("CIRCLEQ head back %p %s:%d", (head),		\
-		      __FILE__, __LINE__);
-#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field)			\
-	if ((elm)->field.cqe_next == (void *)(head)) {			\
-		if ((head)->cqh_last != (elm))				\
-			panic("CIRCLEQ elm last %p %s:%d", (elm),	\
-			      __FILE__, __LINE__);			\
-	} else {							\
-		if ((elm)->field.cqe_next->field.cqe_prev != (elm))	\
-			panic("CIRCLEQ elm forw %p %s:%d", (elm),	\
-			      __FILE__, __LINE__);			\
-	}								\
-	if ((elm)->field.cqe_prev == (void *)(head)) {			\
-		if ((head)->cqh_first != (elm))				\
-			panic("CIRCLEQ elm first %p %s:%d", (elm),	\
-			      __FILE__, __LINE__);			\
-	} else {							\
-		if ((elm)->field.cqe_prev->field.cqe_next != (elm))	\
-			panic("CIRCLEQ elm prev %p %s:%d", (elm),	\
-			      __FILE__, __LINE__);			\
-	}
-#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field)			\
-	(elm)->field.cqe_next = (void *)1L;				\
-	(elm)->field.cqe_prev = (void *)1L;
-#else
-#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field)
-#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field)
-#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field)
-#endif
-
-#define	CIRCLEQ_HEAD(name, type)					\
+#define	K5_CIRCLEQ_HEAD(name, type)					\
 struct name {								\
 	struct type *cqh_first;		/* first element */		\
 	struct type *cqh_last;		/* last element */		\
 }
 
-#define	CIRCLEQ_HEAD_INITIALIZER(head)					\
+#define	K5_CIRCLEQ_HEAD_INITIALIZER(head)				\
 	{ (void *)&head, (void *)&head }
 
-#define	CIRCLEQ_ENTRY(type)						\
+#define	K5_CIRCLEQ_ENTRY(type)						\
 struct {								\
 	struct type *cqe_next;		/* next element */		\
 	struct type *cqe_prev;		/* previous element */		\
@@ -650,14 +548,12 @@ struct {								\
 /*
  * Circular queue functions.
  */
-#define	CIRCLEQ_INIT(head) do {						\
+#define	K5_CIRCLEQ_INIT(head) do {					\
 	(head)->cqh_first = (void *)(head);				\
 	(head)->cqh_last = (void *)(head);				\
 } while (/*CONSTCOND*/0)
 
-#define	CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
-	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
-	QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field)		\
+#define	K5_CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
 	(elm)->field.cqe_next = (listelm)->field.cqe_next;		\
 	(elm)->field.cqe_prev = (listelm);				\
 	if ((listelm)->field.cqe_next == (void *)(head))		\
@@ -667,9 +563,7 @@ struct {								\
 	(listelm)->field.cqe_next = (elm);				\
 } while (/*CONSTCOND*/0)
 
-#define	CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do {		\
-	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
-	QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field)		\
+#define	K5_CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do {	\
 	(elm)->field.cqe_next = (listelm);				\
 	(elm)->field.cqe_prev = (listelm)->field.cqe_prev;		\
 	if ((listelm)->field.cqe_prev == (void *)(head))		\
@@ -679,8 +573,7 @@ struct {								\
 	(listelm)->field.cqe_prev = (elm);				\
 } while (/*CONSTCOND*/0)
 
-#define	CIRCLEQ_INSERT_HEAD(head, elm, field) do {			\
-	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
+#define	K5_CIRCLEQ_INSERT_HEAD(head, elm, field) do {			\
 	(elm)->field.cqe_next = (head)->cqh_first;			\
 	(elm)->field.cqe_prev = (void *)(head);				\
 	if ((head)->cqh_last == (void *)(head))				\
@@ -690,8 +583,7 @@ struct {								\
 	(head)->cqh_first = (elm);					\
 } while (/*CONSTCOND*/0)
 
-#define	CIRCLEQ_INSERT_TAIL(head, elm, field) do {			\
-	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
+#define	K5_CIRCLEQ_INSERT_TAIL(head, elm, field) do {			\
 	(elm)->field.cqe_next = (void *)(head);				\
 	(elm)->field.cqe_prev = (head)->cqh_last;			\
 	if ((head)->cqh_first == (void *)(head))			\
@@ -701,9 +593,7 @@ struct {								\
 	(head)->cqh_last = (elm);					\
 } while (/*CONSTCOND*/0)
 
-#define	CIRCLEQ_REMOVE(head, elm, field) do {				\
-	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
-	QUEUEDEBUG_CIRCLEQ_ELM((head), (elm), field)			\
+#define	K5_CIRCLEQ_REMOVE(head, elm, field) do {			\
 	if ((elm)->field.cqe_next == (void *)(head))			\
 		(head)->cqh_last = (elm)->field.cqe_prev;		\
 	else								\
@@ -714,15 +604,14 @@ struct {								\
 	else								\
 		(elm)->field.cqe_prev->field.cqe_next =			\
 		    (elm)->field.cqe_next;				\
-	QUEUEDEBUG_CIRCLEQ_POSTREMOVE((elm), field)			\
 } while (/*CONSTCOND*/0)
 
-#define	CIRCLEQ_FOREACH(var, head, field)				\
+#define	K5_CIRCLEQ_FOREACH(var, head, field)				\
 	for ((var) = ((head)->cqh_first);				\
 		(var) != (const void *)(head);				\
 		(var) = ((var)->field.cqe_next))
 
-#define	CIRCLEQ_FOREACH_REVERSE(var, head, field)			\
+#define	K5_CIRCLEQ_FOREACH_REVERSE(var, head, field)			\
 	for ((var) = ((head)->cqh_last);				\
 		(var) != (const void *)(head);				\
 		(var) = ((var)->field.cqe_prev))
@@ -730,17 +619,17 @@ struct {								\
 /*
  * Circular queue access methods.
  */
-#define	CIRCLEQ_EMPTY(head)		((head)->cqh_first == (void *)(head))
-#define	CIRCLEQ_FIRST(head)		((head)->cqh_first)
-#define	CIRCLEQ_LAST(head)		((head)->cqh_last)
-#define	CIRCLEQ_NEXT(elm, field)	((elm)->field.cqe_next)
-#define	CIRCLEQ_PREV(elm, field)	((elm)->field.cqe_prev)
+#define	K5_CIRCLEQ_EMPTY(head)		((head)->cqh_first == (void *)(head))
+#define	K5_CIRCLEQ_FIRST(head)		((head)->cqh_first)
+#define	K5_CIRCLEQ_LAST(head)		((head)->cqh_last)
+#define	K5_CIRCLEQ_NEXT(elm, field)	((elm)->field.cqe_next)
+#define	K5_CIRCLEQ_PREV(elm, field)	((elm)->field.cqe_prev)
 
-#define CIRCLEQ_LOOP_NEXT(head, elm, field)				\
+#define K5_CIRCLEQ_LOOP_NEXT(head, elm, field)				\
 	(((elm)->field.cqe_next == (void *)(head))			\
 	    ? ((head)->cqh_first)					\
 	    : (elm->field.cqe_next))
-#define CIRCLEQ_LOOP_PREV(head, elm, field)				\
+#define K5_CIRCLEQ_LOOP_PREV(head, elm, field)				\
 	(((elm)->field.cqe_prev == (void *)(head))			\
 	    ? ((head)->cqh_last)					\
 	    : (elm->field.cqe_prev))
diff --git a/src/kdc/replay.c b/src/kdc/replay.c
index 3eee6e8..76fd772 100644
--- a/src/kdc/replay.c
+++ b/src/kdc/replay.c
@@ -32,8 +32,8 @@
 #ifndef NOCACHE
 
 struct entry {
-    LIST_ENTRY(entry) bucket_links;
-    TAILQ_ENTRY(entry) expire_links;
+    K5_LIST_ENTRY(entry) bucket_links;
+    K5_TAILQ_ENTRY(entry) expire_links;
     int num_hits;
     krb5_timestamp timein;
     krb5_data req_packet;
@@ -47,8 +47,8 @@ struct entry {
 #define LOOKASIDE_MAX_SIZE (10 * 1024 * 1024)
 #endif
 
-LIST_HEAD(entry_list, entry);
-TAILQ_HEAD(entry_queue, entry);
+K5_LIST_HEAD(entry_list, entry);
+K5_TAILQ_HEAD(entry_queue, entry);
 
 static struct entry_list hash_table[LOOKASIDE_HASH_SIZE];
 static struct entry_queue expiration_queue;
@@ -115,8 +115,8 @@ static void
 discard_entry(krb5_context context, struct entry *entry)
 {
     total_size -= entry_size(&entry->req_packet, &entry->reply_packet);
-    LIST_REMOVE(entry, bucket_links);
-    TAILQ_REMOVE(&expiration_queue, entry, expire_links);
+    K5_LIST_REMOVE(entry, bucket_links);
+    K5_TAILQ_REMOVE(&expiration_queue, entry, expire_links);
     krb5_free_data_contents(context, &entry->req_packet);
     krb5_free_data_contents(context, &entry->reply_packet);
     free(entry);
@@ -129,7 +129,7 @@ find_entry(krb5_data *req_packet)
     krb5_ui_4 hash = murmurhash3(req_packet);
     struct entry *e;
 
-    LIST_FOREACH(e, &hash_table[hash], bucket_links) {
+    K5_LIST_FOREACH(e, &hash_table[hash], bucket_links) {
         if (data_eq(e->req_packet, *req_packet))
             return e;
     }
@@ -144,8 +144,8 @@ kdc_init_lookaside(krb5_context context)
     int i;
 
     for (i = 0; i < LOOKASIDE_HASH_SIZE; i++)
-        LIST_INIT(&hash_table[i]);
-    TAILQ_INIT(&expiration_queue);
+        K5_LIST_INIT(&hash_table[i]);
+    K5_TAILQ_INIT(&expiration_queue);
     return krb5_c_random_make_octets(context, &d);
 }
 
@@ -196,7 +196,7 @@ kdc_insert_lookaside(krb5_context kcontext, krb5_data *req_packet,
         return;
 
     /* Purge stale entries and limit the total size of the entries. */
-    TAILQ_FOREACH_SAFE(e, &expiration_queue, expire_links, next) {
+    K5_TAILQ_FOREACH_SAFE(e, &expiration_queue, expire_links, next) {
         if (!STALE(e, timenow) && total_size + esize <= LOOKASIDE_MAX_SIZE)
             break;
         max_hits_per_entry = max(max_hits_per_entry, e->num_hits);
@@ -220,8 +220,8 @@ kdc_insert_lookaside(krb5_context kcontext, krb5_data *req_packet,
         return;
     }
 
-    TAILQ_INSERT_TAIL(&expiration_queue, e, expire_links);
-    LIST_INSERT_HEAD(&hash_table[hash], e, bucket_links);
+    K5_TAILQ_INSERT_TAIL(&expiration_queue, e, expire_links);
+    K5_LIST_INSERT_HEAD(&hash_table[hash], e, bucket_links);
     num_entries++;
     total_size += esize;
     return;
@@ -233,7 +233,7 @@ kdc_free_lookaside(krb5_context kcontext)
 {
     struct entry *e, *next;
 
-    TAILQ_FOREACH_SAFE(e, &expiration_queue, expire_links, next) {
+    K5_TAILQ_FOREACH_SAFE(e, &expiration_queue, expire_links, next) {
         discard_entry(kcontext, e);
     }
 }
diff --git a/src/lib/krad/attrset.c b/src/lib/krad/attrset.c
index fbd0621..03c6137 100644
--- a/src/lib/krad/attrset.c
+++ b/src/lib/krad/attrset.c
@@ -33,11 +33,11 @@
 
 #include <string.h>
 
-TAILQ_HEAD(attr_head, attr_st);
+K5_TAILQ_HEAD(attr_head, attr_st);
 
 typedef struct attr_st attr;
 struct attr_st {
-    TAILQ_ENTRY(attr_st) list;
+    K5_TAILQ_ENTRY(attr_st) list;
     krad_attr type;
     krb5_data attr;
     char buffer[MAX_ATTRSIZE];
@@ -57,7 +57,7 @@ krad_attrset_new(krb5_context ctx, krad_attrset **set)
     if (tmp == NULL)
         return ENOMEM;
     tmp->ctx = ctx;
-    TAILQ_INIT(&tmp->list);
+    K5_TAILQ_INIT(&tmp->list);
 
     *set = tmp;
     return 0;
@@ -71,9 +71,9 @@ krad_attrset_free(krad_attrset *set)
     if (set == NULL)
         return;
 
-    while (!TAILQ_EMPTY(&set->list)) {
-        a = TAILQ_FIRST(&set->list);
-        TAILQ_REMOVE(&set->list, a, list);
+    while (!K5_TAILQ_EMPTY(&set->list)) {
+        a = K5_TAILQ_FIRST(&set->list);
+        K5_TAILQ_REMOVE(&set->list, a, list);
         zap(a->buffer, sizeof(a->buffer));
         free(a);
     }
@@ -99,7 +99,7 @@ krad_attrset_add(krad_attrset *set, krad_attr type, const krb5_data *data)
     tmp->attr = make_data(tmp->buffer, data->length);
     memcpy(tmp->attr.data, data->data, data->length);
 
-    TAILQ_INSERT_TAIL(&set->list, tmp, list);
+    K5_TAILQ_INSERT_TAIL(&set->list, tmp, list);
     return 0;
 }
 
@@ -118,9 +118,9 @@ krad_attrset_del(krad_attrset *set, krad_attr type, size_t indx)
 {
     attr *a;
 
-    TAILQ_FOREACH(a, &set->list, list) {
+    K5_TAILQ_FOREACH(a, &set->list, list) {
         if (a->type == type && indx-- == 0) {
-            TAILQ_REMOVE(&set->list, a, list);
+            K5_TAILQ_REMOVE(&set->list, a, list);
             zap(a->buffer, sizeof(a->buffer));
             free(a);
             return;
@@ -133,7 +133,7 @@ krad_attrset_get(const krad_attrset *set, krad_attr type, size_t indx)
 {
     attr *a;
 
-    TAILQ_FOREACH(a, &set->list, list) {
+    K5_TAILQ_FOREACH(a, &set->list, list) {
         if (a->type == type && indx-- == 0)
             return &a->attr;
     }
@@ -152,7 +152,7 @@ krad_attrset_copy(const krad_attrset *set, krad_attrset **copy)
     if (retval != 0)
         return retval;
 
-    TAILQ_FOREACH(a, &set->list, list) {
+    K5_TAILQ_FOREACH(a, &set->list, list) {
         retval = krad_attrset_add(tmp, a->type, &a->attr);
         if (retval != 0) {
             krad_attrset_free(tmp);
@@ -179,7 +179,7 @@ kr_attrset_encode(const krad_attrset *set, const char *secret,
         return 0;
     }
 
-    TAILQ_FOREACH(a, &set->list, list) {
+    K5_TAILQ_FOREACH(a, &set->list, list) {
         retval = kr_attr_encode(set->ctx, secret, auth, a->type, &a->attr,
                                 buffer, &attrlen);
         if (retval != 0)
diff --git a/src/lib/krad/client.c b/src/lib/krad/client.c
index 0c37680..6365dd1 100644
--- a/src/lib/krad/client.c
+++ b/src/lib/krad/client.c
@@ -36,7 +36,7 @@
 #include <stdio.h>
 #include <limits.h>
 
-LIST_HEAD(server_head, server_st);
+K5_LIST_HEAD(server_head, server_st);
 
 typedef struct remote_state_st remote_state;
 typedef struct request_st request;
@@ -65,7 +65,7 @@ struct request_st {
 struct server_st {
     krad_remote *serv;
     time_t last;
-    LIST_ENTRY(server_st) list;
+    K5_LIST_ENTRY(server_st) list;
 };
 
 struct krad_client_st {
@@ -87,7 +87,7 @@ get_server(krad_client *rc, const struct addrinfo *ai, const char *secret,
     if (time(&currtime) == (time_t)-1)
         return errno;
 
-    LIST_FOREACH(srv, &rc->servers, list) {
+    K5_LIST_FOREACH(srv, &rc->servers, list) {
         if (kr_remote_equals(srv->serv, ai, secret)) {
             srv->last = currtime;
             *out = srv->serv;
@@ -106,7 +106,7 @@ get_server(krad_client *rc, const struct addrinfo *ai, const char *secret,
         return retval;
     }
 
-    LIST_INSERT_HEAD(&rc->servers, srv, list);
+    K5_LIST_INSERT_HEAD(&rc->servers, srv, list);
     *out = srv->serv;
     return 0;
 }
@@ -179,9 +179,9 @@ age(struct server_head *head, time_t currtime)
 {
     server *srv, *tmp;
 
-    LIST_FOREACH_SAFE(srv, head, list, tmp) {
+    K5_LIST_FOREACH_SAFE(srv, head, list, tmp) {
         if (currtime == (time_t)-1 || currtime - srv->last > 60 * 60) {
-            LIST_REMOVE(srv, list);
+            K5_LIST_REMOVE(srv, list);
             kr_remote_free(srv->serv);
             free(srv);
         }
diff --git a/src/lib/krad/remote.c b/src/lib/krad/remote.c
index df3de3a..f6abc43 100644
--- a/src/lib/krad/remote.c
+++ b/src/lib/krad/remote.c
@@ -41,11 +41,11 @@
 #define FLAGS_WRITE VERTO_EV_FLAG_IO_WRITE
 #define FLAGS_BASE  VERTO_EV_FLAG_PERSIST | VERTO_EV_FLAG_IO_ERROR
 
-TAILQ_HEAD(request_head, request_st);
+K5_TAILQ_HEAD(request_head, request_st);
 
 typedef struct request_st request;
 struct request_st {
-    TAILQ_ENTRY(request_st) list;
+    K5_TAILQ_ENTRY(request_st) list;
     krad_remote *rr;
     krad_packet *request;
     krad_cb cb;
@@ -83,7 +83,7 @@ iterator(request **out)
     if (tmp == NULL)
         return NULL;
 
-    *out = TAILQ_NEXT(tmp, list);
+    *out = K5_TAILQ_NEXT(tmp, list);
     return tmp->request;
 }
 
@@ -115,7 +115,7 @@ request_finish(request *req, krb5_error_code retval,
                const krad_packet *response)
 {
     if (retval != ETIMEDOUT)
-        TAILQ_REMOVE(&req->rr->list, req, list);
+        K5_TAILQ_REMOVE(&req->rr->list, req, list);
 
     req->cb(retval, req->request, response, req->data);
 
@@ -225,7 +225,7 @@ remote_shutdown(krad_remote *rr)
     remote_disconnect(rr);
 
     /* Start timers for all unsent packets. */
-    TAILQ_FOREACH(r, &rr->list, list) {
+    K5_TAILQ_FOREACH(r, &rr->list, list) {
         if (r->timer == NULL) {
             retval = request_start_timer(r, rr->vctx);
             if (retval != 0)
@@ -262,7 +262,7 @@ on_io_write(krad_remote *rr)
     ssize_t written;
     request *r;
 
-    TAILQ_FOREACH(r, &rr->list, list) {
+    K5_TAILQ_FOREACH(r, &rr->list, list) {
         tmp = krad_packet_encode(r->request);
 
         /* If the packet has already been sent, do nothing. */
@@ -348,7 +348,7 @@ on_io_read(krad_remote *rr)
         return;
 
     /* Decode the packet. */
-    tmp = TAILQ_FIRST(&rr->list);
+    tmp = K5_TAILQ_FIRST(&rr->list);
     retval = krad_packet_decode_response(rr->kctx, rr->secret, &rr->buffer,
                                          (krad_packet_iter_cb)iterator, &tmp,
                                          &req, &rsp);
@@ -358,7 +358,7 @@ on_io_read(krad_remote *rr)
 
     /* Match the response with an outstanding request. */
     if (req != NULL) {
-        TAILQ_FOREACH(r, &rr->list, list) {
+        K5_TAILQ_FOREACH(r, &rr->list, list) {
             if (r->request == req &&
                 r->sent == krad_packet_encode(req)->length) {
                 request_finish(r, 0, rsp);
@@ -397,7 +397,7 @@ kr_remote_new(krb5_context kctx, verto_ctx *vctx, const struct addrinfo *info,
     tmp->kctx = kctx;
     tmp->vctx = vctx;
     tmp->buffer = make_data(tmp->buffer_, 0);
-    TAILQ_INIT(&tmp->list);
+    K5_TAILQ_INIT(&tmp->list);
     tmp->fd = -1;
 
     tmp->secret = strdup(secret);
@@ -428,8 +428,8 @@ kr_remote_free(krad_remote *rr)
     if (rr == NULL)
         return;
 
-    while (!TAILQ_EMPTY(&rr->list))
-        request_finish(TAILQ_FIRST(&rr->list), ECANCELED, NULL);
+    while (!K5_TAILQ_EMPTY(&rr->list))
+        request_finish(K5_TAILQ_FIRST(&rr->list), ECANCELED, NULL);
 
     free(rr->secret);
     if (rr->info != NULL)
@@ -451,13 +451,13 @@ kr_remote_send(krad_remote *rr, krad_code code, krad_attrset *attrs,
     if (rr->info->ai_socktype == SOCK_STREAM)
         retries = 0;
 
-    r = TAILQ_FIRST(&rr->list);
+    r = K5_TAILQ_FIRST(&rr->list);
     retval = krad_packet_new_request(rr->kctx, rr->secret, code, attrs,
                                      (krad_packet_iter_cb)iterator, &r, &tmp);
     if (retval != 0)
         goto error;
 
-    TAILQ_FOREACH(r, &rr->list, list) {
+    K5_TAILQ_FOREACH(r, &rr->list, list) {
         if (r->request == tmp) {
             retval = EALREADY;
             goto error;
@@ -473,7 +473,7 @@ kr_remote_send(krad_remote *rr, krad_code code, krad_attrset *attrs,
     if (retval != 0)
         goto error;
 
-    TAILQ_INSERT_TAIL(&rr->list, r, list);
+    K5_TAILQ_INSERT_TAIL(&rr->list, r, list);
     if (pkt != NULL)
         *pkt = tmp;
     return 0;
@@ -488,7 +488,7 @@ kr_remote_cancel(krad_remote *rr, const krad_packet *pkt)
 {
     request *r;
 
-    TAILQ_FOREACH(r, &rr->list, list) {
+    K5_TAILQ_FOREACH(r, &rr->list, list) {
         if (r->request == pkt) {
             request_finish(r, ECANCELED, NULL);
             return;

