krb5 commit [krb5-1.12]: Use TAILQ macros instead of CIRCLEQ in libdb2
Tom Yu
tlyu at MIT.EDU
Tue Jul 22 14:35:54 EDT 2014
https://github.com/krb5/krb5/commit/c7bb9278ad12c9278f316479af56f9e952f4d650
commit c7bb9278ad12c9278f316479af56f9e952f4d650
Author: Greg Hudson <ghudson at mit.edu>
Date: Mon Feb 17 00:18:41 2014 -0500
Use TAILQ macros instead of CIRCLEQ in libdb2
The optimizer in gcc 4.8.1 (but not the current gcc head revision)
breaks the queue.h CIRCLEQ macros, apparently due to an overzealous
strict aliasing deduction. Use TAILQ macros in the libdb2 mpool code
instead.
(cherry picked from commit 26d874412983c4c9979a9f5e7bec51834ad4cda5)
ticket: 7860
version_fixed: 1.12.2
status: resolved
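[Editor's note, not part of the commit: the standalone sketch below illustrates the TAILQ pattern the patch switches to. The program, struct node, and the pgno field are illustrative only and assume a platform <sys/queue.h> that supplies the 4.4BSD TAILQ macros (glibc and the BSDs do). The relevant difference from CIRCLEQ is the termination test: CIRCLEQ traversal compares an element pointer against the address of the list head cast to the element type, the construct the gcc 4.8.1 optimizer appeared to mishandle, whereas TAILQ traversal simply stops at NULL. The sketch uses the TAILQ_FIRST/TAILQ_NEXT accessor macros; the patched mpool code reaches the same fields directly via tqh_first/tqe_next.]

/*
 * Minimal sketch (not from the patch) of NULL-terminated TAILQ usage,
 * mirroring the lru-queue handling in mpool.c after this change.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct node {
	TAILQ_ENTRY(node) q;	/* linkage, analogous to BKT's "q" field */
	int pgno;		/* stand-in for a page number */
};

TAILQ_HEAD(nodehead, node);

int
main(void)
{
	struct nodehead lqh;
	struct node *np;
	int i;

	TAILQ_INIT(&lqh);

	/* Insert at the tail, as mpool_new() does for the lru queue. */
	for (i = 0; i < 4; i++) {
		np = calloc(1, sizeof(*np));
		if (np == NULL)
			return (1);
		np->pgno = i;
		TAILQ_INSERT_TAIL(&lqh, np, q);
	}

	/* Walk the queue; the loop ends when the next pointer is NULL,
	 * not when an element pointer equals the cast head address. */
	for (np = TAILQ_FIRST(&lqh); np != NULL; np = TAILQ_NEXT(np, q))
		printf("page %d\n", np->pgno);

	/* Tear down, mirroring the mpool_close() loop in the diff. */
	while ((np = TAILQ_FIRST(&lqh)) != NULL) {
		TAILQ_REMOVE(&lqh, np, q);
		free(np);
	}
	return (0);
}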
src/plugins/kdb/db2/libdb2/mpool/mpool.c | 43 ++++++++++++++----------------
src/plugins/kdb/db2/libdb2/mpool/mpool.h | 8 +++---
2 files changed, 24 insertions(+), 27 deletions(-)
diff --git a/src/plugins/kdb/db2/libdb2/mpool/mpool.c b/src/plugins/kdb/db2/libdb2/mpool/mpool.c
index acdc1b8..7941a9f 100644
--- a/src/plugins/kdb/db2/libdb2/mpool/mpool.c
+++ b/src/plugins/kdb/db2/libdb2/mpool/mpool.c
@@ -81,9 +81,9 @@ mpool_open(key, fd, pagesize, maxcache)
/* Allocate and initialize the MPOOL cookie. */
if ((mp = (MPOOL *)calloc(1, sizeof(MPOOL))) == NULL)
return (NULL);
- CIRCLEQ_INIT(&mp->lqh);
+ TAILQ_INIT(&mp->lqh);
for (entry = 0; entry < HASHSIZE; ++entry)
- CIRCLEQ_INIT(&mp->hqh[entry]);
+ TAILQ_INIT(&mp->hqh[entry]);
mp->maxcache = maxcache;
mp->npages = sb.st_size / pagesize;
mp->pagesize = pagesize;
@@ -143,8 +143,8 @@ mpool_new(mp, pgnoaddr, flags)
bp->flags = MPOOL_PINNED | MPOOL_INUSE;
head = &mp->hqh[HASHKEY(bp->pgno)];
- CIRCLEQ_INSERT_HEAD(head, bp, hq);
- CIRCLEQ_INSERT_TAIL(&mp->lqh, bp, q);
+ TAILQ_INSERT_HEAD(head, bp, hq);
+ TAILQ_INSERT_TAIL(&mp->lqh, bp, q);
return (bp->page);
}
@@ -168,8 +168,8 @@ mpool_delete(mp, page)
/* Remove from the hash and lru queues. */
head = &mp->hqh[HASHKEY(bp->pgno)];
- CIRCLEQ_REMOVE(head, bp, hq);
- CIRCLEQ_REMOVE(&mp->lqh, bp, q);
+ TAILQ_REMOVE(head, bp, hq);
+ TAILQ_REMOVE(&mp->lqh, bp, q);
free(bp);
return (RET_SUCCESS);
@@ -208,10 +208,10 @@ mpool_get(mp, pgno, flags)
* of the lru chain.
*/
head = &mp->hqh[HASHKEY(bp->pgno)];
- CIRCLEQ_REMOVE(head, bp, hq);
- CIRCLEQ_INSERT_HEAD(head, bp, hq);
- CIRCLEQ_REMOVE(&mp->lqh, bp, q);
- CIRCLEQ_INSERT_TAIL(&mp->lqh, bp, q);
+ TAILQ_REMOVE(head, bp, hq);
+ TAILQ_INSERT_HEAD(head, bp, hq);
+ TAILQ_REMOVE(&mp->lqh, bp, q);
+ TAILQ_INSERT_TAIL(&mp->lqh, bp, q);
/* Return a pinned page. */
bp->flags |= MPOOL_PINNED;
@@ -261,8 +261,8 @@ mpool_get(mp, pgno, flags)
* of the lru chain.
*/
head = &mp->hqh[HASHKEY(bp->pgno)];
- CIRCLEQ_INSERT_HEAD(head, bp, hq);
- CIRCLEQ_INSERT_TAIL(&mp->lqh, bp, q);
+ TAILQ_INSERT_HEAD(head, bp, hq);
+ TAILQ_INSERT_TAIL(&mp->lqh, bp, q);
/* Run through the user's filter. */
if (mp->pgin != NULL)
@@ -311,8 +311,8 @@ mpool_close(mp)
BKT *bp;
/* Free up any space allocated to the lru pages. */
- while ((bp = mp->lqh.cqh_first) != (void *)&mp->lqh) {
- CIRCLEQ_REMOVE(&mp->lqh, mp->lqh.cqh_first, q);
+ while ((bp = mp->lqh.tqh_first) != NULL) {
+ TAILQ_REMOVE(&mp->lqh, mp->lqh.tqh_first, q);
free(bp);
}
@@ -332,8 +332,7 @@ mpool_sync(mp)
BKT *bp;
/* Walk the lru chain, flushing any dirty pages to disk. */
- for (bp = mp->lqh.cqh_first;
- bp != (void *)&mp->lqh; bp = bp->q.cqe_next)
+ for (bp = mp->lqh.tqh_first; bp != NULL; bp = bp->q.tqe_next)
if (bp->flags & MPOOL_DIRTY &&
mpool_write(mp, bp) == RET_ERROR)
return (RET_ERROR);
@@ -363,8 +362,7 @@ mpool_bkt(mp)
* off any lists. If we don't find anything we grow the cache anyway.
* The cache never shrinks.
*/
- for (bp = mp->lqh.cqh_first;
- bp != (void *)&mp->lqh; bp = bp->q.cqe_next)
+ for (bp = mp->lqh.tqh_first; bp != NULL; bp = bp->q.tqe_next)
if (!(bp->flags & MPOOL_PINNED)) {
/* Flush if dirty. */
if (bp->flags & MPOOL_DIRTY &&
@@ -375,8 +373,8 @@ mpool_bkt(mp)
#endif
/* Remove from the hash and lru queues. */
head = &mp->hqh[HASHKEY(bp->pgno)];
- CIRCLEQ_REMOVE(head, bp, hq);
- CIRCLEQ_REMOVE(&mp->lqh, bp, q);
+ TAILQ_REMOVE(head, bp, hq);
+ TAILQ_REMOVE(&mp->lqh, bp, q);
#if defined(DEBUG) && !defined(DEBUG_IDX0SPLIT)
{ void *spage;
spage = bp->page;
@@ -450,7 +448,7 @@ mpool_look(mp, pgno)
BKT *bp;
head = &mp->hqh[HASHKEY(pgno)];
- for (bp = head->cqh_first; bp != (void *)head; bp = bp->hq.cqe_next)
+ for (bp = head->tqh_first; bp != NULL; bp = bp->hq.tqe_next)
if ((bp->pgno == pgno) && (bp->flags & MPOOL_INUSE)) {
#ifdef STATISTICS
++mp->cachehit;
@@ -494,8 +492,7 @@ mpool_stat(mp)
sep = "";
cnt = 0;
- for (bp = mp->lqh.cqh_first;
- bp != (void *)&mp->lqh; bp = bp->q.cqe_next) {
+ for (bp = mp->lqh.tqh_first; bp != NULL; bp = bp->q.tqe_next) {
(void)fprintf(stderr, "%s%d", sep, bp->pgno);
if (bp->flags & MPOOL_DIRTY)
(void)fprintf(stderr, "d");
diff --git a/src/plugins/kdb/db2/libdb2/mpool/mpool.h b/src/plugins/kdb/db2/libdb2/mpool/mpool.h
index 627ad5b..bed5ed3 100644
--- a/src/plugins/kdb/db2/libdb2/mpool/mpool.h
+++ b/src/plugins/kdb/db2/libdb2/mpool/mpool.h
@@ -47,8 +47,8 @@
/* The BKT structures are the elements of the queues. */
typedef struct _bkt {
- CIRCLEQ_ENTRY(_bkt) hq; /* hash queue */
- CIRCLEQ_ENTRY(_bkt) q; /* lru queue */
+ TAILQ_ENTRY(_bkt) hq; /* hash queue */
+ TAILQ_ENTRY(_bkt) q; /* lru queue */
void *page; /* page */
db_pgno_t pgno; /* page number */
@@ -59,9 +59,9 @@ typedef struct _bkt {
} BKT;
typedef struct MPOOL {
- CIRCLEQ_HEAD(_lqh, _bkt) lqh; /* lru queue head */
+ TAILQ_HEAD(_lqh, _bkt) lqh; /* lru queue head */
/* hash queue array */
- CIRCLEQ_HEAD(_hqh, _bkt) hqh[HASHSIZE];
+ TAILQ_HEAD(_hqh, _bkt) hqh[HASHSIZE];
db_pgno_t curcache; /* current number of cached pages */
db_pgno_t maxcache; /* max number of cached pages */
db_pgno_t npages; /* number of pages in the file */