krb5 commit: Add LMDB KDB module tests

Greg Hudson ghudson at mit.edu
Tue May 29 15:00:06 EDT 2018


https://github.com/krb5/krb5/commit/dafc1daeedb008faa9dc6246e33f37a5c45462ca
commit dafc1daeedb008faa9dc6246e33f37a5c45462ca
Author: Greg Hudson <ghudson at mit.edu>
Date:   Wed Apr 25 23:50:06 2018 -0400

    Add LMDB KDB module tests
    
    Add a new k5test function multidb_realms() which yields a realm using
    BDB and a realm using LMDB.  Run the tests in t_dump.py, t_iprop.py,
    the lockout tests in t_policy.py, and kdbtest (in t_kdb.py) with both
    databases.
    
    If K5TEST_LMDB is set in the environment, use LMDB for all Python
    tests and all tests in tests/dejagnu, with two exceptions
    (t_unlockiter.py and t_kdb_locking.py) which test BDB-specific
    behavior.  To support those exceptions, add the "bdb_only" realm
    initialization flag.
    
    In t_iprop.py, be sure to properly clean up kpropd daemons, using a
    new k5test realm method stop_kpropd() to stop a kpropd daemon started
    with start_kpropd() and remove it from the realm's list.
    
    ticket: 8674

 src/Makefile.in                            |    6 +-
 src/tests/dejagnu/config/default.exp       |   15 +-
 src/tests/dejagnu/krb-standalone/kprop.exp |    9 +-
 src/tests/t_dump.py                        |  158 ++++----
 src/tests/t_iprop.py                       |  662 ++++++++++++++--------------
 src/tests/t_kdb.py                         |    6 +-
 src/tests/t_kdb_locking.py                 |    2 +-
 src/tests/t_policy.py                      |   48 +-
 src/tests/t_unlockiter.py                  |    4 +-
 src/util/k5test.py                         |   48 ++-
 10 files changed, 518 insertions(+), 440 deletions(-)

diff --git a/src/Makefile.in b/src/Makefile.in
index e2c178f..bd7a2dc 100644
--- a/src/Makefile.in
+++ b/src/Makefile.in
@@ -490,7 +490,7 @@ check-prerecurse: runenv.py
 	$(RM) $(SKIPTESTS)
 	touch $(SKIPTESTS)
 
-check-unix:
+check-unix: check-lmdb-$(HAVE_LMDB)
 	cat $(SKIPTESTS)
 
 check-pytests-no: check-postrecurse
@@ -501,6 +501,10 @@ check-cmocka-no: check-postrecurse
 	@echo 'Skipped cmocka tests: cmocka library or header not found' >> \
 		$(SKIPTESTS)
 
+check-lmdb-yes:
+check-lmdb-no:
+	@echo 'Skipped LMDB tests: LMDB KDB module not built' >> $(SKIPTESTS)
+
 # Create a test realm and spawn a shell in an environment pointing to it.
 # If CROSSNUM is set, create that many fully connected test realms and
 # point the shell at the first one.
diff --git a/src/tests/dejagnu/config/default.exp b/src/tests/dejagnu/config/default.exp
index 2d1686c..18b8c33 100644
--- a/src/tests/dejagnu/config/default.exp
+++ b/src/tests/dejagnu/config/default.exp
@@ -455,7 +455,8 @@ proc delete_db {} {
     global tmppwd
     # Master and slave db files
     file delete $tmppwd/kdc-db $tmppwd/kdc-db.ok $tmppwd/kdc-db.kadm5 \
-	$tmppwd/kdc-db.kadm5.lock \
+	$tmppwd/kdc-db.kadm5.lock $tmppwd/kdc-db.mdb $tmppwd/kdc-db.mdb-lock \
+	$tmppwd/kdc-db.lockout.mdb $tmppwd/kdc-db.lockout.mdb-lock \
 	$tmppwd/kdc-db.ulog \
 	$tmppwd/slave-db $tmppwd/slave-db.ok $tmppwd/slave-db.kadm5 $tmppwd/slave-db.kadm5.lock \
 	$tmppwd/slave-db~ $tmppwd/slave-db~.ok $tmppwd/slave-db~.kadm5 $tmppwd/slave-db~.kadm5.lock
@@ -933,6 +934,7 @@ proc setup_krb5_conf { {type client} } {
     global mode
     global portbase
     global srcdir
+    global env
 
     set pkinit_certs [findfile "[pwd]/$srcdir/pkinit-certs" "[pwd]/$srcdir/pkinit-certs" "$srcdir/pkinit-certs"]
     # Create a krb5.conf file.
@@ -980,7 +982,7 @@ proc setup_krb5_conf { {type client} } {
 	puts $conffile "		kdc = $hostname:[expr 1 + $portbase]"
 	puts $conffile "		admin_server = $hostname:[expr 4 + $portbase]"
 	puts $conffile "		kpasswd_server = $hostname:[expr 5 + $portbase]"
-	puts $conffile "		database_module = foo_db2"
+	puts $conffile "		database_module = db"
 	puts $conffile "	\}"
 	puts $conffile ""
 	puts $conffile "\[domain_realm\]"
@@ -993,8 +995,13 @@ proc setup_krb5_conf { {type client} } {
 	puts $conffile ""
 	puts $conffile "\[dbmodules\]"
 	puts $conffile "	db_module_dir = $tmppwd/../../../plugins/kdb"
-	puts $conffile "	foo_db2 = {"
-	puts $conffile "		db_library = db2"
+	puts $conffile "	db = {"
+	if [info exists env(K5TEST_LMDB)] {
+	    puts $conffile "		db_library = klmdb"
+	    puts $conffile "		nosync = true"
+	} else {
+	    puts $conffile "		db_library = db2"
+	}
 	puts $conffile "		database_name = $tmppwd/$type-db"
 	puts $conffile "	}"
 	close $conffile
diff --git a/src/tests/dejagnu/krb-standalone/kprop.exp b/src/tests/dejagnu/krb-standalone/kprop.exp
index cc1a26a..f23ebe1 100644
--- a/src/tests/dejagnu/krb-standalone/kprop.exp
+++ b/src/tests/dejagnu/krb-standalone/kprop.exp
@@ -6,7 +6,7 @@
 # processes when the procedure ends.
 
 proc setup_slave {} {
-    global tmppwd hostname REALMNAME
+    global tmppwd hostname REALMNAME env
     file delete $tmppwd/slave-stash $tmppwd/slave-acl
     file copy -force $tmppwd/stash:foo $tmppwd/slave-stash
     file copy -force $tmppwd/acl $tmppwd/slave-acl
@@ -16,7 +16,12 @@ proc setup_slave {} {
 	close $aclfile
     }
     file copy -force $tmppwd/adb.lock $tmppwd/slave-adb.lock
-    foreach suffix { {} .kadm5 .kadm5.lock .ok } {
+    if [info exists env(K5TEST_LMDB)] {
+	set suffixes { .mdb .mdb-lock .lockout.mdb .lockout.mdb-lock }
+    } else {
+	set suffixes { {} .kadm5 .kadm5.lock .ok }
+    }
+    foreach suffix $suffixes {
 	file copy -force $tmppwd/kdc-db$suffix $tmppwd/slave-db$suffix
     }
 }
diff --git a/src/tests/t_dump.py b/src/tests/t_dump.py
index 2a424d9..22cff00 100755
--- a/src/tests/t_dump.py
+++ b/src/tests/t_dump.py
@@ -2,82 +2,12 @@
 from k5test import *
 from filecmp import cmp
 
-# Make sure we can dump and load an ordinary database, and that
-# principals and policies survive a dump/load cycle.
-
-realm = K5Realm(start_kdc=False)
-realm.run([kadminl, 'addpol', 'fred'])
-
-# Create a dump file.
-dumpfile = os.path.join(realm.testdir, 'dump')
-realm.run([kdb5_util, 'dump', dumpfile])
-
-# Write additional policy records to the dump.  Use the 1.8 format for
-# one of them, to test retroactive compatibility (for issue #8213).
-f = open('testdir/dump', 'a')
-f.write('policy	compat	0	0	3	4	5	0	'
-        '0	0	0\n')
-f.write('policy	barney	0	0	1	1	1	0	'
-        '0	0	0	0	0	0	-	1	'
-        '2	28	'
-        'fd100f5064625f6372656174696f6e404b5242544553542e434f4d00\n')
-f.close()
-
-# Destroy and load the database; check that the policies exist.
-# Spot-check principal and policy fields.
-mark('reload after dump')
-realm.run([kdb5_util, 'destroy', '-f'])
-realm.run([kdb5_util, 'load', dumpfile])
-out = realm.run([kadminl, 'getprincs'])
-if realm.user_princ not in out or realm.host_princ not in out:
-    fail('Missing principal after load')
-out = realm.run([kadminl, 'getprinc', realm.user_princ])
-if 'Expiration date: [never]' not in out or 'MKey: vno 1' not in out:
-    fail('Principal has wrong value after load')
-out = realm.run([kadminl, 'getpols'])
-if 'fred\n' not in out or 'barney\n' not in out:
-    fail('Missing policy after load')
-realm.run([kadminl, 'getpol', 'compat'],
-          expected_msg='Number of old keys kept: 5')
-realm.run([kadminl, 'getpol', 'barney'],
-          expected_msg='Number of old keys kept: 1')
-
-# Dump/load again, and make sure everything is still there.
-mark('second reload')
-realm.run([kdb5_util, 'dump', dumpfile])
-realm.run([kdb5_util, 'load', dumpfile])
-out = realm.run([kadminl, 'getprincs'])
-if realm.user_princ not in out or realm.host_princ not in out:
-    fail('Missing principal after load')
-out = realm.run([kadminl, 'getpols'])
-if 'compat\n' not in out or 'fred\n' not in out or 'barney\n' not in out:
-    fail('Missing policy after second load')
-
-srcdumpdir = os.path.join(srctop, 'tests', 'dumpfiles')
-srcdump = os.path.join(srcdumpdir, 'dump')
-srcdump_r18 = os.path.join(srcdumpdir, 'dump.r18')
-srcdump_r13 = os.path.join(srcdumpdir, 'dump.r13')
-srcdump_b7 = os.path.join(srcdumpdir, 'dump.b7')
-srcdump_ov = os.path.join(srcdumpdir, 'dump.ov')
-
-# Load a dump file from the source directory.
-realm.run([kdb5_util, 'destroy', '-f'])
-realm.run([kdb5_util, 'load', srcdump])
-realm.run([kdb5_util, 'stash', '-P', 'master'])
-
 def dump_compare(realm, opt, srcfile):
     mark('dump comparison against %s' % os.path.basename(srcfile))
     realm.run([kdb5_util, 'dump'] + opt + [dumpfile])
     if not cmp(srcfile, dumpfile, False):
         fail('Dump output does not match %s' % srcfile)
 
-# Dump the resulting DB in each non-iprop format and compare with
-# expected outputs.
-dump_compare(realm, [], srcdump)
-dump_compare(realm, ['-r18'], srcdump_r18)
-dump_compare(realm, ['-r13'], srcdump_r13)
-dump_compare(realm, ['-b7'], srcdump_b7)
-dump_compare(realm, ['-ov'], srcdump_ov)
 
 def load_dump_check_compare(realm, opt, srcfile):
     mark('load check from %s' % os.path.basename(srcfile))
@@ -89,15 +19,85 @@ def load_dump_check_compare(realm, opt, srcfile):
     realm.run([kadminl, 'getpols'], expected_msg='testpol')
     dump_compare(realm, opt, srcfile)
 
-# Load each format of dump, check it, re-dump it, and compare.
-load_dump_check_compare(realm, ['-r18'], srcdump_r18)
-load_dump_check_compare(realm, ['-r13'], srcdump_r13)
-load_dump_check_compare(realm, ['-b7'], srcdump_b7)
 
-# Loading the last (-b7 format) dump won't have loaded the
-# per-principal kadm data.  Load that incrementally with -ov.
-realm.run([kadminl, 'getprinc', 'user'], expected_msg='Policy: [none]')
-realm.run([kdb5_util, 'load', '-update', '-ov', srcdump_ov])
-realm.run([kadminl, 'getprinc', 'user'], expected_msg='Policy: testpol')
+for realm in multidb_realms(start_kdc=False):
+
+    # Make sure we can dump and load an ordinary database, and that
+    # principals and policies survive a dump/load cycle.
+
+    realm.run([kadminl, 'addpol', 'fred'])
+
+    # Create a dump file.
+    dumpfile = os.path.join(realm.testdir, 'dump')
+    realm.run([kdb5_util, 'dump', dumpfile])
+
+    # Write additional policy records to the dump.  Use the 1.8 format for
+    # one of them, to test retroactive compatibility (for issue #8213).
+    f = open('testdir/dump', 'a')
+    f.write('policy\tcompat\t0\t0\t3\t4\t5\t0\t0\t0\t0\n')
+    f.write('policy\tbarney\t0\t0\t1\t1\t1\t0\t0\t0\t0\t0\t0\t0\t-\t1\t2\t28\t'
+            'fd100f5064625f6372656174696f6e404b5242544553542e434f4d00\n')
+    f.close()
+
+    # Destroy and load the database; check that the policies exist.
+    # Spot-check principal and policy fields.
+    mark('reload after dump')
+    realm.run([kdb5_util, 'destroy', '-f'])
+    realm.run([kdb5_util, 'load', dumpfile])
+    out = realm.run([kadminl, 'getprincs'])
+    if realm.user_princ not in out or realm.host_princ not in out:
+        fail('Missing principal after load')
+    out = realm.run([kadminl, 'getprinc', realm.user_princ])
+    if 'Expiration date: [never]' not in out or 'MKey: vno 1' not in out:
+        fail('Principal has wrong value after load')
+    out = realm.run([kadminl, 'getpols'])
+    if 'fred\n' not in out or 'barney\n' not in out:
+        fail('Missing policy after load')
+    realm.run([kadminl, 'getpol', 'compat'],
+              expected_msg='Number of old keys kept: 5')
+    realm.run([kadminl, 'getpol', 'barney'],
+              expected_msg='Number of old keys kept: 1')
+
+    # Dump/load again, and make sure everything is still there.
+    mark('second reload')
+    realm.run([kdb5_util, 'dump', dumpfile])
+    realm.run([kdb5_util, 'load', dumpfile])
+    out = realm.run([kadminl, 'getprincs'])
+    if realm.user_princ not in out or realm.host_princ not in out:
+        fail('Missing principal after load')
+    out = realm.run([kadminl, 'getpols'])
+    if 'compat\n' not in out or 'fred\n' not in out or 'barney\n' not in out:
+        fail('Missing policy after second load')
+
+    srcdumpdir = os.path.join(srctop, 'tests', 'dumpfiles')
+    srcdump = os.path.join(srcdumpdir, 'dump')
+    srcdump_r18 = os.path.join(srcdumpdir, 'dump.r18')
+    srcdump_r13 = os.path.join(srcdumpdir, 'dump.r13')
+    srcdump_b7 = os.path.join(srcdumpdir, 'dump.b7')
+    srcdump_ov = os.path.join(srcdumpdir, 'dump.ov')
+
+    # Load a dump file from the source directory.
+    realm.run([kdb5_util, 'destroy', '-f'])
+    realm.run([kdb5_util, 'load', srcdump])
+    realm.run([kdb5_util, 'stash', '-P', 'master'])
+
+    # Dump the resulting DB in each non-iprop format and compare with
+    # expected outputs.
+    dump_compare(realm, [], srcdump)
+    dump_compare(realm, ['-r18'], srcdump_r18)
+    dump_compare(realm, ['-r13'], srcdump_r13)
+    dump_compare(realm, ['-b7'], srcdump_b7)
+    dump_compare(realm, ['-ov'], srcdump_ov)
+
+    # Load each format of dump, check it, re-dump it, and compare.
+    load_dump_check_compare(realm, ['-r18'], srcdump_r18)
+    load_dump_check_compare(realm, ['-r13'], srcdump_r13)
+    load_dump_check_compare(realm, ['-b7'], srcdump_b7)
+
+    # Loading the last (-b7 format) dump won't have loaded the
+    # per-principal kadm data.  Load that incrementally with -ov.
+    realm.run([kadminl, 'getprinc', 'user'], expected_msg='Policy: [none]')
+    realm.run([kdb5_util, 'load', '-update', '-ov', srcdump_ov])
+    realm.run([kadminl, 'getprinc', 'user'], expected_msg='Policy: testpol')
 
 success('Dump/load tests')
diff --git a/src/tests/t_iprop.py b/src/tests/t_iprop.py
index 13ef1ca..54b4098 100755
--- a/src/tests/t_iprop.py
+++ b/src/tests/t_iprop.py
@@ -129,340 +129,354 @@ conf_slave2 = {'realms': {'$realm': {'iprop_slave_poll': '600',
 
 conf_foo = {'libdefaults': {'default_realm': 'FOO'},
             'domain_realm': {hostname: 'FOO'}}
-
-realm = K5Realm(kdc_conf=conf, create_user=False, start_kadmind=True)
-slave1 = realm.special_env('slave1', True, kdc_conf=conf_slave1)
-slave1m = realm.special_env('slave1m', True, krb5_conf=conf_foo,
-                            kdc_conf=conf_slave1m)
-slave2 = realm.special_env('slave2', True, kdc_conf=conf_slave2)
-
-# A default_realm and domain_realm that do not match the KDC's realm.
-# The FOO realm iprop_logfile setting is needed to run kproplog during
-# a slave3 test, since kproplog has no realm option.
 conf_slave3 = {'realms': {'$realm': {'iprop_slave_poll': '600',
                                      'iprop_logfile': '$testdir/ulog.slave3',
                                      'iprop_port': '$port8'},
                           'FOO': {'iprop_logfile': '$testdir/ulog.slave3'}},
                'dbmodules': {'db': {'database_name': '$testdir/db.slave3'}}}
-slave3 = realm.special_env('slave3', True, krb5_conf=conf_foo,
-                           kdc_conf=conf_slave3)
 
-# A default realm and a domain realm map that differ.
 krb5_conf_slave4 = {'domain_realm': {hostname: 'FOO'}}
 conf_slave4 = {'realms': {'$realm': {'iprop_slave_poll': '600',
                                      'iprop_logfile': '$testdir/ulog.slave4',
                                      'iprop_port': '$port8'}},
                'dbmodules': {'db': {'database_name': '$testdir/db.slave4'}}}
-slave4 = realm.special_env('slave4', True, krb5_conf=krb5_conf_slave4,
-                            kdc_conf=conf_slave4)
-
-# Define some principal names.  pr3 is long enough to cause internal
-# reallocs, but not long enough to grow the basic ulog entry size.
-pr1 = 'wakawaka@' + realm.realm
-pr2 = 'w@' + realm.realm
-c = 'chocolate-flavored-school-bus'
-cs = c + '/'
-pr3 = (cs + cs + cs + cs + cs + cs + cs + cs + cs + cs + cs + cs + c +
-       '@' + realm.realm)
-
-# Create the kpropd ACL file.
-acl_file = os.path.join(realm.testdir, 'kpropd-acl')
-acl = open(acl_file, 'w')
-acl.write(realm.host_princ + '\n')
-acl.close()
-
-ulog = os.path.join(realm.testdir, 'db.ulog')
-if not os.path.exists(ulog):
-    fail('update log not created: ' + ulog)
-
-# Create the principal used to authenticate kpropd to kadmind.
-kiprop_princ = 'kiprop/' + hostname
-realm.extract_keytab(kiprop_princ, realm.keytab)
-
-# Create the initial slave databases.
-dumpfile = os.path.join(realm.testdir, 'dump')
-realm.run([kdb5_util, 'dump', dumpfile])
-realm.run([kdb5_util, 'load', dumpfile], slave1)
-realm.run([kdb5_util, 'load', dumpfile], slave2)
-realm.run([kdb5_util, '-r', realm.realm, 'load', dumpfile], slave3)
-realm.run([kdb5_util, 'load', dumpfile], slave4)
-
-# Reinitialize the master ulog so we know exactly what to expect in
-# it.
-realm.run([kproplog, '-R'])
-check_ulog(1, 1, 1, [None])
-
-# Make some changes to the master DB.
-realm.addprinc(pr1)
-realm.addprinc(pr3)
-realm.addprinc(pr2)
-realm.run([kadminl, 'modprinc', '-allow_tix', pr2])
-realm.run([kadminl, 'modprinc', '+allow_tix', pr2])
-check_ulog(6, 1, 6, [None, pr1, pr3, pr2, pr2, pr2])
-
-# Start kpropd for slave1 and get a full dump from master.
-mark('propagate M->1 full')
-kpropd1 = realm.start_kpropd(slave1, ['-d'])
-wait_for_prop(kpropd1, True, 1, 6)
-out = realm.run([kadminl, 'listprincs'], env=slave1)
-if pr1 not in out or pr2 not in out or pr3 not in out:
-    fail('slave1 does not have all principals from master')
-check_ulog(1, 6, 6, [None], slave1)
-
-# Make a change and check that it propagates incrementally.
-mark('propagate M->1 incremental')
-realm.run([kadminl, 'modprinc', '-allow_tix', pr2])
-check_ulog(7, 1, 7, [None, pr1, pr3, pr2, pr2, pr2, pr2])
-kpropd1.send_signal(signal.SIGUSR1)
-wait_for_prop(kpropd1, False, 6, 7)
-check_ulog(2, 6, 7, [None, pr2], slave1)
-realm.run([kadminl, 'getprinc', pr2], env=slave1,
-          expected_msg='Attributes: DISALLOW_ALL_TIX')
-
-# Start kadmind -proponly for slave1.  (Use the slave1m environment
-# which defines iprop_port to $port8.)
-slave1_out_dump_path = os.path.join(realm.testdir, 'dump.slave1.out')
-slave2_in_dump_path = os.path.join(realm.testdir, 'dump.slave2.in')
-slave2_kprop_port = str(realm.portbase + 9)
-realm.start_server([kadmind, '-r', realm.realm, '-nofork', '-proponly', '-W',
-                    '-p', kdb5_util, '-K', kprop, '-k', slave2_kprop_port,
-                    '-F', slave1_out_dump_path], 'starting...', slave1m)
-
-# Test similar default_realm and domain_realm map settings with -r realm.
-mark('propagate 1->3 full')
-slave3_in_dump_path = os.path.join(realm.testdir, 'dump.slave3.in')
-kpropd3 = realm.start_server([kpropd, '-d', '-D', '-r', realm.realm, '-P',
-                              slave2_kprop_port, '-f', slave3_in_dump_path,
-                              '-p', kdb5_util, '-a', acl_file, '-A', hostname],
-                             'ready', slave3)
-wait_for_prop(kpropd3, True, 1, 7)
-out = realm.run([kadminl, '-r', realm.realm, 'listprincs'], env=slave3)
-if pr1 not in out or pr2 not in out or pr3 not in out:
-    fail('slave3 does not have all principals from slave1')
-check_ulog(1, 7, 7, [None], env=slave3)
-
-# Test an incremental propagation for the kpropd -r case.
-mark('propagate M->1->3 incremental')
-realm.run([kadminl, 'modprinc', '-maxlife', '20 minutes', pr1])
-check_ulog(8, 1, 8, [None, pr1, pr3, pr2, pr2, pr2, pr2, pr1])
-kpropd1.send_signal(signal.SIGUSR1)
-wait_for_prop(kpropd1, False, 7, 8)
-check_ulog(3, 6, 8, [None, pr2, pr1], slave1)
-realm.run([kadminl, 'getprinc', pr1], env=slave1,
-          expected_msg='Maximum ticket life: 0 days 00:20:00')
-kpropd3.send_signal(signal.SIGUSR1)
-wait_for_prop(kpropd3, False, 7, 8)
-check_ulog(2, 7, 8, [None, pr1], slave3)
-realm.run([kadminl, '-r', realm.realm, 'getprinc', pr1], env=slave3,
-          expected_msg='Maximum ticket life: 0 days 00:20:00')
-stop_daemon(kpropd3)
-
-# Test dissimilar default_realm and domain_realm map settings (no -r realm).
-mark('propagate 1->4 full')
-slave4_in_dump_path = os.path.join(realm.testdir, 'dump.slave4.in')
-kpropd4 = realm.start_server([kpropd, '-d', '-D', '-P', slave2_kprop_port,
-                              '-f', slave4_in_dump_path, '-p', kdb5_util,
-                              '-a', acl_file, '-A', hostname], 'ready', slave4)
-wait_for_prop(kpropd4, True, 1, 8)
-out = realm.run([kadminl, 'listprincs'], env=slave4)
-if pr1 not in out or pr2 not in out or pr3 not in out:
-    fail('slave4 does not have all principals from slave1')
-stop_daemon(kpropd4)
-
-# Start kpropd for slave2.  The -A option isn't needed since we're
-# talking to the same host as master (we specify it anyway to exercise
-# the code), but slave2 defines iprop_port to $port8 so it will talk
-# to slave1.  Get a full dump from slave1.
-mark('propagate 1->2 full')
-kpropd2 = realm.start_server([kpropd, '-d', '-D', '-P', slave2_kprop_port,
-                              '-f', slave2_in_dump_path, '-p', kdb5_util,
-                              '-a', acl_file, '-A', hostname], 'ready', slave2)
-wait_for_prop(kpropd2, True, 1, 8)
-check_ulog(2, 7, 8, [None, pr1], slave2)
-out = realm.run([kadminl, 'listprincs'], env=slave1)
-if pr1 not in out or pr2 not in out or pr3 not in out:
-    fail('slave2 does not have all principals from slave1')
-
-# Make another change and check that it propagates incrementally to
-# both slaves.
-mark('propagate M->1->2 incremental')
-realm.run([kadminl, 'modprinc', '-maxrenewlife', '22 hours', pr1])
-check_ulog(9, 1, 9, [None, pr1, pr3, pr2, pr2, pr2, pr2, pr1, pr1])
-kpropd1.send_signal(signal.SIGUSR1)
-wait_for_prop(kpropd1, False, 8, 9)
-check_ulog(4, 6, 9, [None, pr2, pr1, pr1], slave1)
-realm.run([kadminl, 'getprinc', pr1], env=slave1,
-          expected_msg='Maximum renewable life: 0 days 22:00:00\n')
-kpropd2.send_signal(signal.SIGUSR1)
-wait_for_prop(kpropd2, False, 8, 9)
-check_ulog(3, 7, 9, [None, pr1, pr1], slave2)
-realm.run([kadminl, 'getprinc', pr1], env=slave2,
-          expected_msg='Maximum renewable life: 0 days 22:00:00\n')
-
-# Reset the ulog on slave1 to force a full resync from master.  The
-# resync will use the old dump file and then propagate changes.
-# slave2 should still be in sync with slave1 after the resync, so make
-# sure it doesn't take a full resync.
-mark('propagate M->1->2 full')
-realm.run([kproplog, '-R'], slave1)
-check_ulog(1, 1, 1, [None], slave1)
-kpropd1.send_signal(signal.SIGUSR1)
-wait_for_prop(kpropd1, True, 1, 9)
-check_ulog(4, 6, 9, [None, pr2, pr1, pr1], slave1)
-kpropd2.send_signal(signal.SIGUSR1)
-wait_for_prop(kpropd2, False, 9, 9)
-check_ulog(3, 7, 9, [None, pr1, pr1], slave2)
-
-# Make another change and check that it propagates incrementally to
-# both slaves.
-mark('propagate M->1->2 incremental (after reset)')
-realm.run([kadminl, 'modprinc', '+allow_tix', pr2])
-check_ulog(10, 1, 10, [None, pr1, pr3, pr2, pr2, pr2, pr2, pr1, pr1, pr2])
-kpropd1.send_signal(signal.SIGUSR1)
-wait_for_prop(kpropd1, False, 9, 10)
-check_ulog(5, 6, 10, [None, pr2, pr1, pr1, pr2], slave1)
-realm.run([kadminl, 'getprinc', pr2], env=slave1, expected_msg='Attributes:\n')
-kpropd2.send_signal(signal.SIGUSR1)
-wait_for_prop(kpropd2, False, 9, 10)
-check_ulog(4, 7, 10, [None, pr1, pr1, pr2], slave2)
-realm.run([kadminl, 'getprinc', pr2], env=slave2, expected_msg='Attributes:\n')
-
-# Create a policy and check that it propagates via full resync.
-mark('propagate M->1->2 full (new policy)')
-realm.run([kadminl, 'addpol', '-minclasses', '2', 'testpol'])
-check_ulog(1, 1, 1, [None])
-kpropd1.send_signal(signal.SIGUSR1)
-wait_for_prop(kpropd1, True, 10, 1)
-check_ulog(1, 1, 1, [None], slave1)
-realm.run([kadminl, 'getpol', 'testpol'], env=slave1,
-          expected_msg='Minimum number of password character classes: 2')
-kpropd2.send_signal(signal.SIGUSR1)
-wait_for_prop(kpropd2, True, 10, 1)
-check_ulog(1, 1, 1, [None], slave2)
-realm.run([kadminl, 'getpol', 'testpol'], env=slave2,
-          expected_msg='Minimum number of password character classes: 2')
-
-# Modify the policy and test that it also propagates via full resync.
-mark('propagate M->1->2 full (policy change)')
-realm.run([kadminl, 'modpol', '-minlength', '17', 'testpol'])
-check_ulog(1, 1, 1, [None])
-kpropd1.send_signal(signal.SIGUSR1)
-wait_for_prop(kpropd1, True, 1, 1)
-check_ulog(1, 1, 1, [None], slave1)
-realm.run([kadminl, 'getpol', 'testpol'], env=slave1,
-          expected_msg='Minimum password length: 17')
-kpropd2.send_signal(signal.SIGUSR1)
-wait_for_prop(kpropd2, True, 1, 1)
-check_ulog(1, 1, 1, [None], slave2)
-realm.run([kadminl, 'getpol', 'testpol'], env=slave2,
-          expected_msg='Minimum password length: 17')
-
-# Delete the policy and test that it propagates via full resync.
-mark('propgate M->1->2 full (policy delete)')
-realm.run([kadminl, 'delpol', 'testpol'])
-check_ulog(1, 1, 1, [None])
-kpropd1.send_signal(signal.SIGUSR1)
-wait_for_prop(kpropd1, True, 1, 1)
-check_ulog(1, 1, 1, [None], slave1)
-realm.run([kadminl, 'getpol', 'testpol'], env=slave1, expected_code=1,
-          expected_msg='Policy does not exist')
-kpropd2.send_signal(signal.SIGUSR1)
-wait_for_prop(kpropd2, True, 1, 1)
-check_ulog(1, 1, 1, [None], slave2)
-realm.run([kadminl, 'getpol', 'testpol'], env=slave2, expected_code=1,
-          expected_msg='Policy does not exist')
-
-# Modify a principal on the master and test that it propagates incrementally.
-mark('propagate M->1->2 incremental (after policy changes)')
-realm.run([kadminl, 'modprinc', '-maxlife', '10 minutes', pr1])
-check_ulog(2, 1, 2, [None, pr1])
-kpropd1.send_signal(signal.SIGUSR1)
-wait_for_prop(kpropd1, False, 1, 2)
-check_ulog(2, 1, 2, [None, pr1], slave1)
-realm.run([kadminl, 'getprinc', pr1], env=slave1,
-          expected_msg='Maximum ticket life: 0 days 00:10:00')
-kpropd2.send_signal(signal.SIGUSR1)
-wait_for_prop(kpropd2, False, 1, 2)
-check_ulog(2, 1, 2, [None, pr1], slave2)
-realm.run([kadminl, 'getprinc', pr1], env=slave2,
-          expected_msg='Maximum ticket life: 0 days 00:10:00')
-
-# Delete a principal and test that it propagates incrementally.
-mark('propagate M->1->2 incremental (princ delete)')
-realm.run([kadminl, 'delprinc', pr3])
-check_ulog(3, 1, 3, [None, pr1, pr3])
-kpropd1.send_signal(signal.SIGUSR1)
-wait_for_prop(kpropd1, False, 2, 3)
-check_ulog(3, 1, 3, [None, pr1, pr3], slave1)
-realm.run([kadminl, 'getprinc', pr3], env=slave1, expected_code=1,
-          expected_msg='Principal does not exist')
-kpropd2.send_signal(signal.SIGUSR1)
-wait_for_prop(kpropd2, False, 2, 3)
-check_ulog(3, 1, 3, [None, pr1, pr3], slave2)
-realm.run([kadminl, 'getprinc', pr3], env=slave2, expected_code=1,
-          expected_msg='Principal does not exist')
-
-# Rename a principal and test that it propagates incrementally.
-mark('propagate M->1->2 incremental (princ rename)')
-renpr = "quacked@" + realm.realm
-realm.run([kadminl, 'renprinc', pr1, renpr])
-check_ulog(6, 1, 6, [None, pr1, pr3, renpr, pr1, renpr])
-kpropd1.send_signal(signal.SIGUSR1)
-wait_for_prop(kpropd1, False, 3, 6)
-check_ulog(6, 1, 6, [None, pr1, pr3, renpr, pr1, renpr], slave1)
-realm.run([kadminl, 'getprinc', pr1], env=slave1, expected_code=1,
-          expected_msg='Principal does not exist')
-realm.run([kadminl, 'getprinc', renpr], env=slave1)
-kpropd2.send_signal(signal.SIGUSR1)
-wait_for_prop(kpropd2, False, 3, 6)
-check_ulog(6, 1, 6, [None, pr1, pr3, renpr, pr1, renpr], slave2)
-realm.run([kadminl, 'getprinc', pr1], env=slave2, expected_code=1,
-          expected_msg='Principal does not exist')
-realm.run([kadminl, 'getprinc', renpr], env=slave2)
-
-pr1 = renpr
-
-# Reset the ulog on the master to force a full resync.
-mark('propagate M->1->2 full (ulog reset)')
-realm.run([kproplog, '-R'])
-check_ulog(1, 1, 1, [None])
-kpropd1.send_signal(signal.SIGUSR1)
-wait_for_prop(kpropd1, True, 6, 1)
-check_ulog(1, 1, 1, [None], slave1)
-kpropd2.send_signal(signal.SIGUSR1)
-wait_for_prop(kpropd2, True, 6, 1)
-check_ulog(1, 1, 1, [None], slave2)
-
-# Stop the kprop daemons so we can test kpropd -t.
-stop_daemon(kpropd1)
-stop_daemon(kpropd2)
-mark('kpropd -t')
-
-# Test the case where no updates are needed.
-out = realm.run_kpropd_once(slave1, ['-d'])
-if 'KDC is synchronized' not in out:
-    fail('Expected synchronized from kpropd -t')
-check_ulog(1, 1, 1, [None], slave1)
-
-# Make a change on the master and fetch it incrementally.
-realm.run([kadminl, 'modprinc', '-maxlife', '5 minutes', pr1])
-check_ulog(2, 1, 2, [None, pr1])
-out = realm.run_kpropd_once(slave1, ['-d'])
-if 'Got incremental updates (sno=2 ' not in out:
-    fail('Expected full dump and synchronized from kpropd -t')
-check_ulog(2, 1, 2, [None, pr1], slave1)
-realm.run([kadminl, 'getprinc', pr1], env=slave1,
-          expected_msg='Maximum ticket life: 0 days 00:05:00')
-
-# Propagate a policy change via full resync.
-realm.run([kadminl, 'addpol', '-minclasses', '3', 'testpol'])
-check_ulog(1, 1, 1, [None])
-out = realm.run_kpropd_once(slave1, ['-d'])
-if ('Full propagation transfer finished' not in out or
-    'KDC is synchronized' not in out):
-    fail('Expected full dump and synchronized from kpropd -t')
-check_ulog(1, 1, 1, [None], slave1)
-realm.run([kadminl, 'getpol', 'testpol'], env=slave1,
-          expected_msg='Minimum number of password character classes: 3')
+
+for realm in multidb_realms(kdc_conf=conf, create_user=False,
+                            start_kadmind=True):
+    slave1 = realm.special_env('slave1', True, kdc_conf=conf_slave1)
+    slave1m = realm.special_env('slave1m', True, krb5_conf=conf_foo,
+                                kdc_conf=conf_slave1m)
+    slave2 = realm.special_env('slave2', True, kdc_conf=conf_slave2)
+
+    # A default_realm and domain_realm that do not match the KDC's
+    # realm.  The FOO realm iprop_logfile setting is needed to run
+    # kproplog during a slave3 test, since kproplog has no realm
+    # option.
+    slave3 = realm.special_env('slave3', True, krb5_conf=conf_foo,
+                               kdc_conf=conf_slave3)
+
+    # A default realm and a domain realm map that differ.
+    slave4 = realm.special_env('slave4', True, krb5_conf=krb5_conf_slave4,
+                               kdc_conf=conf_slave4)
+
+    # Define some principal names.  pr3 is long enough to cause internal
+    # reallocs, but not long enough to grow the basic ulog entry size.
+    pr1 = 'wakawaka@' + realm.realm
+    pr2 = 'w@' + realm.realm
+    c = 'chocolate-flavored-school-bus'
+    cs = c + '/'
+    pr3 = (cs + cs + cs + cs + cs + cs + cs + cs + cs + cs + cs + cs + c +
+           '@' + realm.realm)
+
+    # Create the kpropd ACL file.
+    acl_file = os.path.join(realm.testdir, 'kpropd-acl')
+    acl = open(acl_file, 'w')
+    acl.write(realm.host_princ + '\n')
+    acl.close()
+
+    ulog = os.path.join(realm.testdir, 'db.ulog')
+    if not os.path.exists(ulog):
+        fail('update log not created: ' + ulog)
+
+    # Create the principal used to authenticate kpropd to kadmind.
+    kiprop_princ = 'kiprop/' + hostname
+    realm.extract_keytab(kiprop_princ, realm.keytab)
+
+    # Create the initial slave databases.
+    dumpfile = os.path.join(realm.testdir, 'dump')
+    realm.run([kdb5_util, 'dump', dumpfile])
+    realm.run([kdb5_util, 'load', dumpfile], slave1)
+    realm.run([kdb5_util, 'load', dumpfile], slave2)
+    realm.run([kdb5_util, '-r', realm.realm, 'load', dumpfile], slave3)
+    realm.run([kdb5_util, 'load', dumpfile], slave4)
+
+    # Reinitialize the master ulog so we know exactly what to expect in
+    # it.
+    realm.run([kproplog, '-R'])
+    check_ulog(1, 1, 1, [None])
+
+    # Make some changes to the master DB.
+    realm.addprinc(pr1)
+    realm.addprinc(pr3)
+    realm.addprinc(pr2)
+    realm.run([kadminl, 'modprinc', '-allow_tix', pr2])
+    realm.run([kadminl, 'modprinc', '+allow_tix', pr2])
+    check_ulog(6, 1, 6, [None, pr1, pr3, pr2, pr2, pr2])
+
+    # Start kpropd for slave1 and get a full dump from master.
+    mark('propagate M->1 full')
+    kpropd1 = realm.start_kpropd(slave1, ['-d'])
+    wait_for_prop(kpropd1, True, 1, 6)
+    out = realm.run([kadminl, 'listprincs'], env=slave1)
+    if pr1 not in out or pr2 not in out or pr3 not in out:
+        fail('slave1 does not have all principals from master')
+    check_ulog(1, 6, 6, [None], slave1)
+
+    # Make a change and check that it propagates incrementally.
+    mark('propagate M->1 incremental')
+    realm.run([kadminl, 'modprinc', '-allow_tix', pr2])
+    check_ulog(7, 1, 7, [None, pr1, pr3, pr2, pr2, pr2, pr2])
+    kpropd1.send_signal(signal.SIGUSR1)
+    wait_for_prop(kpropd1, False, 6, 7)
+    check_ulog(2, 6, 7, [None, pr2], slave1)
+    realm.run([kadminl, 'getprinc', pr2], env=slave1,
+              expected_msg='Attributes: DISALLOW_ALL_TIX')
+
+    # Start kadmind -proponly for slave1.  (Use the slave1m environment
+    # which defines iprop_port to $port8.)
+    slave1_out_dump_path = os.path.join(realm.testdir, 'dump.slave1.out')
+    slave2_in_dump_path = os.path.join(realm.testdir, 'dump.slave2.in')
+    slave2_kprop_port = str(realm.portbase + 9)
+    kadmind_proponly = realm.start_server([kadmind, '-r', realm.realm,
+                                           '-nofork', '-proponly',
+                                           '-W', '-p', kdb5_util,
+                                           '-K', kprop, '-k',
+                                           slave2_kprop_port,
+                                           '-F', slave1_out_dump_path],
+                                          'starting...', slave1m)
+
+    # Test similar default_realm and domain_realm map settings with -r realm.
+    mark('propagate 1->3 full')
+    slave3_in_dump_path = os.path.join(realm.testdir, 'dump.slave3.in')
+    kpropd3 = realm.start_server([kpropd, '-d', '-D', '-r', realm.realm, '-P',
+                                  slave2_kprop_port, '-f', slave3_in_dump_path,
+                                  '-p', kdb5_util, '-a', acl_file,
+                                  '-A', hostname], 'ready', slave3)
+    wait_for_prop(kpropd3, True, 1, 7)
+    out = realm.run([kadminl, '-r', realm.realm, 'listprincs'], env=slave3)
+    if pr1 not in out or pr2 not in out or pr3 not in out:
+        fail('slave3 does not have all principals from slave1')
+    check_ulog(1, 7, 7, [None], slave3)
+
+    # Test an incremental propagation for the kpropd -r case.
+    mark('propagate M->1->3 incremental')
+    realm.run([kadminl, 'modprinc', '-maxlife', '20 minutes', pr1])
+    check_ulog(8, 1, 8, [None, pr1, pr3, pr2, pr2, pr2, pr2, pr1])
+    kpropd1.send_signal(signal.SIGUSR1)
+    wait_for_prop(kpropd1, False, 7, 8)
+    check_ulog(3, 6, 8, [None, pr2, pr1], slave1)
+    realm.run([kadminl, 'getprinc', pr1], env=slave1,
+              expected_msg='Maximum ticket life: 0 days 00:20:00')
+    kpropd3.send_signal(signal.SIGUSR1)
+    wait_for_prop(kpropd3, False, 7, 8)
+    check_ulog(2, 7, 8, [None, pr1], slave3)
+    realm.run([kadminl, '-r', realm.realm, 'getprinc', pr1], env=slave3,
+              expected_msg='Maximum ticket life: 0 days 00:20:00')
+    stop_daemon(kpropd3)
+
+    # Test dissimilar default_realm and domain_realm map settings (no
+    # -r realm).
+    mark('propagate 1->4 full')
+    slave4_in_dump_path = os.path.join(realm.testdir, 'dump.slave4.in')
+    kpropd4 = realm.start_server([kpropd, '-d', '-D', '-P', slave2_kprop_port,
+                                  '-f', slave4_in_dump_path, '-p', kdb5_util,
+                                  '-a', acl_file, '-A', hostname], 'ready',
+                                 slave4)
+    wait_for_prop(kpropd4, True, 1, 8)
+    out = realm.run([kadminl, 'listprincs'], env=slave4)
+    if pr1 not in out or pr2 not in out or pr3 not in out:
+        fail('slave4 does not have all principals from slave1')
+    stop_daemon(kpropd4)
+
+    # Start kpropd for slave2.  The -A option isn't needed since we're
+    # talking to the same host as master (we specify it anyway to
+    # exercise the code), but slave2 defines iprop_port to $port8 so
+    # it will talk to slave1.  Get a full dump from slave1.
+    mark('propagate 1->2 full')
+    kpropd2 = realm.start_server([kpropd, '-d', '-D', '-P', slave2_kprop_port,
+                                  '-f', slave2_in_dump_path, '-p', kdb5_util,
+                                  '-a', acl_file, '-A', hostname], 'ready',
+                                 slave2)
+    wait_for_prop(kpropd2, True, 1, 8)
+    check_ulog(2, 7, 8, [None, pr1], slave2)
+    out = realm.run([kadminl, 'listprincs'], env=slave2)
+    if pr1 not in out or pr2 not in out or pr3 not in out:
+        fail('slave2 does not have all principals from slave1')
+
+    # Make another change and check that it propagates incrementally
+    # to both slaves.
+    mark('propagate M->1->2 incremental')
+    realm.run([kadminl, 'modprinc', '-maxrenewlife', '22 hours', pr1])
+    check_ulog(9, 1, 9, [None, pr1, pr3, pr2, pr2, pr2, pr2, pr1, pr1])
+    kpropd1.send_signal(signal.SIGUSR1)
+    wait_for_prop(kpropd1, False, 8, 9)
+    check_ulog(4, 6, 9, [None, pr2, pr1, pr1], slave1)
+    realm.run([kadminl, 'getprinc', pr1], env=slave1,
+              expected_msg='Maximum renewable life: 0 days 22:00:00\n')
+    kpropd2.send_signal(signal.SIGUSR1)
+    wait_for_prop(kpropd2, False, 8, 9)
+    check_ulog(3, 7, 9, [None, pr1, pr1], slave2)
+    realm.run([kadminl, 'getprinc', pr1], env=slave2,
+              expected_msg='Maximum renewable life: 0 days 22:00:00\n')
+
+    # Reset the ulog on slave1 to force a full resync from master.
+    # The resync will use the old dump file and then propagate
+    # changes.  slave2 should still be in sync with slave1 after the
+    # resync, so make sure it doesn't take a full resync.
+    mark('propagate M->1->2 full')
+    realm.run([kproplog, '-R'], slave1)
+    check_ulog(1, 1, 1, [None], slave1)
+    kpropd1.send_signal(signal.SIGUSR1)
+    wait_for_prop(kpropd1, True, 1, 9)
+    check_ulog(4, 6, 9, [None, pr2, pr1, pr1], slave1)
+    kpropd2.send_signal(signal.SIGUSR1)
+    wait_for_prop(kpropd2, False, 9, 9)
+    check_ulog(3, 7, 9, [None, pr1, pr1], slave2)
+
+    # Make another change and check that it propagates incrementally to
+    # both slaves.
+    mark('propagate M->1->2 incremental (after reset)')
+    realm.run([kadminl, 'modprinc', '+allow_tix', pr2])
+    check_ulog(10, 1, 10, [None, pr1, pr3, pr2, pr2, pr2, pr2, pr1, pr1, pr2])
+    kpropd1.send_signal(signal.SIGUSR1)
+    wait_for_prop(kpropd1, False, 9, 10)
+    check_ulog(5, 6, 10, [None, pr2, pr1, pr1, pr2], slave1)
+    realm.run([kadminl, 'getprinc', pr2], env=slave1,
+              expected_msg='Attributes:\n')
+    kpropd2.send_signal(signal.SIGUSR1)
+    wait_for_prop(kpropd2, False, 9, 10)
+    check_ulog(4, 7, 10, [None, pr1, pr1, pr2], slave2)
+    realm.run([kadminl, 'getprinc', pr2], env=slave2,
+              expected_msg='Attributes:\n')
+
+    # Create a policy and check that it propagates via full resync.
+    mark('propagate M->1->2 full (new policy)')
+    realm.run([kadminl, 'addpol', '-minclasses', '2', 'testpol'])
+    check_ulog(1, 1, 1, [None])
+    kpropd1.send_signal(signal.SIGUSR1)
+    wait_for_prop(kpropd1, True, 10, 1)
+    check_ulog(1, 1, 1, [None], slave1)
+    realm.run([kadminl, 'getpol', 'testpol'], env=slave1,
+              expected_msg='Minimum number of password character classes: 2')
+    kpropd2.send_signal(signal.SIGUSR1)
+    wait_for_prop(kpropd2, True, 10, 1)
+    check_ulog(1, 1, 1, [None], slave2)
+    realm.run([kadminl, 'getpol', 'testpol'], env=slave2,
+              expected_msg='Minimum number of password character classes: 2')
+
+    # Modify the policy and test that it also propagates via full resync.
+    mark('propagate M->1->2 full (policy change)')
+    realm.run([kadminl, 'modpol', '-minlength', '17', 'testpol'])
+    check_ulog(1, 1, 1, [None])
+    kpropd1.send_signal(signal.SIGUSR1)
+    wait_for_prop(kpropd1, True, 1, 1)
+    check_ulog(1, 1, 1, [None], slave1)
+    realm.run([kadminl, 'getpol', 'testpol'], env=slave1,
+              expected_msg='Minimum password length: 17')
+    kpropd2.send_signal(signal.SIGUSR1)
+    wait_for_prop(kpropd2, True, 1, 1)
+    check_ulog(1, 1, 1, [None], slave2)
+    realm.run([kadminl, 'getpol', 'testpol'], env=slave2,
+              expected_msg='Minimum password length: 17')
+
+    # Delete the policy and test that it propagates via full resync.
+    mark('propagate M->1->2 full (policy delete)')
+    realm.run([kadminl, 'delpol', 'testpol'])
+    check_ulog(1, 1, 1, [None])
+    kpropd1.send_signal(signal.SIGUSR1)
+    wait_for_prop(kpropd1, True, 1, 1)
+    check_ulog(1, 1, 1, [None], slave1)
+    realm.run([kadminl, 'getpol', 'testpol'], env=slave1, expected_code=1,
+              expected_msg='Policy does not exist')
+    kpropd2.send_signal(signal.SIGUSR1)
+    wait_for_prop(kpropd2, True, 1, 1)
+    check_ulog(1, 1, 1, [None], slave2)
+    realm.run([kadminl, 'getpol', 'testpol'], env=slave2, expected_code=1,
+              expected_msg='Policy does not exist')
+
+    # Modify a principal on the master and test that it propagates
+    # incrementally.
+    mark('propagate M->1->2 incremental (after policy changes)')
+    realm.run([kadminl, 'modprinc', '-maxlife', '10 minutes', pr1])
+    check_ulog(2, 1, 2, [None, pr1])
+    kpropd1.send_signal(signal.SIGUSR1)
+    wait_for_prop(kpropd1, False, 1, 2)
+    check_ulog(2, 1, 2, [None, pr1], slave1)
+    realm.run([kadminl, 'getprinc', pr1], env=slave1,
+              expected_msg='Maximum ticket life: 0 days 00:10:00')
+    kpropd2.send_signal(signal.SIGUSR1)
+    wait_for_prop(kpropd2, False, 1, 2)
+    check_ulog(2, 1, 2, [None, pr1], slave2)
+    realm.run([kadminl, 'getprinc', pr1], env=slave2,
+              expected_msg='Maximum ticket life: 0 days 00:10:00')
+
+    # Delete a principal and test that it propagates incrementally.
+    mark('propagate M->1->2 incremental (princ delete)')
+    realm.run([kadminl, 'delprinc', pr3])
+    check_ulog(3, 1, 3, [None, pr1, pr3])
+    kpropd1.send_signal(signal.SIGUSR1)
+    wait_for_prop(kpropd1, False, 2, 3)
+    check_ulog(3, 1, 3, [None, pr1, pr3], slave1)
+    realm.run([kadminl, 'getprinc', pr3], env=slave1, expected_code=1,
+              expected_msg='Principal does not exist')
+    kpropd2.send_signal(signal.SIGUSR1)
+    wait_for_prop(kpropd2, False, 2, 3)
+    check_ulog(3, 1, 3, [None, pr1, pr3], slave2)
+    realm.run([kadminl, 'getprinc', pr3], env=slave2, expected_code=1,
+              expected_msg='Principal does not exist')
+
+    # Rename a principal and test that it propagates incrementally.
+    mark('propagate M->1->2 incremental (princ rename)')
+    renpr = "quacked@" + realm.realm
+    realm.run([kadminl, 'renprinc', pr1, renpr])
+    check_ulog(6, 1, 6, [None, pr1, pr3, renpr, pr1, renpr])
+    kpropd1.send_signal(signal.SIGUSR1)
+    wait_for_prop(kpropd1, False, 3, 6)
+    check_ulog(6, 1, 6, [None, pr1, pr3, renpr, pr1, renpr], slave1)
+    realm.run([kadminl, 'getprinc', pr1], env=slave1, expected_code=1,
+              expected_msg='Principal does not exist')
+    realm.run([kadminl, 'getprinc', renpr], env=slave1)
+    kpropd2.send_signal(signal.SIGUSR1)
+    wait_for_prop(kpropd2, False, 3, 6)
+    check_ulog(6, 1, 6, [None, pr1, pr3, renpr, pr1, renpr], slave2)
+    realm.run([kadminl, 'getprinc', pr1], env=slave2, expected_code=1,
+              expected_msg='Principal does not exist')
+    realm.run([kadminl, 'getprinc', renpr], env=slave2)
+
+    pr1 = renpr
+
+    # Reset the ulog on the master to force a full resync.
+    mark('propagate M->1->2 full (ulog reset)')
+    realm.run([kproplog, '-R'])
+    check_ulog(1, 1, 1, [None])
+    kpropd1.send_signal(signal.SIGUSR1)
+    wait_for_prop(kpropd1, True, 6, 1)
+    check_ulog(1, 1, 1, [None], slave1)
+    kpropd2.send_signal(signal.SIGUSR1)
+    wait_for_prop(kpropd2, True, 6, 1)
+    check_ulog(1, 1, 1, [None], slave2)
+
+    # Stop the kprop daemons so we can test kpropd -t.
+    realm.stop_kpropd(kpropd1)
+    stop_daemon(kpropd2)
+    stop_daemon(kadmind_proponly)
+    mark('kpropd -t')
+
+    # Test the case where no updates are needed.
+    out = realm.run_kpropd_once(slave1, ['-d'])
+    if 'KDC is synchronized' not in out:
+        fail('Expected synchronized from kpropd -t')
+    check_ulog(1, 1, 1, [None], slave1)
+
+    # Make a change on the master and fetch it incrementally.
+    realm.run([kadminl, 'modprinc', '-maxlife', '5 minutes', pr1])
+    check_ulog(2, 1, 2, [None, pr1])
+    out = realm.run_kpropd_once(slave1, ['-d'])
+    if 'Got incremental updates (sno=2 ' not in out:
+        fail('Expected incremental updates from kpropd -t')
+    check_ulog(2, 1, 2, [None, pr1], slave1)
+    realm.run([kadminl, 'getprinc', pr1], env=slave1,
+              expected_msg='Maximum ticket life: 0 days 00:05:00')
+
+    # Propagate a policy change via full resync.
+    realm.run([kadminl, 'addpol', '-minclasses', '3', 'testpol'])
+    check_ulog(1, 1, 1, [None])
+    out = realm.run_kpropd_once(slave1, ['-d'])
+    if ('Full propagation transfer finished' not in out or
+        'KDC is synchronized' not in out):
+        fail('Expected full dump and synchronized from kpropd -t')
+    check_ulog(1, 1, 1, [None], slave1)
+    realm.run([kadminl, 'getpol', 'testpol'], env=slave1,
+              expected_msg='Minimum number of password character classes: 3')
 
 success('iprop tests')
diff --git a/src/tests/t_kdb.py b/src/tests/t_kdb.py
index 8438a45..78de223 100755
--- a/src/tests/t_kdb.py
+++ b/src/tests/t_kdb.py
@@ -3,9 +3,9 @@ from k5test import *
 import time
 from itertools import imap
 
-# Run kdbtest against the BDB module.
-realm = K5Realm(create_kdb=False)
-realm.run(['./kdbtest'])
+# Run kdbtest against the non-LDAP KDB modules.
+for realm in multidb_realms(create_kdb=False):
+    realm.run(['./kdbtest'])
 
 # Set up an OpenLDAP test server if we can.
 
diff --git a/src/tests/t_kdb_locking.py b/src/tests/t_kdb_locking.py
index aac0a22..ea32ad7 100755
--- a/src/tests/t_kdb_locking.py
+++ b/src/tests/t_kdb_locking.py
@@ -13,7 +13,7 @@ import os
 from k5test import *
 
 p = 'foo'
-realm = K5Realm(create_user=False)
+realm = K5Realm(create_user=False, bdb_only=True)
 realm.addprinc(p, p)
 
 kadm5_lock = os.path.join(realm.testdir, 'db.kadm5.lock')
diff --git a/src/tests/t_policy.py b/src/tests/t_policy.py
index 9d92ebd..5c9e87c 100755
--- a/src/tests/t_policy.py
+++ b/src/tests/t_policy.py
@@ -60,34 +60,36 @@ realm.run([kadminl, 'cpw', '-pw', 'aa', 'pwuser'])
 
 # Test basic password lockout support.
 mark('password lockout')
-realm.run([kadminl, 'addpol', '-maxfailure', '2', '-failurecountinterval',
-           '5m', 'lockout'])
-realm.run([kadminl, 'modprinc', '+requires_preauth', '-policy', 'lockout',
-           'user'])
-
-# kinit twice with the wrong password.
-realm.run([kinit, realm.user_princ], input='wrong\n', expected_code=1,
-          expected_msg='Password incorrect while getting initial credentials')
-realm.run([kinit, realm.user_princ], input='wrong\n', expected_code=1,
-          expected_msg='Password incorrect while getting initial credentials')
-
-# Now the account should be locked out.
-m = 'Client\'s credentials have been revoked while getting initial credentials'
-realm.run([kinit, realm.user_princ], expected_code=1, expected_msg=m)
-
-# Check that modprinc -unlock allows a further attempt.
-realm.run([kadminl, 'modprinc', '-unlock', 'user'])
-realm.kinit(realm.user_princ, password('user'))
-
-# Make sure a nonexistent policy reference doesn't prevent authentication.
-realm.run([kadminl, 'delpol', 'lockout'])
-realm.kinit(realm.user_princ, password('user'))
+realm.stop()
+for realm in multidb_realms(create_host=False):
+    realm.run([kadminl, 'addpol', '-maxfailure', '2', '-failurecountinterval',
+               '5m', 'lockout'])
+    realm.run([kadminl, 'modprinc', '+requires_preauth', '-policy', 'lockout',
+               'user'])
+
+    # kinit twice with the wrong password.
+    msg = 'Password incorrect while getting initial credentials'
+    realm.run([kinit, realm.user_princ], input='wrong\n', expected_code=1,
+              expected_msg=msg)
+    realm.run([kinit, realm.user_princ], input='wrong\n', expected_code=1,
+              expected_msg=msg)
+
+    # Now the account should be locked out.
+    msg = 'credentials have been revoked while getting initial credentials'
+    realm.run([kinit, realm.user_princ], expected_code=1, expected_msg=msg)
+
+    # Check that modprinc -unlock allows a further attempt.
+    realm.run([kadminl, 'modprinc', '-unlock', 'user'])
+    realm.kinit(realm.user_princ, password('user'))
+
+    # Make sure a nonexistent policy reference doesn't prevent authentication.
+    realm.run([kadminl, 'delpol', 'lockout'])
+    realm.kinit(realm.user_princ, password('user'))
 
 # Regression test for issue #7099: databases created prior to krb5 1.3 have
 # multiple history keys, and kadmin prior to 1.7 didn't necessarily use the
 # first one to create history entries.
 mark('#7099 regression test')
-realm.stop()
 realm = K5Realm(start_kdc=False)
 # Create a history principal with two keys.
 realm.run(['./hist', 'make'])
diff --git a/src/tests/t_unlockiter.py b/src/tests/t_unlockiter.py
index 2a438e9..60ed77c 100755
--- a/src/tests/t_unlockiter.py
+++ b/src/tests/t_unlockiter.py
@@ -3,7 +3,8 @@ from k5test import *
 
 # Default KDB iteration is locked.  Expect write lock failure unless
 # unlocked iteration is explicitly requested.
-realm = K5Realm(create_user=False, create_host=False, start_kdc=False)
+realm = K5Realm(create_user=False, create_host=False, start_kdc=False,
+                bdb_only=True)
 realm.run(['./unlockiter'], expected_code=1)
 realm.run(['./unlockiter', '-u'])
 realm.run(['./unlockiter', '-l'], expected_code=1)
@@ -11,6 +12,7 @@ realm.run(['./unlockiter', '-l'], expected_code=1)
 # Set default to unlocked iteration.  Only explicitly requested locked
 # iteration should block the write lock.
 realm = K5Realm(create_user=False, create_host=False, start_kdc=False,
+                bdb_only=True,
                 krb5_conf={'dbmodules': {'db': {'unlockiter': 'true'}}})
 realm.run(['./unlockiter'])
 realm.run(['./unlockiter', '-u'])
diff --git a/src/util/k5test.py b/src/util/k5test.py
index bc32877..8d635af 100644
--- a/src/util/k5test.py
+++ b/src/util/k5test.py
@@ -118,6 +118,9 @@ keyword arguments:
 
 * get_creds=False: Don't get user credentials.
 
+* bdb_only=True: Use the DB2 KDB module even if K5TEST_LMDB is set in
+  the environment.
+
 Scripts may use the following functions and variables:
 
 * fail(message): Display message (plus leading marker and trailing
@@ -165,6 +168,12 @@ Scripts may use the following functions and variables:
   honored.  If keywords contains krb5_conf and/or kdc_conf fragments,
   they will be merged with the default and per-pass specifications.
 
+* multidb_realms(**keywords): Yields a realm for multiple DB modules.
+  Currently DB2 and LMDB are included.  Ideally LDAP would be
+  included, but setting up a test LDAP server currently requires a
+  one-second delay, so all LDAP tests are currently confined to
+  t_kdb.py.  keywords may contain any K5Realm initializer.
+
 * cross_realms(num, xtgts=None, args=None, **keywords): This function
   returns a list of num realms, where each realm's configuration knows
   how to contact all of the realms.  By default, each realm will
@@ -390,6 +399,8 @@ def fail(msg):
         sys.stdout.write(_last_cmd_output)
     if _current_pass:
         print "*** Failed in test pass:", _current_pass
+    if _current_db:
+        print "*** Failed with db:", _current_db
     sys.exit(1)
 
 
@@ -810,8 +821,9 @@ class K5Realm(object):
                  krb5_conf=None, kdc_conf=None, create_kdb=True,
                  krbtgt_keysalt=None, create_user=True, get_creds=True,
                  create_host=True, start_kdc=True, start_kadmind=False,
-                 start_kpropd=False):
+                 start_kpropd=False, bdb_only=False):
         global hostname, _default_krb5_conf, _default_kdc_conf
+        global _lmdb_kdc_conf, _current_db
 
         self.realm = realm
         self.testdir = os.path.join(os.getcwd(), testdir)
@@ -826,7 +838,11 @@ class K5Realm(object):
         self.ccache = os.path.join(self.testdir, 'ccache')
         self.kadmin_ccache = os.path.join(self.testdir, 'kadmin_ccache')
         self._krb5_conf = _cfg_merge(_default_krb5_conf, krb5_conf)
-        self._kdc_conf = _cfg_merge(_default_kdc_conf, kdc_conf)
+        base_kdc_conf = _default_kdc_conf
+        if (os.getenv('K5TEST_LMDB') is not None and
+            not bdb_only and not _current_db):
+            base_kdc_conf = _cfg_merge(base_kdc_conf, _lmdb_kdc_conf)
+        self._kdc_conf = _cfg_merge(base_kdc_conf, kdc_conf)
         self._kdc_proc = None
         self._kadmind_proc = None
         self._kpropd_procs = []
@@ -1008,6 +1024,10 @@ class K5Realm(object):
         self._kpropd_procs.append(proc)
         return proc
 
+    def stop_kpropd(self, proc):
+        stop_daemon(proc)
+        self._kpropd_procs.remove(proc)
+
     def run_kpropd_once(self, env, args=[]):
         return self.run(self._kpropd_args() + ['-t'] + args, env=env)
 
@@ -1102,6 +1122,20 @@ def multipass_realms(**keywords):
         _current_pass = None
 
 
+def multidb_realms(**keywords):
+    global _current_db, _dbpasses
+    caller_kdc_conf = keywords.get('kdc_conf')
+    for p in _dbpasses:
+        (name, kdc_conf) = p
+        output('*** Using DB type %s\n' % name)
+        keywords['kdc_conf'] = _cfg_merge(kdc_conf, caller_kdc_conf)
+        _current_db = name
+        realm = K5Realm(**keywords)
+        yield realm
+        realm.stop()
+        _current_db = None
+
+
 def cross_realms(num, xtgts=None, args=None, **keywords):
     # Build keyword args for each realm.
     realm_args = []
@@ -1198,6 +1232,10 @@ _default_kdc_conf = {
         'default': 'FILE:$testdir/others.log'}}
 
 
+_lmdb_kdc_conf = {'dbmodules': {'db': {'db_library': 'klmdb',
+                                       'nosync': 'true'}}}
+
+
 # A pass is a tuple of: name, krbtgt_keysalt, krb5_conf, kdc_conf.
 _passes = [
     # No special settings; exercises AES256.
@@ -1282,6 +1320,7 @@ _passes = [
 
 _success = False
 _current_pass = None
+_current_db = None
 _daemons = []
 _parse_args()
 atexit.register(_onexit)
@@ -1298,6 +1337,11 @@ runenv = _import_runenv()
 hostname = _get_hostname()
 null_input = open(os.devnull, 'r')
 
+# A DB pass is a tuple of: name, kdc_conf.
+_dbpasses = [('db2', None)]
+if runenv.have_lmdb == 'yes':
+    _dbpasses.append(('lmdb', _lmdb_kdc_conf))
+
 krb5kdc = os.path.join(buildtop, 'kdc', 'krb5kdc')
 kadmind = os.path.join(buildtop, 'kadmin', 'server', 'kadmind')
 kadmin = os.path.join(buildtop, 'kadmin', 'cli', 'kadmin')


More information about the cvs-krb5 mailing list